IGNITE-7251 Remove term "fabric" from Ignite deliverables

Signed-off-by: Anton Vinogradov <av@apache.org>
diff --git a/.gitignore b/.gitignore
index cd52dfa..c2eea34 100644
--- a/.gitignore
+++ b/.gitignore
@@ -98,3 +98,6 @@
 
 #Files related to ML manual-runnable tests
 /modules/ml/src/test/resources/manualrun/trees/columntrees.manualrun.properties
+
+#NodeJs files
+/modules/platforms/nodejs/node_modules
diff --git a/bin/control.bat b/bin/control.bat
index 15d5e6f..6b36a92 100644
--- a/bin/control.bat
+++ b/bin/control.bat
@@ -42,13 +42,33 @@
 goto error_finish
 
 :checkJdkVersion
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" /c:"java version .1\.8\..*" > nul
-if %ERRORLEVEL% equ 0 goto checkIgniteHome1
+set cmd="%JAVA_HOME%\bin\java.exe"
+for /f "tokens=* USEBACKQ" %%f in (`%cmd% -version 2^>^&1`) do (
+    set var=%%f
+    goto :LoopEscape
+)
+:LoopEscape
+
+set var=%var:~14%
+set var=%var:"=%
+for /f "tokens=1,2 delims=." %%a in ("%var%") do set MAJOR_JAVA_VER=%%a & set MINOR_JAVA_VER=%%b
+
+if %MAJOR_JAVA_VER% == 1 set MAJOR_JAVA_VER=%MINOR_JAVA_VER%
+
+if %MAJOR_JAVA_VER% LSS 8 (
     echo %0, ERROR:
     echo The version of JAVA installed in %JAVA_HOME% is incorrect.
     echo Please point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
     echo You can also download latest JDK at http://java.com/download.
-goto error_finish
+	goto error_finish
+)
+
+if %MAJOR_JAVA_VER% GTR 9 (
+	echo %0, WARNING:
+    echo The version of JAVA installed in %JAVA_HOME% was not tested with Apache Ignite.
+    echo Run it on your own risk or point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
+    echo You can also download latest JDK at http://java.com/download.
+)
 
 :: Check IGNITE_HOME.
 :checkIgniteHome1
@@ -198,9 +218,9 @@
 ::
 
 ::
-:: Final JVM_OPTS for Java 9 compatibility
+:: Final JVM_OPTS for Java 9+ compatibility
 ::
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" > nul && set JVM_OPTS=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
+if %MAJOR_JAVA_VER% GEQ 9 set JVM_OPTS=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
 
 if "%INTERACTIVE%" == "1" (
     "%JAVA_HOME%\bin\java.exe" %JVM_OPTS% %QUIET% %RESTART_SUCCESS_OPT% %JMX_MON% ^
diff --git a/bin/ignite.bat b/bin/ignite.bat
index 984e01f..25c828f 100644
--- a/bin/ignite.bat
+++ b/bin/ignite.bat
@@ -42,13 +42,33 @@
 goto error_finish
 
 :checkJdkVersion
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" /c:"version .1\.8\..*" > nul
-if %ERRORLEVEL% equ 0 goto checkIgniteHome1
+set cmd="%JAVA_HOME%\bin\java.exe"
+for /f "tokens=* USEBACKQ" %%f in (`%cmd% -version 2^>^&1`) do (
+    set var=%%f
+    goto :LoopEscape
+)
+:LoopEscape
+
+set var=%var:~14%
+set var=%var:"=%
+for /f "tokens=1,2 delims=." %%a in ("%var%") do set MAJOR_JAVA_VER=%%a & set MINOR_JAVA_VER=%%b
+
+if %MAJOR_JAVA_VER% == 1 set MAJOR_JAVA_VER=%MINOR_JAVA_VER%
+
+if %MAJOR_JAVA_VER% LSS 8 (
     echo %0, ERROR:
     echo The version of JAVA installed in %JAVA_HOME% is incorrect.
     echo Please point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
     echo You can also download latest JDK at http://java.com/download.
-goto error_finish
+	goto error_finish
+)
+
+if %MAJOR_JAVA_VER% GTR 9 (
+	echo %0, WARNING:
+    echo The version of JAVA installed in %JAVA_HOME% was not tested with Apache Ignite.
+    echo Run it on your own risk or point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
+    echo You can also download latest JDK at http://java.com/download.
+)
 
 :: Check IGNITE_HOME.
 :checkIgniteHome1
@@ -213,9 +233,9 @@
 ::
 
 ::
-:: Final JVM_OPTS for Java 9 compatibility
+:: Final JVM_OPTS for Java 9+ compatibility
 ::
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" > nul && set JVM_OPTS=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
+if %MAJOR_JAVA_VER% GEQ 9 set JVM_OPTS=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
 
 if "%INTERACTIVE%" == "1" (
     "%JAVA_HOME%\bin\java.exe" %JVM_OPTS% %QUIET% %RESTART_SUCCESS_OPT% %JMX_MON% ^
diff --git a/bin/ignite.sh b/bin/ignite.sh
index 0692306..c7b7318 100755
--- a/bin/ignite.sh
+++ b/bin/ignite.sh
@@ -90,11 +90,7 @@
 # ADD YOUR/CHANGE ADDITIONAL OPTIONS HERE
 #
 if [ -z "$JVM_OPTS" ] ; then
-    if [[ `"$JAVA" -version 2>&1 | egrep "1\.[7]\."` ]]; then
-        JVM_OPTS="-Xms1g -Xmx1g -server -XX:+AggressiveOpts -XX:MaxPermSize=256m"
-    else
-        JVM_OPTS="-Xms1g -Xmx1g -server -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
-    fi
+    JVM_OPTS="-Xms1g -Xmx1g -server -XX:+AggressiveOpts -XX:MaxMetaspaceSize=256m"
 fi
 
 #
@@ -149,16 +145,18 @@
 # JVM_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=8787 ${JVM_OPTS}"
 
 #
-# Final JVM_OPTS for Java 9 compatibility
+# Final JVM_OPTS for Java 9+ compatibility
 #
-${JAVA_HOME}/bin/java -version 2>&1 | grep -qE 'java version "9.*"' && {
-JVM_OPTS="--add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
+javaMajorVersion "${JAVA_HOME}/bin/java"
+
+if [ $version -gt 8 ]; then
+    JVM_OPTS="--add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
           --add-exports java.base/sun.nio.ch=ALL-UNNAMED \
           --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED \
           --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED \
           --add-modules java.xml.bind \
       ${JVM_OPTS}"
-} || true
+fi
 
 ERRORCODE="-1"
 
diff --git a/bin/ignitevisorcmd.bat b/bin/ignitevisorcmd.bat
index c12869b..86e688f 100644
--- a/bin/ignitevisorcmd.bat
+++ b/bin/ignitevisorcmd.bat
@@ -42,13 +42,33 @@
 goto error_finish
 
 :checkJdkVersion
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" /c:"version .1\.8\..*" > nul
-if %ERRORLEVEL% equ 0 goto checkIgniteHome1
+set cmd="%JAVA_HOME%\bin\java.exe"
+for /f "tokens=* USEBACKQ" %%f in (`%cmd% -version 2^>^&1`) do (
+    set var=%%f
+    goto :LoopEscape
+)
+:LoopEscape
+
+set var=%var:~14%
+set var=%var:"=%
+for /f "tokens=1,2 delims=." %%a in ("%var%") do set MAJOR_JAVA_VER=%%a & set MINOR_JAVA_VER=%%b
+
+if %MAJOR_JAVA_VER% == 1 set MAJOR_JAVA_VER=%MINOR_JAVA_VER%
+
+if %MAJOR_JAVA_VER% LSS 8 (
     echo %0, ERROR:
-    echo The version of JAVA installed in %JAVA_HOME% is incorrect.
+    echo The %MAJOR_JAVA_VER% version of JAVA installed in %JAVA_HOME% is incorrect.
     echo Please point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
     echo You can also download latest JDK at http://java.com/download.
-goto error_finish
+	goto error_finish
+)
+
+if %MAJOR_JAVA_VER% GTR 9 (
+	echo %0, WARNING:
+    echo The %MAJOR_JAVA_VER% version of JAVA installed in %JAVA_HOME% was not tested with Apache Ignite.
+    echo Run it on your own risk or point JAVA_HOME variable to installation of JDK 1.8 or JDK 9.
+    echo You can also download latest JDK at http://java.com/download.
+)
 
 :: Check IGNITE_HOME.
 :checkIgniteHome1
@@ -142,9 +162,9 @@
 if "%ARGS%" == "" set ARGS=%*
 
 ::
-:: Final JVM_OPTS for Java 9 compatibility
+:: Final JVM_OPTS for Java 9+ compatibility
 ::
-"%JAVA_HOME%\bin\java.exe" -version 2>&1 | findstr /R /c:"version .9\..*" > nul && set JVM_OPTS_VISOR=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
+if %MAJOR_JAVA_VER% GEQ 9 set JVM_OPTS=--add-exports java.base/jdk.internal.misc=ALL-UNNAMED --add-exports java.base/sun.nio.ch=ALL-UNNAMED --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED --add-modules java.xml.bind %JVM_OPTS%
 
 ::
 :: Starts Visor console.
diff --git a/bin/ignitevisorcmd.sh b/bin/ignitevisorcmd.sh
index 0471550..1fcc127 100755
--- a/bin/ignitevisorcmd.sh
+++ b/bin/ignitevisorcmd.sh
@@ -108,14 +108,16 @@
 #
 # Final JVM_OPTS for Java 9 compatibility
 #
-${JAVA_HOME}/bin/java -version 2>&1 | grep -qE 'java version "9.*"' && {
-JVM_OPTS="--add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
+javaMajorVersion "${JAVA_HOME}/bin/java"
+
+if [ $version -gt 8 ]; then
+    JVM_OPTS="--add-exports java.base/jdk.internal.misc=ALL-UNNAMED \
           --add-exports java.base/sun.nio.ch=ALL-UNNAMED \
           --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED \
           --add-exports jdk.internal.jvmstat/sun.jvmstat.monitor=ALL-UNNAMED \
           --add-modules java.xml.bind \
       ${JVM_OPTS}"
-} || true
+fi
 
 #
 # Start Visor console.
diff --git a/bin/include/functions.sh b/bin/include/functions.sh
index 3fd1a72..dbeee11 100755
--- a/bin/include/functions.sh
+++ b/bin/include/functions.sh
@@ -27,6 +27,28 @@
 #   source "${IGNITE_HOME_TMP}"/bin/include/functions.sh
 #
 
+# Extract java version to `version` variable.
+javaVersion() {
+    version=$("$1" -version 2>&1 | awk -F '"' '/version/ {print $2}')
+}
+
+# Extract only major version of java to `version` variable.
+javaMajorVersion() {
+    javaVersion "$1"
+    version="${version%%.*}"
+
+    if [ ${version} -eq 1 ]; then
+        # Version seems starts from 1, we need second number.
+        javaVersion "$1"
+        backIFS=$IFS
+
+        IFS=. ver=(${version##*-})
+        version=${ver[1]}
+
+        IFS=$backIFS
+    fi
+}
+
 #
 # Discovers path to Java executable and checks it's version.
 # The function exports JAVA variable with path to Java executable.
@@ -40,7 +62,7 @@
         if [ $RETCODE -ne 0 ]; then
             echo $0", ERROR:"
             echo "JAVA_HOME environment variable is not found."
-            echo "Please point JAVA_HOME variable to location of JDK 1.7 or JDK 1.8."
+            echo "Please point JAVA_HOME variable to location of JDK 1.8 or JDK 9."
             echo "You can also download latest JDK at http://java.com/download"
 
             exit 1
@@ -54,22 +76,20 @@
     #
     # Check JDK.
     #
-    if [ ! -e "$JAVA" ]; then
-        echo $0", ERROR:"
-        echo "JAVA is not found in JAVA_HOME=$JAVA_HOME."
-        echo "Please point JAVA_HOME variable to installation of JDK 1.8 or JDK 9"
-        echo "You can also download latest JDK at http://java.com/download"
+    javaMajorVersion "$JAVA"
 
-        exit 1
-    fi
-
-    "$JAVA" -version 2>&1 | grep -qE 'version "(1.8.*|9.*)"' || {
+    if [ $version -lt 8 ]; then
         echo "$0, ERROR:"
-        echo "The version of JAVA installed in JAVA_HOME=$JAVA_HOME is incorrect."
+        echo "The $version version of JAVA installed in JAVA_HOME=$JAVA_HOME is incompatible."
         echo "Please point JAVA_HOME variable to installation of JDK 1.8 or JDK 9."
         echo "You can also download latest JDK at http://java.com/download"
         exit 1
-    }
+    elif [ $version -gt 9 ]; then
+        echo "$0, WARNING:"
+        echo "The $version version of JAVA installed in JAVA_HOME=$JAVA_HOME was not tested with Apache Ignite."
+        echo "Run it on your own risk or point JAVA_HOME variable to installation of JDK 1.8 or JDK 9."
+        echo "You can also download JDK at http://java.com/download"
+    fi
 }
 
 #
diff --git a/examples/DEVNOTES.txt b/examples/DEVNOTES.txt
new file mode 100644
index 0000000..ba578bb
--- /dev/null
+++ b/examples/DEVNOTES.txt
@@ -0,0 +1,11 @@
+Ignite Examples Build Instructions
+==================================
+1) Compile and install Ignite from project root folder:
+
+        mvn clean install -Pall-java,all-scala,licenses -DskipTests
+
+   (If needed, refer DEVNOTES.txt in project root folder for most up-to-date build instructions.)
+
+2) Build examples from "examples" sub-folder under Ignite project root:
+
+        mvn clean package -DskipTests
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/MLExamplesCommonArgs.java b/examples/src/main/java/org/apache/ignite/examples/ml/MLExamplesCommonArgs.java
deleted file mode 100644
index 701894b..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ml/MLExamplesCommonArgs.java
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ml;
-
-/**
- * Some common arguments for examples in ML module.
- */
-public class MLExamplesCommonArgs {
-    /**
-     * Unattended argument.
-     */
-    public static String UNATTENDED = "unattended";
-
-    /** Empty args for ML examples. */
-    public static final String[] EMPTY_ARGS_ML = new String[] {"--" + UNATTENDED};
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java
index e1b9844..152375a 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java
@@ -18,27 +18,33 @@
 package org.apache.ignite.examples.ml.clustering;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
 import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer;
-import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer;
 import org.apache.ignite.ml.math.Tracer;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run kNN multi-class classification trainer over distributed dataset.
- *
- * @see KNNClassificationTrainer
+ * Run KMeans clustering algorithm ({@link KMeansTrainer}) over distributed dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the model based on the specified data using
+ * <a href="https://en.wikipedia.org/wiki/K-means_clustering">KMeans</a> algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster
+ * does this point belong to, and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class KMeansClusterizationExample {
     /** Run example. */
@@ -51,7 +57,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 KMeansClusterizationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 KMeansTrainer trainer = new KMeansTrainer()
                     .withSeed(7867L);
@@ -64,8 +70,8 @@
                 );
 
                 System.out.println(">>> KMeans centroids");
-                Tracer.showAscii(mdl.centers()[0]);
-                Tracer.showAscii(mdl.centers()[1]);
+                Tracer.showAscii(mdl.getCenters()[0]);
+                Tracer.showAscii(mdl.getCenters()[1]);
                 System.out.println(">>>");
 
                 System.out.println(">>> -----------------------------------");
@@ -94,6 +100,8 @@
 
                     System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
                     System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double)totalAmount));
+
+                    System.out.println(">>> KMeans clustering algorithm over cached dataset usage example completed.");
                 }
             });
 
@@ -102,25 +110,6 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
     /** The Iris dataset. */
     private static final double[][] data = {
         {0, 5.1, 3.5, 1.4, 0.2},
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/AlgorithmSpecificDatasetExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/dataset/AlgorithmSpecificDatasetExample.java
index 1229fb1..4d42d19 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/AlgorithmSpecificDatasetExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/dataset/AlgorithmSpecificDatasetExample.java
@@ -34,22 +34,31 @@
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 
 /**
- * Example that shows how to implement your own algorithm (gradient descent trainer for linear regression) which uses
- * dataset as an underlying infrastructure.
- *
+ * Example that shows how to implement your own algorithm
+ * (<a href="https://en.wikipedia.org/wiki/Gradient_descent">gradient</a> descent trainer for linear regression)
+ * which uses dataset as an underlying infrastructure.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it creates an algorithm specific dataset to perform linear regression as described in more detail below.</p>
+ * <p>
+ * Finally, this example trains linear regression model using gradient descent and outputs the result.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
+ * <p>
  * The common idea behind using algorithm specific datasets is to write a simple local version algorithm at first, then
- * find operations which involves data manipulations, and finally define algorithm specific version of the dataset
- * extended by introducing these new operations. As result your algorithm will work with extended dataset (based on
- * {@link DatasetWrapper}) in a sequential manner.
- *
+ * find operations which involve data manipulations, and finally define algorithm specific version of the dataset
+ * extended by introducing these new operations. As a result your algorithm will work with extended dataset (based on
+ * {@link DatasetWrapper}) in a sequential manner.</p>
+ * <p>
  * In this example we need to implement gradient descent. This is iterative method that involves calculation of gradient
- * on every step. In according with the common idea we defines
+ * on every step. In according with the common idea we define
  * {@link AlgorithmSpecificDatasetExample.AlgorithmSpecificDataset} - extended version of {@code Dataset} with
- * {@code gradient} method. As result our gradient descent method looks like a simple loop where every iteration
+ * {@code gradient} method. As a result our gradient descent method looks like a simple loop where every iteration
  * includes call of the {@code gradient} method. In the example we want to keep iteration number as well for logging.
  * Iteration number cannot be recovered from the {@code upstream} data and we need to keep it in the custom
  * partition {@code context} which is represented by
- * {@link AlgorithmSpecificDatasetExample.AlgorithmSpecificPartitionContext} class.
+ * {@link AlgorithmSpecificDatasetExample.AlgorithmSpecificPartitionContext} class.</p>
  */
 public class AlgorithmSpecificDatasetExample {
     /** Run example. */
@@ -59,7 +68,7 @@
 
             IgniteCache<Integer, Person> persons = createCache(ignite);
 
-            // Creates a algorithm specific dataset to perform linear regression. Here we defines the way features and
+            // Creates a algorithm specific dataset to perform linear regression. Here we define the way features and
             // labels are extracted, and partition data and context are created.
             try (AlgorithmSpecificDataset dataset = DatasetFactory.create(
                 ignite,
@@ -169,12 +178,12 @@
         private int iteration;
 
         /** */
-        public int getIteration() {
+        int getIteration() {
             return iteration;
         }
 
         /** */
-        public void setIteration(int iteration) {
+        void setIteration(int iteration) {
             this.iteration = iteration;
         }
     }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/CacheBasedDatasetExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/dataset/CacheBasedDatasetExample.java
index b5a7059..3f75540 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/CacheBasedDatasetExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/dataset/CacheBasedDatasetExample.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.examples.ml.dataset;
 
-import java.util.Arrays;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
 import org.apache.ignite.ml.dataset.DatasetFactory;
 import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
@@ -31,6 +31,13 @@
 /**
  * Example that shows how to create dataset based on an existing Ignite Cache and then use it to calculate {@code mean}
  * and {@code std} values as well as {@code covariance} and {@code correlation} matrices.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it creates the dataset based on the data in the cache and uses Dataset API to find and output
+ * various statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
  */
 public class CacheBasedDatasetExample {
     /** Run example. */
@@ -46,25 +53,7 @@
                 persons,
                 (k, v) -> VectorUtils.of(v.getAge(), v.getSalary())
             )) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
+                new DatasetHelper(dataset).describe();
             }
 
             System.out.println(">>> Cache Based Dataset example completed.");
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/LocalDatasetExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/dataset/LocalDatasetExample.java
deleted file mode 100644
index e3af738..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ml/dataset/LocalDatasetExample.java
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ml.dataset;
-
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.examples.ml.dataset.model.Person;
-import org.apache.ignite.ml.dataset.DatasetFactory;
-import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
-import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-
-/**
- * Example that shows how to create dataset based on an existing local storage and then use it to calculate {@code mean}
- * and {@code std} values as well as {@code covariance} and {@code correlation} matrices.
- */
-public class LocalDatasetExample {
-    /** Run example. */
-    public static void main(String[] args) throws Exception {
-        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
-            System.out.println(">>> Local Dataset example started.");
-
-            Map<Integer, Person> persons = createCache(ignite);
-
-            // Creates a local simple dataset containing features and providing standard dataset API.
-            try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(
-                persons,
-                2,
-                (k, v) -> VectorUtils.of(v.getAge(), v.getSalary())
-            )) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
-            }
-
-            System.out.println(">>> Local Dataset example completed.");
-        }
-    }
-
-    /** */
-    private static Map<Integer, Person> createCache(Ignite ignite) {
-        Map<Integer, Person> persons = new HashMap<>();
-
-        persons.put(1, new Person("Mike", 42, 10000));
-        persons.put(2, new Person("John", 32, 64000));
-        persons.put(3, new Person("George", 53, 120000));
-        persons.put(4, new Person("Karl", 24, 70000));
-
-        return persons;
-    }
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java
index 4944a6b..14b7553 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java
@@ -20,62 +20,69 @@
 import java.io.Serializable;
 
 /**
- * POJO to model a coin
+ * POJO to model a coin.
  */
 public class Coin implements Serializable {
-    /** Define Coin Type */
-    public static enum CoinType {
-        PENNY, QUARTER, NICKEL, DIME
+    /** Define Coin Type. */
+    public enum CoinType {
+        /** */
+        PENNY,
+        /** */
+        QUARTER,
+        /** */
+        NICKEL,
+        /** */
+        DIME
     }
 
-    ;
+    /** Number of coins. */
+    private int numOfCoins;
 
-    /** number of coins */
-    private int numberOfCoins = 0;
-
-    /** CoinType */
-    private CoinType coinType = null;
+    /** Coin type. */
+    private CoinType coinType;
 
     /**
-     * @param coinType Type of coin
-     * @param numberOfCoins Number of coins
+     * Create instance.
+     *
+     * @param coinType Type of coin.
+     * @param numOfCoins Number of coins.
      */
-    public Coin(CoinType coinType, int numberOfCoins) {
+    Coin(CoinType coinType, int numOfCoins) {
         this.coinType = coinType;
-        this.numberOfCoins = numberOfCoins;
+        this.numOfCoins = numOfCoins;
     }
 
     /**
-     * Retrieve the number of coins
+     * Retrieve the number of coins.
      *
-     * @return Number of coins
+     * @return Number of coins.
      */
-    public int getNumberOfCoins() {
-        return numberOfCoins;
+    public int getNumOfCoins() {
+        return numOfCoins;
     }
 
     /**
-     * Set the number of coins
+     * Set the number of coins.
      *
-     * @param numberOfCoins Number of coins
+     * @param numOfCoins Number of coins.
      */
-    public void setNumberOfCoins(int numberOfCoins) {
-        this.numberOfCoins = numberOfCoins;
+    public void setNumOfCoins(int numOfCoins) {
+        this.numOfCoins = numOfCoins;
     }
 
     /**
-     * Retrieve Coin type
+     * Retrieve Coin type.
      *
-     * @return Coin type
+     * @return Coin type.
      */
     public CoinType getCoinType() {
         return coinType;
     }
 
     /**
-     * Set Coin type
+     * Set Coin type.
      *
-     * @param coinType Coin Type
+     * @param coinType Coin type.
      */
     public void setCoinType(CoinType coinType) {
         this.coinType = coinType;
@@ -83,7 +90,6 @@
 
     /** {@inheritDoc} */
     @Override public String toString() {
-        return "Coin [numberOfCoins=" + numberOfCoins + ", coinType=" + coinType + "]";
+        return "Coin [numOfCoins=" + numOfCoins + ", coinType=" + coinType + "]";
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java
index 34626be..36b5777 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java
@@ -22,83 +22,75 @@
 import org.apache.ignite.ml.genetic.IFitnessFunction;
 
 /**
- * This example demonstrates how to create a IFitnessFunction <br/>
- *
- * Your IFitness function will vary depending on your particular use case. <br/>
- *
- * For this fitness function, we simply want to calculate the value of  <br/>
- *
- * an individual solution relative to other solutions. <br/>
+ * This example demonstrates how to create a {@link IFitnessFunction}.
+ * <p>
+ * Your fitness function will vary depending on your particular use case. For this fitness function, we simply want
+ * to calculate the value of an individual solution relative to other solutions.</p>
  */
 public class OptimizeMakeChangeFitnessFunction implements IFitnessFunction {
-    /** target amount */
-    int targetAmount = 0;
+    /** Target amount. */
+    private int targetAmount;
 
     /**
-     * @param targetAmount Amount of change
+     * @param targetAmount Amount of change.
      */
     public OptimizeMakeChangeFitnessFunction(int targetAmount) {
         this.targetAmount = targetAmount;
     }
 
     /**
-     * Calculate fitness
+     * Calculate fitness.
      *
-     * @param genes Genes
-     * @return Fitness value
+     * @param genes List of genes.
+     * @return Fitness value.
      */
     public double evaluate(List<Gene> genes) {
-
         int changeAmount = getAmountOfChange(genes);
         int totalCoins = getTotalNumberOfCoins(genes);
         int changeDifference = Math.abs(targetAmount - changeAmount);
 
         double fitness = (99 - changeDifference);
 
-        if (changeAmount == targetAmount) {
+        if (changeAmount == targetAmount)
             fitness += 100 - (10 * totalCoins);
-        }
 
         return fitness;
-
     }
 
     /**
-     * Calculate amount of change
+     * Calculate amount of change.
      *
-     * @param genes Genes
-     * @return Amount of change
+     * @param genes List of genes.
+     * @return Amount of change.
      */
     private int getAmountOfChange(List<Gene> genes) {
-        Gene quarterGene = (Gene)genes.get(0);
-        Gene dimeGene = (Gene)genes.get(1);
-        Gene nickelGene = (Gene)genes.get(2);
-        Gene pennyGene = (Gene)genes.get(3);
+        Gene quarterGene = genes.get(0);
+        Gene dimeGene = genes.get(1);
+        Gene nickelGene = genes.get(2);
+        Gene pennyGene = genes.get(3);
 
-        int numQuarters = ((Coin)quarterGene.getVal()).getNumberOfCoins();
-        int numDimes = ((Coin)dimeGene.getVal()).getNumberOfCoins();
-        int numNickels = ((Coin)nickelGene.getVal()).getNumberOfCoins();
-        int numPennies = ((Coin)pennyGene.getVal()).getNumberOfCoins();
+        int numQuarters = ((Coin)quarterGene.getVal()).getNumOfCoins();
+        int numDimes = ((Coin)dimeGene.getVal()).getNumOfCoins();
+        int numNickels = ((Coin)nickelGene.getVal()).getNumOfCoins();
+        int numPennies = ((Coin)pennyGene.getVal()).getNumOfCoins();
 
         return (numQuarters * 25) + (numDimes * 10) + (numNickels * 5) + numPennies;
     }
 
     /**
-     * Return the total number of coins
+     * Return the total number of coins.
      *
-     * @param genes Genes
-     * @return Number of coins
+     * @param genes List of genes.
+     * @return Number of coins.
      */
     private int getTotalNumberOfCoins(List<Gene> genes) {
-
-        int totalNumberOfCoins = 0;
+        int totalNumOfCoins = 0;
 
         for (Gene gene : genes) {
-            int numberOfCoins = ((Coin)gene.getVal()).getNumberOfCoins();
-            totalNumberOfCoins = totalNumberOfCoins + numberOfCoins;
-
+            int numOfCoins = ((Coin)gene.getVal()).getNumOfCoins();
+            totalNumOfCoins = totalNumOfCoins + numOfCoins;
         }
-        return totalNumberOfCoins;
 
+        return totalNumOfCoins;
     }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java
index 1e80d6d..36a5f47 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java
@@ -20,7 +20,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.GAGrid;
@@ -30,110 +29,98 @@
 import org.apache.ignite.ml.genetic.parameter.GAGridConstants;
 
 /**
- * This example demonstrates how to use the GAGrid framework. <br/>
- *
- * This example is inspired by JGAP's "Minimize Make Change" example. <br/>
- *
+ * This example demonstrates how to use the {@link GAGrid} framework. It is inspired by
+ * <a href="https://github.com/martin-steghoefer/jgap/blob/master/examples/src/examples/MinimizingMakeChange.java">
+ * JGAP's "Minimize Make Change"</a> example.
+ * <p>
  * In this example, the objective is to calculate the minimum number of coins that equal user specified amount of
- * change
- *
- * ie: -DAMOUNTCHANGE
- *
- * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.change.OptimizeMakeChangeGAExample"
- * -DAMOUNTCHANGE=75
- *
- * <p> Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
- * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p> <p> Alternatively you can run ExampleNodeStartup in
- * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.</p>
+ * change ie: {@code -DAMOUNTCHANGE}.</p>
+ * <p>
+ * {@code mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.change.OptimizeMakeChangeGAExample"
+ * -DAMOUNTCHANGE=75}</p>
+ * <p>
+ * Code in this example launches Ignite grid, prepares simple test data (gene pool) and configures GA grid.</p>
+ * <p>
+ * After that it launches the process of evolution on GA grid and outputs the progress and results.</p>
+ * <p>
+ * You can change the test data and parameters of GA grid used in this example and re-run it to explore
+ * this functionality further.</p>
+ * <p>
+ * Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
+ * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p>
+ * <p>
+ *  Alternatively you can run ExampleNodeStartup in another JVM which will start node with
+ *  {@code examples/config/example-ignite.xml} configuration.</p>
  */
 public class OptimizeMakeChangeGAExample {
-    /** Ignite instance */
-    private static Ignite ignite = null;
-
-    /** GAGrid */
-    private static GAGrid gaGrid = null;
-
-    /** GAConfiguration */
-    private static GAConfiguration gaConfig = null;
-
-    /** amount of change */
-    private static String sAmountChange = null;
-
-    /** Ignite logger */
-    private static IgniteLogger logger = null;
-
     /**
      * Executes example.
      *
-     * Specify value for -DAMOUNTCHANGE JVM system variable
+     * Specify value for {@code -DAMOUNTCHANGE} JVM system variable.
      *
      * @param args Command line arguments, none required.
      */
     public static void main(String args[]) {
-        System.setProperty("IGNITE_QUIET", "false");
+        System.out.println(">>> OptimizeMakeChange GA grid example started.");
 
-        sAmountChange = "75";
+        String sAmountChange = "75";
 
-        StringBuffer sbErrorMessage = new StringBuffer();
-        sbErrorMessage.append("AMOUNTCHANGE System property not set. Please provide a valid value between 1 and 99. ");
-        sbErrorMessage.append(" ");
-        sbErrorMessage.append("IE: -DAMOUNTCHANGE=75");
-        sbErrorMessage.append("\n");
-        sbErrorMessage.append("Using default value: 75");
+        StringBuilder sbErrorMsg = new StringBuilder();
+        sbErrorMsg.append("AMOUNTCHANGE System property not set. Please provide a valid value between 1 and 99. ");
+        sbErrorMsg.append(" ");
+        sbErrorMsg.append("IE: -DAMOUNTCHANGE=75");
+        sbErrorMsg.append("\n");
+        sbErrorMsg.append("Using default value: 75");
 
-        //Check if -DAMOUNTCHANGE JVM system variable is provided
-        if (System.getProperty("AMOUNTCHANGE") == null) {
-            System.out.println(sbErrorMessage);
-        }
-        else {
+        //Check if -DAMOUNTCHANGE JVM system variable is provided.
+        if (System.getProperty("AMOUNTCHANGE") == null)
+            System.out.println(sbErrorMsg);
+        else
             sAmountChange = System.getProperty("AMOUNTCHANGE");
-        }
 
         try {
+            // Create an Ignite instance as you would in any other use case.
+            Ignite ignite = Ignition.start("examples/config/example-ignite.xml");
 
-            //Create an Ignite instance as you would in any other use case.
-            ignite = Ignition.start("examples/config/example-ignite.xml");
+            // Create GAConfiguration.
+            GAConfiguration gaCfg = new GAConfiguration();
 
-            logger = ignite.log();
-
-            // Create GAConfiguration
-            gaConfig = new GAConfiguration();
-
-            // set Gene Pool
+            // Set Gene Pool.
             List<Gene> genes = getGenePool();
 
-            // set selection method
-            gaConfig.setSelectionMtd(GAGridConstants.SELECTION_METHOD.SELECTON_METHOD_ELETISM);
-            gaConfig.setElitismCnt(10);
+            // Set selection method.
+            gaCfg.setSelectionMtd(GAGridConstants.SELECTION_METHOD.SELECTON_METHOD_ELETISM);
+            gaCfg.setElitismCnt(10);
 
-            // set the Chromosome Length to '4' since we have 4 coins.
-            gaConfig.setChromosomeLen(4);
+            // Set the Chromosome Length to '4' since we have 4 coins.
+            gaCfg.setChromosomeLen(4);
 
-            // set population size
-            gaConfig.setPopulationSize(500);
+            // Set population size.
+            gaCfg.setPopulationSize(500);
 
-            // initialize gene pool
-            gaConfig.setGenePool(genes);
+            // Initialize gene pool.
+            gaCfg.setGenePool(genes);
 
-            // set Truncate Rate
-            gaConfig.setTruncateRate(.10);
+            // Set Truncate Rate.
+            gaCfg.setTruncateRate(.10);
 
-            // set Cross Over Rate
-            gaConfig.setCrossOverRate(.50);
+            // Set Cross Over Rate.
+            gaCfg.setCrossOverRate(.50);
 
-            // set Mutation Rate
-            gaConfig.setMutationRate(.50);
+            // Set Mutation Rate.
+            gaCfg.setMutationRate(.50);
 
-            // create and set Fitness function
+            // Create and set Fitness function.
             OptimizeMakeChangeFitnessFunction function = new OptimizeMakeChangeFitnessFunction(new Integer(sAmountChange));
-            gaConfig.setFitnessFunction(function);
+            gaCfg.setFitnessFunction(function);
 
-            // create and set TerminateCriteria
-            OptimizeMakeChangeTerminateCriteria termCriteria = new OptimizeMakeChangeTerminateCriteria(ignite);
+            // Create and set TerminateCriteria.
+            OptimizeMakeChangeTerminateCriteria termCriteria = new OptimizeMakeChangeTerminateCriteria(ignite,
+                System.out::println);
 
             ChromosomeCriteria chromosomeCriteria = new ChromosomeCriteria();
 
-            List values = new ArrayList();
+            List<String> values = new ArrayList<>();
 
             values.add("coinType=QUARTER");
             values.add("coinType=DIME");
@@ -142,40 +129,41 @@
 
             chromosomeCriteria.setCriteria(values);
 
-            gaConfig.setChromosomeCriteria(chromosomeCriteria);
-            gaConfig.setTerminateCriteria(termCriteria);
+            gaCfg.setChromosomeCriteria(chromosomeCriteria);
+            gaCfg.setTerminateCriteria(termCriteria);
 
-            // initialize GAGrid
-            gaGrid = new GAGrid(gaConfig, ignite);
+            // Initialize GAGrid.
+            GAGrid gaGrid = new GAGrid(gaCfg, ignite);
 
-            logger.info("##########################################################################################");
+            System.out.println("##########################################################################################");
 
-            logger.info("Calculating optimal set of coins where amount of change is " + sAmountChange);
+            System.out.println("Calculating optimal set of coins where amount of change is " + sAmountChange);
 
-            logger.info("##########################################################################################");
+            System.out.println("##########################################################################################");
 
-            Chromosome fittestChromosome = gaGrid.evolve();
+            Chromosome chromosome = gaGrid.evolve();
+
+            System.out.println(">>> Evolution result: " + chromosome);
 
             Ignition.stop(true);
 
-            ignite = null;
-
+            System.out.println(">>> OptimizeMakeChange GA grid example completed.");
         }
         catch (Exception e) {
-            System.out.println(e);
+            System.out.println(e.getMessage());
+            e.printStackTrace();
         }
-
     }
 
     /**
-     * Helper routine to initialize Gene pool
+     * Helper routine to initialize Gene pool.
      *
-     * In typical usecase genes may be stored in database.
+     * In typical use case genes may be stored in database.
      *
-     * @return List of Genes
+     * @return List of Genes.
      */
     private static List<Gene> getGenePool() {
-        List<Gene> list = new ArrayList();
+        List<Gene> list = new ArrayList<>();
 
         Gene quarterGene1 = new Gene(new Coin(Coin.CoinType.QUARTER, 3));
         Gene quarterGene2 = new Gene(new Coin(Coin.CoinType.QUARTER, 2));
@@ -212,5 +200,4 @@
 
         return list;
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java
index 2080ac3..2d65d3f 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java
@@ -18,65 +18,75 @@
 package org.apache.ignite.examples.ml.genetic.change;
 
 import java.util.List;
+import java.util.function.Consumer;
 import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.Gene;
 import org.apache.ignite.ml.genetic.parameter.ITerminateCriteria;
 import org.apache.ignite.ml.genetic.utils.GAGridUtils;
 
 /**
- * Terminate Condition implementation for OptimizeMakeChangeGATest <br/>
+ * Terminate Condition implementation for {@link OptimizeMakeChangeGAExample}.
  */
 public class OptimizeMakeChangeTerminateCriteria implements ITerminateCriteria {
-    /** Ignite logger */
-    private IgniteLogger igniteLogger = null;
-    /** Ignite instance */
-    private Ignite ignite = null;
+    /** */
+    private final Ignite ignite;
+
+    /** */
+    private final Consumer<String> logConsumer;
 
     /**
-     * @param ignite Ignite
+     * Create class instance.
+     *
+     * @param ignite Ignite instance.
+     * @param logConsumer Logging consumer.
      */
-    public OptimizeMakeChangeTerminateCriteria(Ignite ignite) {
+    OptimizeMakeChangeTerminateCriteria(Ignite ignite, Consumer<String> logConsumer) {
         this.ignite = ignite;
-        this.igniteLogger = ignite.log();
+        this.logConsumer = logConsumer;
     }
 
     /**
-     * @param fittestChromosome Most fit chromosome at for the nth generation
-     * @param averageFitnessScore Average fitness score as of the nth generation
-     * @param currentGeneration Current generation
-     * @return Boolean value
+     * Check whether termination condition is met.
+     *
+     * @param fittestChromosome Most fit chromosome at for the nth generation.
+     * @param averageFitnessScore Average fitness score as of the nth generation.
+     * @param currGeneration Current generation.
+     * @return Status whether condition is met or not.
      */
     public boolean isTerminationConditionMet(Chromosome fittestChromosome, double averageFitnessScore,
-        int currentGeneration) {
+        int currGeneration) {
         boolean isTerminate = true;
 
-        igniteLogger.info("##########################################################################################");
-        igniteLogger.info("Generation: " + currentGeneration);
-        igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome);
-        igniteLogger.info("Chromsome: " + fittestChromosome);
-        printCoins(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome));
-        igniteLogger.info("Avg Chromsome Fitness: " + averageFitnessScore);
-        igniteLogger.info("##########################################################################################");
+        logConsumer.accept(
+            "\n##########################################################################################"
+                + "\n Generation: " + currGeneration
+                + "\n Fittest is Chromosome Key: " + fittestChromosome
+                + "\n Chromosome: " + fittestChromosome
+                + "\n" + reportCoins(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))
+                + "\nAvg Chromosome Fitness: " + averageFitnessScore
+                + "\n##########################################################################################");
 
-        if (!(currentGeneration > 5)) {
+        if (!(currGeneration > 5))
             isTerminate = false;
-        }
 
         return isTerminate;
     }
 
     /**
-     * Helper to print change detail
+     * Helper to print change details.
      *
-     * @param genes List if Genes
+     * @param genes List if Genes.
+     * @return Details to print.
      */
-    private void printCoins(List<Gene> genes) {
+    private String reportCoins(List<Gene> genes) {
+        StringBuilder sb = new StringBuilder();
+
         for (Gene gene : genes) {
-            igniteLogger.info("Coin Type: " + ((Coin)gene.getVal()).getCoinType().toString());
-            igniteLogger.info("Number of Coins: " + ((Coin)gene.getVal()).getNumberOfCoins());
+            sb.append("\nCoin Type: ").append(((Coin)gene.getVal()).getCoinType().toString())
+                .append("\nNumber of Coins: ").append(((Coin)gene.getVal()).getNumOfCoins());
         }
 
+        return sb.toString();
     }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java
index 190bd2b..a339ff5 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java
@@ -22,41 +22,30 @@
 import org.apache.ignite.ml.genetic.IFitnessFunction;
 
 /**
- * This example demonstrates how to create a IFitnessFunction
- *
- * Your IFitness function will vary depending on your particular use case.
- *
- * For this fitness function, we simply want to calculate the value of
- *
- * an individual solution relative to other solutions.
- *
- *
- * To do this, we simply increase fitness score by '1' for each character
- *
- * that is correct position.
- *
- * For our solution, our genetic algorithm will continue until
- *
- * we achieve a fitness score of '11', as 'HELLO WORLD' contains '11' characters.
+ * This example demonstrates how to create a {@link IFitnessFunction}.
+ * <p>
+ * Your fitness function will vary depending on your particular use case. For this fitness function, we simply want
+ * to calculate the value of an individual solution relative to other solutions.
+ * <p>
+ * To do this, we increase fitness score by '1' for each character that is in correct position.</p>
+ * <p>
+ * For our solution, genetic algorithm will continue until we achieve a fitness score of '11', as 'HELLO WORLD'
+ * contains 11 characters.</p>
  */
 public class HelloWorldFitnessFunction implements IFitnessFunction {
-    /** Optimal target solution */
-    private String targetString = "HELLO WORLD";
-
     /**
-     * Calculate fitness
+     * Calculate fitness.
      *
-     * @param genes List of Genes
-     * @return Fitness value
+     * @param genes List of Genes.
+     * @return Fitness value.
      */
     public double evaluate(List<Gene> genes) {
-
         double matches = 0;
 
         for (int i = 0; i < genes.size(); i++) {
-            if (((Character)(genes.get(i).getVal())).equals(targetString.charAt(i))) {
+            String targetStr = "HELLO WORLD";
+            if (genes.get(i).getVal().equals(targetStr.charAt(i)))
                 matches = matches + 1;
-            }
         }
         return matches;
     }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java
index 839471a..585cbb5 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java
@@ -19,6 +19,7 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.ml.genetic.Chromosome;
@@ -27,99 +28,101 @@
 import org.apache.ignite.ml.genetic.parameter.GAConfiguration;
 
 /**
- * This example demonstrates how to use the GAGrid framework.
- *
- * In this example, we want to evolve a string of 11 characters such that the word 'HELLO WORLD'.
- *
- * is found.
- *
- *
- * How To Run:
- *
- * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.helloworld.HelloWorldGAExample"
- *
- * <p> Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
- * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p> <p> Alternatively you can run ExampleNodeStartup in
- * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.</p>
+ * This example demonstrates how to use the {@link GAGrid} framework. In this example, we want to evolve a string
+ * of 11 characters such that the word 'HELLO WORLD' is found.
+ * <p>
+ * Code in this example launches Ignite grid, prepares simple test data (gene pool) and configures GA grid.</p>
+ * <p>
+ * After that it launches the process of evolution on GA grid and outputs the progress and results.</p>
+ * <p>
+ * You can change the test data and parameters of GA grid used in this example and re-run it to explore
+ * this functionality further.</p>
+ * <p>
+ * How to run from command line:</p>
+ * <p>
+ * {@code mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.helloworld.HelloWorldGAExample"}</p>
+ * <p>
+ *  Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
+ * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p>
+ * <p>
+ * Alternatively you can run ExampleNodeStartup in another JVM which will start node with
+ * {@code examples/config/example-ignite.xml} configuration.</p>
  */
 public class HelloWorldGAExample {
-    /** Ignite instance */
-    private static Ignite ignite = null;
-    /** GAGrid */
-    private static GAGrid gaGrid = null;
-    /** GAConfiguration */
-    private static GAConfiguration gaConfig = null;
-
     /**
      * Executes example.
      *
      * @param args Command line arguments, none required.
      */
     public static void main(String args[]) {
-        System.setProperty("IGNITE_QUIET", "false");
+        System.out.println(">>> HelloWorld GA grid example started.");
 
         try {
+            // Create an Ignite instance as you would in any other use case.
+            Ignite ignite = Ignition.start("examples/config/example-ignite.xml");
 
-            //Create an Ignite instance as you would in any other use case.
+            // Create GAConfiguration.
+            GAConfiguration gaCfg = new GAConfiguration();
 
-            ignite = Ignition.start("examples/config/example-ignite.xml");
-
-            // Create GAConfiguration
-            gaConfig = new GAConfiguration();
-
-            // set Gene Pool
+            // Set Gene Pool.
             List<Gene> genes = getGenePool();
 
-            // set the Chromosome Length to '11' since 'HELLO WORLD' contains 11 characters.
-            gaConfig.setChromosomeLen(11);
+            // Set the Chromosome Length to '11' since 'HELLO WORLD' contains 11 characters.
+            gaCfg.setChromosomeLen(11);
 
-            // initialize gene pool
-            gaConfig.setGenePool(genes);
+            // Initialize gene pool.
+            gaCfg.setGenePool(genes);
 
-            // create and set Fitness function
+            // Create and set Fitness function.
             HelloWorldFitnessFunction function = new HelloWorldFitnessFunction();
-            gaConfig.setFitnessFunction(function);
+            gaCfg.setFitnessFunction(function);
 
-            // create and set TerminateCriteria
-            HelloWorldTerminateCriteria termCriteria = new HelloWorldTerminateCriteria(ignite);
-            gaConfig.setTerminateCriteria(termCriteria);
+            // Create and set TerminateCriteria.
+            AtomicInteger cnt = new AtomicInteger(0);
+            HelloWorldTerminateCriteria termCriteria = new HelloWorldTerminateCriteria(ignite,
+                msg -> {
+                    if (cnt.getAndIncrement() % 20 == 0)
+                        System.out.println(msg);
+                });
 
-            ignite.log();
+            gaCfg.setTerminateCriteria(termCriteria);
 
-            gaGrid = new GAGrid(gaConfig, ignite);
-            // evolve the population
-            Chromosome fittestChromosome = gaGrid.evolve();
+            GAGrid gaGrid = new GAGrid(gaCfg, ignite);
+
+            // Evolve the population.
+            Chromosome chromosome = gaGrid.evolve();
+
+            System.out.println(">>> Evolution result: " + chromosome);
 
             Ignition.stop(true);
 
-            ignite = null;
-
+            System.out.println(">>> HelloWorld GA grid example completed.");
         }
         catch (Exception e) {
-            System.out.println(e);
+            System.out.println(e.getMessage());
+            e.printStackTrace();
         }
-
     }
 
     /**
-     * Helper routine to initialize Gene pool
+     * Helper routine to initialize Gene pool.
      *
-     * In typical usecase genes may be stored in database.
+     * In typical use case genes may be stored in database.
      *
-     * @return List<Gene>
+     * @return List of Gene objects.
      */
     private static List<Gene> getGenePool() {
-        List<Gene> list = new ArrayList();
+        List<Gene> list = new ArrayList<>();
 
         char[] chars = {
             'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S',
             'T', 'U', 'V', 'W', 'X', 'Y', 'Z', ' '};
 
-        for (int i = 0; i < chars.length; i++) {
-            Gene gene = new Gene(new Character(chars[i]));
+        for (char aChar : chars) {
+            Gene gene = new Gene(aChar);
             list.add(gene);
         }
+
         return list;
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java
index 41809d4..610d479 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java
@@ -18,70 +18,75 @@
 package org.apache.ignite.examples.ml.genetic.helloworld;
 
 import java.util.List;
+import java.util.function.Consumer;
 import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.Gene;
 import org.apache.ignite.ml.genetic.parameter.ITerminateCriteria;
 import org.apache.ignite.ml.genetic.utils.GAGridUtils;
 
 /**
- * Represents the terminate condition for HelloWorld Genetic algorithm
- *
- * Class terminates Genetic algorithm when fitnessScore > 10
+ * Represents the terminate condition for {@link HelloWorldGAExample}.
+ * <p>
+ * Class terminates Genetic algorithm when fitness score is more than 10.</p>
  */
 public class HelloWorldTerminateCriteria implements ITerminateCriteria {
-    /** Ignite logger */
-    private IgniteLogger igniteLogger = null;
-    /** Ignite instance */
-    private Ignite ignite = null;
+    /** Ignite instance. */
+    private final Ignite ignite;
+
+    /** */
+    private final Consumer<String> logConsumer;
 
     /**
-     * @param ignite Ignite
+     * Create class instance.
+     *
+     * @param ignite Ignite instance.
+     * @param logConsumer Logging consumer.
      */
-    public HelloWorldTerminateCriteria(Ignite ignite) {
+    HelloWorldTerminateCriteria(Ignite ignite, Consumer<String> logConsumer) {
         this.ignite = ignite;
-        this.igniteLogger = ignite.log();
+        this.logConsumer = logConsumer;
     }
 
     /**
-     * @param fittestChromosome Most fit chromosome at for the nth generation
-     * @param averageFitnessScore Average fitness score as of the nth generation
-     * @param currentGeneration Current generation
-     * @return Boolean value
+     * Check whether termination condition is met.
+     *
+     * @param fittestChromosome Most fit chromosome at for the nth generation.
+     * @param averageFitnessScore Average fitness score as of the nth generation.
+     * @param currGeneration Current generation.
+     * @return Status whether condition is met or not.
      */
     public boolean isTerminationConditionMet(Chromosome fittestChromosome, double averageFitnessScore,
-        int currentGeneration) {
+        int currGeneration) {
         boolean isTerminate = true;
 
-        igniteLogger.info("##########################################################################################");
-        igniteLogger.info("Generation: " + currentGeneration);
-        igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome);
-        igniteLogger.info("Chromosome: " + fittestChromosome);
-        printPhrase(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome));
-        igniteLogger.info("Avg Chromosome Fitness: " + averageFitnessScore);
-        igniteLogger.info("##########################################################################################");
+        logConsumer.accept(
+            "\n##########################################################################################"
+                + "\n Generation: " + currGeneration
+                + "\n Fittest is Chromosome Key: " + fittestChromosome
+                + "\n Chromosome: " + fittestChromosome
+                + "\n" + printPhrase(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))
+                + "\nAvg Chromosome Fitness: " + averageFitnessScore
+                + "\n##########################################################################################");
 
-        if (!(fittestChromosome.getFitnessScore() > 10)) {
+        if (!(fittestChromosome.getFitnessScore() > 10))
             isTerminate = false;
-        }
 
         return isTerminate;
     }
 
     /**
-     * Helper to print Phrase
+     * Helper to print phrase.
      *
-     * @param List of Genes
+     * @param genes List of Genes.
+     * @return Phrase to print.
      */
-    private void printPhrase(List<Gene> genes) {
+    private String printPhrase(List<Gene> genes) {
+        StringBuilder sbPhrase = new StringBuilder();
 
-        StringBuffer sbPhrase = new StringBuffer();
-
-        for (Gene gene : genes) {
+        for (Gene gene : genes)
             sbPhrase.append(((Character)gene.getVal()).toString());
-        }
-        igniteLogger.info(sbPhrase.toString());
-    }
 
+        return sbPhrase.toString();
+    }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java
index f64cb17..43a387c 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java
@@ -20,52 +20,54 @@
 import java.io.Serializable;
 
 /**
- * POJO to model an Item
+ * POJO to model an Item.
  */
 public class Item implements Serializable {
-    /** weight of item in lbs. */
+    /** Weight of item in lbs. */
     private double weight;
-    /** value of item */
-    private double value;
-    /** name of item */
+    /** Value of item. */
+    private double val;
+    /** Name of item. */
     private String name;
 
     /**
-     * Get the weight
+     * Get the weight.
      *
-     * @return Weight
+     * @return Weight.
      */
     public double getWeight() {
         return weight;
     }
 
     /**
-     * Set the weight
+     * Set the weight.
      *
-     * @param weight Weight
+     * @param weight Weight.
      */
     public void setWeight(double weight) {
         this.weight = weight;
     }
 
     /**
-     * Get the value
+     * Get the value.
      *
-     * @return Value
+     * @return Value.
      */
-    public double getValue() {
-        return value;
+    public double getVal() {
+        return val;
     }
 
     /**
-     * @param value Value
+     * Set the value.
+     *
+     * @param val Value.
      */
-    public void setValue(double value) {
-        this.value = value;
+    public void setVal(double val) {
+        this.val = val;
     }
 
     /**
-     * Get the name
+     * Get the name.
      *
      * @return Name
      */
@@ -74,9 +76,9 @@
     }
 
     /**
-     * Set the name
+     * Set the name.
      *
-     * @param name Name
+     * @param name Name.
      */
     public void setName(String name) {
         this.name = name;
@@ -84,7 +86,6 @@
 
     /** {@inheritDoc} */
     @Override public String toString() {
-        return "Item [weight=" + weight + ", value=" + value + ", name=" + name + "]";
+        return "Item [weight=" + weight + ", value=" + val + ", name=" + name + "]";
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java
index bf24edf..e7c72b0 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java
@@ -23,47 +23,40 @@
 import org.apache.ignite.ml.genetic.IFitnessFunction;
 
 /**
- * This example demonstrates how to create a IFitnessFunction
- *
- * Your IFitnessFunction will vary depending on your particular use case.
- *
- * For this fitness function, we simply want to calculate the weight and value of
- *
- * an individual solution relative to other solutions.
- *
- *
- * To do this, we total the weights and values of all the genes within a chromosome.
+ * This example demonstrates how to create a {@link IFitnessFunction}.
+ * <p>
+ * Your fitness function will vary depending on your particular use case. For this fitness function, we simply want
+ * to calculate the weight and value of an individual solution relative to other solutions.</p>
+ * <p>
+ * To do this, we total the weights and values of all the genes within a chromosome.</p>
  */
 public class KnapsackFitnessFunction implements IFitnessFunction {
-    /** weight capacity of knapsack */
-    private double maximumWeight = 20;
-
     /**
-     * Calculate fitness
+     * Calculate fitness.
      *
-     * @param genes List of Genes
-     * @return Fitness value
+     * @param genes List of Genes.
+     * @return Fitness value.
      */
     public double evaluate(List<Gene> genes) {
-
-        double value = 0;
+        double val = 0;
         double weight = 0;
 
-        List<Long> dups = new ArrayList<Long>();
+        List<Long> duplicates = new ArrayList<>();
         int badSolution = 1;
 
-        for (Gene agene : genes) {
-            weight = weight + ((Item)(agene.getVal())).getWeight();
-            value = value + ((Item)(agene.getVal())).getValue();
+        for (Gene gene : genes) {
+            weight = weight + ((Item)(gene.getVal())).getWeight();
+            val = val + ((Item)(gene.getVal())).getVal();
 
-            if (dups.contains(agene.id()) || (weight > maximumWeight)) {
+            double maximumWeight = 20;
+            if (duplicates.contains(gene.id()) || (weight > maximumWeight)) {
                 badSolution = 0;
                 break;
             }
             else
-                dups.add(agene.id());
+                duplicates.add(gene.id());
         }
 
-        return (value * badSolution);
+        return (val * badSolution);
     }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java
index 7578226..1631a95 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java
@@ -19,271 +19,277 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.Ignition;
+import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.GAGrid;
 import org.apache.ignite.ml.genetic.Gene;
 import org.apache.ignite.ml.genetic.parameter.GAConfiguration;
 
 /**
- * This example demonstrates how to use the GAGrid framework.
- *
- * Example demonstrates Knapsack Problem:  Given a set of 30 items, each with a weight and a value, pack 10 items in
- * knapsack so that the total weight is less <= 20 lbs. and the total value is maximized.
- *
- *
- * How To Run:
- *
- * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.knapsack.KnapsackGAExample"
- *
- * <p> Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
- * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p> <p> Alternatively you can run ExampleNodeStartup in
- * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.</p>
+ * This example demonstrates how to use the {@link GAGrid} framework. It shows working with Knapsack Problem:
+ * Given a set of 30 items, each with a weight and a value, pack 10 items in knapsack so that the total weight
+ * is less or equal 20 lbs, and the total value is maximized.
+ * <p>
+ * Code in this example launches Ignite grid, prepares simple test data (gene pool) and configures GA grid.</p>
+ * <p>
+ * After that it launches the process of evolution on GA grid and outputs the progress and results.</p>
+ * <p>
+ * You can change the test data and parameters of GA grid used in this example and re-run it to explore
+ * this functionality further.</p>
+ * <p>
+ * How to run from command line:</p>
+ * <p>
+ * {@code mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.knapsack.KnapsackGAExample"}</p>
+ * <p>
+ * Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
+ * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p>
+ * <p>
+ * Alternatively you can run ExampleNodeStartup in another JVM which will start node with
+ * {@code examples/config/example-ignite.xml} configuration.</p>
  */
 public class KnapsackGAExample {
-    /** Ignite instance */
-    private static Ignite ignite = null;
-    /** GAGrid */
-    private static GAGrid gaGrid = null;
-    /** GAConfiguration */
-    private static GAConfiguration gaConfig = null;
-
     /**
      * @param args Command line arguments, none required.
      */
     public static void main(String args[]) {
-        System.setProperty("IGNITE_QUIET", "false");
+        System.out.println(">>> Knapsack GA grid example started.");
 
         try {
+            // Create an Ignite instance as you would in any other use case.
+            Ignite ignite = Ignition.start("examples/config/example-ignite.xml");
 
-            //Create an Ignite instance as you would in any other use case.
-            ignite = Ignition.start("examples/config/example-ignite.xml");
+            // Create GAConfiguration.
+            GAConfiguration gaCfg = new GAConfiguration();
 
-            // Create GAConfiguration
-            gaConfig = new GAConfiguration();
-
-            // set Gene Pool
+            // Set Gene Pool.
             List<Gene> genes = getGenePool();
 
-            // set the Chromosome Length to '10' since our knapsack may contain a total of 10 items.
-            gaConfig.setChromosomeLen(10);
+            // Set the Chromosome Length to '10' since our knapsack may contain a total of 10 items.
+            gaCfg.setChromosomeLen(10);
 
-            // initialize gene pool
-            gaConfig.setGenePool(genes);
+            // Initialize gene pool.
+            gaCfg.setGenePool(genes);
 
-            // create and set Fitness function
+            // Create and set Fitness function.
             KnapsackFitnessFunction function = new KnapsackFitnessFunction();
-            gaConfig.setFitnessFunction(function);
+            gaCfg.setFitnessFunction(function);
 
-            // create and set TerminateCriteria
-            KnapsackTerminateCriteria termCriteria = new KnapsackTerminateCriteria(ignite);
-            gaConfig.setTerminateCriteria(termCriteria);
+            // Create and set TerminateCriteria.
+            AtomicInteger cnt = new AtomicInteger(0);
+            KnapsackTerminateCriteria termCriteria = new KnapsackTerminateCriteria(ignite,
+                msg -> {
+                    if (cnt.getAndIncrement() % 10 == 0)
+                        System.out.println(msg);
+                });
+            gaCfg.setTerminateCriteria(termCriteria);
 
-            ignite.log();
+            GAGrid gaGrid = new GAGrid(gaCfg, ignite);
 
-            gaGrid = new GAGrid(gaConfig, ignite);
-            // evolve the population
-            gaGrid.evolve();
+            // Evolve the population.
+            Chromosome chromosome = gaGrid.evolve();
+
+            System.out.println(">>> Evolution result: " + chromosome);
 
             Ignition.stop(true);
 
-            ignite = null;
-
+            System.out.println(">>> Knapsack GA grid example completed.");
         }
         catch (Exception e) {
-            System.out.println(e);
+            System.out.println(e.getMessage());
+            e.printStackTrace();
         }
-
     }
 
     /**
-     * Helper routine to initialize Gene pool
+     * Helper routine to initialize Gene pool.
      *
-     * In typical usecase genes may be stored in database.
+     * In typical use case genes may be stored in database.
      *
-     * @return List<Gene>
+     * @return List of Gene objects.
      */
     private static List<Gene> getGenePool() {
-        List<Gene> list = new ArrayList<Gene>();
+        List<Gene> list = new ArrayList<>();
 
         Item item1 = new Item();
         item1.setName("Swiss Army Knife");
         item1.setWeight(0.08125);
-        item1.setValue(15);
+        item1.setVal(15);
         Gene gene1 = new Gene(item1);
 
         Item item2 = new Item();
         item2.setName("Duct Tape");
         item2.setWeight(1.3);
-        item2.setValue(3);
+        item2.setVal(3);
         Gene gene2 = new Gene(item2);
 
         Item item3 = new Item();
         item3.setName("Rope (50 feet)");
         item3.setWeight(7);
-        item3.setValue(10);
+        item3.setVal(10);
         Gene gene3 = new Gene(item3);
 
         Item item4 = new Item();
         item4.setName("Satellite phone");
         item4.setWeight(2);
-        item4.setValue(8);
+        item4.setVal(8);
         Gene gene4 = new Gene(item4);
 
         Item item5 = new Item();
         item5.setName("Elmer's Glue");
         item5.setWeight(0.25);
-        item5.setValue(2);
+        item5.setVal(2);
         Gene gene5 = new Gene(item5);
 
         Item item6 = new Item();
         item6.setName("Toilet Paper Roll");
         item6.setWeight(.5);
-        item6.setValue(4);
+        item6.setVal(4);
         Gene gene6 = new Gene(item6);
 
         Item item7 = new Item();
         item7.setName("Binoculars");
         item7.setWeight(3);
-        item7.setValue(5);
+        item7.setVal(5);
         Gene gene7 = new Gene(item7);
 
         Item item8 = new Item();
         item8.setName("Compass");
         item8.setWeight(0.0573202);
-        item8.setValue(15);
+        item8.setVal(15);
         Gene gene8 = new Gene(item8);
 
         Item item9 = new Item();
-        item9.setName("Jug (prefilled with water)");
+        item9.setName("Jug (pre-filled with water)");
         item9.setWeight(4);
-        item9.setValue(6);
+        item9.setVal(6);
         Gene gene9 = new Gene(item9);
 
         Item item10 = new Item();
         item10.setName("Flashlight");
         item10.setWeight(2);
-        item10.setValue(4);
+        item10.setVal(4);
         Gene gene10 = new Gene(item10);
 
         Item item11 = new Item();
         item11.setName("Box of paper clips");
         item11.setWeight(.9);
-        item11.setValue(2);
+        item11.setVal(2);
         Gene gene11 = new Gene(item11);
 
         Item item12 = new Item();
         item12.setName("Gloves (1 pair)");
         item12.setWeight(.8125);
-        item12.setValue(3);
+        item12.setVal(3);
         Gene gene12 = new Gene(item12);
 
         Item item13 = new Item();
         item13.setName("Scissors");
         item13.setWeight(0.2);
-        item13.setValue(2);
+        item13.setVal(2);
         Gene gene13 = new Gene(item13);
 
         Item item14 = new Item();
         item14.setName("Signal Flair (4pk)");
         item14.setWeight(4);
-        item14.setValue(5);
+        item14.setVal(5);
         Gene gene14 = new Gene(item14);
 
         Item item15 = new Item();
         item15.setName("Water Purifying System");
         item15.setWeight(0.5125);
-        item15.setValue(4);
+        item15.setVal(4);
         Gene gene15 = new Gene(item15);
 
         Item item16 = new Item();
         item16.setName("Whistle");
         item16.setWeight(0.075);
-        item16.setValue(2);
+        item16.setVal(2);
         Gene gene16 = new Gene(item16);
 
         Item item17 = new Item();
         item17.setName("Sleeping Bag");
         item17.setWeight(0.38125);
-        item17.setValue(4);
+        item17.setVal(4);
         Gene gene17 = new Gene(item17);
 
         Item item18 = new Item();
         item18.setName("Insect Repellent");
         item18.setWeight(1.15);
-        item18.setValue(3);
+        item18.setVal(3);
         Gene gene18 = new Gene(item18);
 
         Item item19 = new Item();
         item19.setName("Trowel");
         item19.setWeight(0.31875);
-        item19.setValue(3);
+        item19.setVal(3);
         Gene gene19 = new Gene(item19);
 
         Item item20 = new Item();
         item20.setName("Lighter");
         item20.setWeight(.2);
-        item20.setValue(4);
+        item20.setVal(4);
         Gene gene20 = new Gene(item20);
 
         Item item21 = new Item();
         item21.setName("Safety Horn");
         item21.setWeight(.21);
-        item21.setValue(3);
+        item21.setVal(3);
         Gene gene21 = new Gene(item21);
 
         Item item22 = new Item();
         item22.setName("Headlamp");
         item22.setWeight(.8);
-        item22.setValue(4);
+        item22.setVal(4);
         Gene gene22 = new Gene(item22);
 
         Item item23 = new Item();
         item23.setName("Freeze Dried Food Kit");
         item23.setWeight(2);
-        item23.setValue(6);
+        item23.setVal(6);
         Gene gene23 = new Gene(item23);
 
         Item item24 = new Item();
         item24.setName("Sunscreen");
         item24.setWeight(.5);
-        item24.setValue(4);
+        item24.setVal(4);
         Gene gene24 = new Gene(item24);
 
         Item item25 = new Item();
         item25.setName("Trekking Pole (Adjustable)");
         item25.setWeight(1.3);
-        item25.setValue(4);
+        item25.setVal(4);
         Gene gene25 = new Gene(item25);
 
         Item item26 = new Item();
         item26.setName("Counter Assault Bear Spray");
         item26.setWeight(.5);
-        item26.setValue(4);
+        item26.setVal(4);
         Gene gene26 = new Gene(item26);
 
         Item item27 = new Item();
         item27.setName("Insect Spray");
         item27.setWeight(.5);
-        item27.setValue(3);
+        item27.setVal(3);
         Gene gene27 = new Gene(item27);
 
         Item item28 = new Item();
         item28.setName("Hand sanitizer");
         item28.setWeight(.625);
-        item28.setValue(3);
+        item28.setVal(3);
         Gene gene28 = new Gene(item28);
 
         Item item29 = new Item();
         item29.setName("Mirror");
         item29.setWeight(.5);
-        item29.setValue(3);
+        item29.setVal(3);
         Gene gene29 = new Gene(item29);
 
         Item item30 = new Item();
         item30.setName("First Aid Kit");
         item30.setWeight(3);
-        item30.setValue(6);
+        item30.setVal(6);
         Gene gene30 = new Gene(item30);
 
         list.add(gene1);
@@ -319,5 +325,4 @@
 
         return list;
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java
index 487f8a7..09e047b 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java
@@ -18,62 +18,70 @@
 package org.apache.ignite.examples.ml.genetic.knapsack;
 
 import java.util.List;
+import java.util.function.Consumer;
 import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.Gene;
 import org.apache.ignite.ml.genetic.parameter.ITerminateCriteria;
 import org.apache.ignite.ml.genetic.utils.GAGridUtils;
 
 /**
- * Represents the terminate condition for Knapsack Genetic algorithm
- *
- * Class terminates Genetic algorithm when once GA Grid has performed 30 generations.
+ * Represents the terminate condition for {@link KnapsackGAExample}.
+ * <p>
+ * Class terminates Genetic algorithm when once GA Grid has performed 30 generations.</p>
  */
 public class KnapsackTerminateCriteria implements ITerminateCriteria {
-    /** Ignite instance */
-    private static Ignite ignite = null;
+    /** Ignite instance. */
+    private final Ignite ignite;
 
-    /** Ignite logger */
-    private IgniteLogger igniteLogger = null;
+    /** */
+    private final Consumer<String> logConsumer;
 
     /**
-     * @param ignite Ignite
+     * Create class instance.
+     *
+     * @param ignite Ignite instance.
+     * @param logConsumer Logging consumer.
      */
-    public KnapsackTerminateCriteria(Ignite ignite) {
+    KnapsackTerminateCriteria(Ignite ignite, Consumer<String> logConsumer) {
         this.ignite = ignite;
-        this.igniteLogger = this.ignite.log();
+        this.logConsumer = logConsumer;
     }
 
     /**
-     * @param fittestChromosome Most fit chromosome at for the nth generation
-     * @param averageFitnessScore Average fitness score as of the nth generation
-     * @param currentGeneration Current generation
-     * @return Boolean value
+     * Check whether termination condition is met.
+     *
+     * @param fittestChromosome Most fit chromosome at for the nth generation.
+     * @param averageFitnessScore Average fitness score as of the nth generation.
+     * @param currGeneration Current generation.
+     * @return Status whether condition is met or not.
      */
     public boolean isTerminationConditionMet(Chromosome fittestChromosome, double averageFitnessScore,
-        int currentGeneration) {
+        int currGeneration) {
         boolean isTerminate = true;
 
-        igniteLogger.info("##########################################################################################");
-        igniteLogger.info("Generation: " + currentGeneration);
-        igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome);
-        igniteLogger.info("Total value is: " + fittestChromosome.getFitnessScore());
-        igniteLogger.info("Total weight is: " + calculateTotalWeight(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome)));
-        igniteLogger.info("Avg Chromosome Fitness: " + averageFitnessScore);
-        igniteLogger.info("Chromosome: " + fittestChromosome);
-        printItems(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome));
-        igniteLogger.info("##########################################################################################");
+        logConsumer.accept(
+            "\n##########################################################################################"
+                + "\n Generation: " + currGeneration
+                + "\n Fittest is Chromosome Key: " + fittestChromosome
+                + "\nTotal value is: " + fittestChromosome.getFitnessScore()
+                + "\nTotal weight is: " + calculateTotalWeight(
+                    GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))
+                + "\nChromosome: " + fittestChromosome
+                + "\n" + reportItems(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))
+                + "\n##########################################################################################");
 
-        if (!(currentGeneration > 29))
+        if (!(currGeneration > 29))
             isTerminate = false;
 
         return isTerminate;
     }
 
     /**
-     * @param genes List of Genes
-     * @return double value
+     * Calculate total weight.
+     *
+     * @param genes List of Genes.
+     * @return Calculated value.
      */
     private double calculateTotalWeight(List<Gene> genes) {
         double totalWeight = 0;
@@ -84,16 +92,21 @@
     }
 
     /**
-     * Helper to print items in knapsack
+     * Helper to print items in knapsack.
      *
-     * @param genes List of Genes
+     * @param genes List of Genes.
+     * @return Items to print.
      */
-    private void printItems(List<Gene> genes) {
+    private String reportItems(List<Gene> genes) {
+        StringBuilder sb = new StringBuilder();
+
         for (Gene gene : genes) {
-            igniteLogger.info("------------------------------------------------------------------------------------------");
-            igniteLogger.info("Name: " + ((Item)gene.getVal()).getName().toString());
-            igniteLogger.info("Weight: " + ((Item)gene.getVal()).getWeight());
-            igniteLogger.info("Value: " + ((Item)gene.getVal()).getValue());
+            sb.append("\n------------------------------------------------------------------------------------------")
+                .append("\nName: ").append(((Item)gene.getVal()).getName())
+                .append("\nWeight: ").append(((Item)gene.getVal()).getWeight())
+                .append("\nValue: ").append(((Item)gene.getVal()).getVal());
         }
+
+        return sb.toString();
     }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java
index 38d27ff2..2eebe5e 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java
@@ -23,102 +23,104 @@
  * POJO to model a movie.
  */
 public class Movie {
-    /** name of movie */
+    /** Name of movie. */
     private String name;
-    /** genre of movie */
+    /** Genre of movie. */
     private List genre;
-    /** rating of movie */
+    /** Rating of movie. */
     private String rating;
 
-    /** IMDB rating */
+    /** IMDB rating. */
     private double imdbRating;
 
-    /** year of movie */
+    /** Year of movie. */
     private String year;
 
     /**
-     * Get the year
+     * Get the year.
      *
-     * @return Year
+     * @return Year.
      */
     public String getYear() {
         return year;
     }
 
     /**
-     * Set the year
+     * Set the year.
      *
-     * @param year Year
+     * @param year Year.
      */
     public void setYear(String year) {
         this.year = year;
     }
 
     /**
-     * Get the IMDB rating
+     * Get the <a href="https://en.wikipedia.org/wiki/IMDb">IMDB rating</a>.
      *
-     * @return IMDB rating
+     * @return IMDB rating.
      */
     public double getImdbRating() {
         return imdbRating;
     }
 
     /**
-     * Set the IMDB rating
+     * Set the IMDB rating.
      *
-     * @param imdbRating IMDB rating
+     * @param imdbRating IMDB rating.
      */
     public void setImdbRating(double imdbRating) {
         this.imdbRating = imdbRating;
     }
 
     /**
-     * Get the name of movie
+     * Get the name of movie.
      *
-     * @return Name of movie
+     * @return Name of movie.
      */
     public String getName() {
         return name;
     }
 
     /**
-     * Set the name of movie
+     * Set the name of movie.
      *
-     * @param name Movie name
+     * @param name Movie name.
      */
     public void setName(String name) {
         this.name = name;
     }
 
     /**
-     * @return List of genres
+     * Get movie genres.
+     *
+     * @return List of genres.
      */
     public List getGenre() {
         return genre;
     }
 
     /**
-     * Set the genre
+     * Set the genre.
      *
-     * @param genre Genre of movie
+     * @param genre List of genres of movie.
      */
     public void setGenre(List genre) {
         this.genre = genre;
     }
 
     /**
-     * Get the rating of the movie
+     * Get the rating of the movie.
      *
-     * @return Movie rating
+     * @return Movie rating.
      */
     public String getRating() {
         return rating;
     }
 
     /**
-     * Set the rating of the movie
+     * Set the rating of the movie.
      *
-     * @param rating Movie rating
+     * @param rating Movie rating.
      */
     public void setRating(String rating) {
         this.rating = rating;
@@ -129,5 +131,4 @@
         return "Movie [name=" + name + ", genre=" + genre + ", rating=" + rating + ", imdbRating=" + imdbRating
             + ", year=" + year + "]";
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java
index bfd50f0..154ce79 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java
@@ -23,78 +23,68 @@
 import org.apache.ignite.ml.genetic.IFitnessFunction;
 
 /**
- * This example demonstrates how to create a IFitnessFunction
- *
- * Your IFitness function will vary depending on your particular use case.
- *
- * For this fitness function, we simply want to calculate the value of
- *
- * an individual solution relative to other solutions.
- *
- *
- * To do this, we simply increase fitness score by number of times
- *
- * genre is found in list of movies.
- *
- * In addition, we increase score by fictional IMDB rating.
- *
- * If there are duplicate movies in selection, we automatically apply a '0'
- *
- * fitness score.
+ * This example demonstrates how to create a {@link IFitnessFunction}.
+ * <p>
+ * Your fitness function will vary depending on your particular use case. For this fitness function, we want
+ * to calculate the value of an individual solution relative to other solutions.</p>
+ * <p>
+ * To do this, we increase fitness score by number of times genre is found in list of movies. In addition,
+ * we increase score by fictional IMDB rating.</p>
+ * <p>
+ * If there are duplicate movies in selection, we automatically apply a '0' fitness score.</p>
  */
 public class MovieFitnessFunction implements IFitnessFunction {
-    /** genes */
-    private List<String> genres = null;
+    /** Genres. */
+    private List<String> genres;
 
     /**
-     * @param genres List of genres
+     * Create instance.
+     *
+     * @param genres List of genres.
      */
     public MovieFitnessFunction(List<String> genres) {
         this.genres = genres;
     }
 
     /**
-     * Calculate fitness score
+     * Calculate fitness score.
      *
-     * @param genes List of Genes
-     * @return Fitness score
+     * @param genes List of Genes.
+     * @return Fitness score.
      */
     public double evaluate(List<Gene> genes) {
-
         double score = 0;
-        List<String> dups = new ArrayList();
+        List<String> duplicates = new ArrayList<>();
         int badSolution = 1;
 
-        for (int i = 0; i < genes.size(); i++) {
-            Movie movie = (Movie)genes.get(i).getVal();
-            if (dups.contains(movie.getName())) {
+        for (Gene gene : genes) {
+            Movie movie = (Movie)gene.getVal();
+            if (duplicates.contains(movie.getName()))
                 badSolution = 0;
-            }
-            else {
-                dups.add(movie.getName());
-            }
+            else
+                duplicates.add(movie.getName());
+
             double genreScore = getGenreScore(movie);
-            if (genreScore == 0) {
+            if (genreScore == 0)
                 badSolution = 0;
-            }
+
             score = (score + movie.getImdbRating()) + (genreScore);
         }
         return (score * badSolution);
     }
 
     /**
-     * helper to calculate genre score
+     * Helper to calculate genre score.
      *
-     * @param movie Move
-     * @return Genre score
+     * @param movie Movie.
+     * @return Genre score.
      */
     private double getGenreScore(Movie movie) {
         double genreScore = 0;
 
         for (String genre : this.genres) {
-            if (movie.getGenre().contains(genre)) {
+            if (movie.getGenre().contains(genre))
                 genreScore = genreScore + 1;
-            }
         }
         return genreScore;
     }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java
index a0b368af..cec2719 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java
@@ -29,56 +29,52 @@
 import org.apache.ignite.ml.genetic.parameter.GAGridConstants;
 
 /**
- * This example demonstrates how to use the GAGrid framework.
- *
- * In this example, we utilize GA Grid to calculate an optimal set of movies based on our interests in various genres
- * (ie: Action, Comedy, and Romance)
- *
- *
- * How To Run:
- *
- * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.movie.MovieGAExample" -DGENRES=Action,Comedy
- *
- * <p> Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
- * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p> <p> Alternatively you can run ExampleNodeStartup in
- * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.</p>
+ * In this example, we utilize {@link GAGrid} framework to calculate an optimal set of movies based on our interests
+ * in various genres (ie: Action, Comedy, and Romance).
+ * <p>
+ * Code in this example launches Ignite grid, prepares simple test data (gene pool) and configures GA grid.</p>
+ * <p>
+ * After that it launches the process of evolution on GA grid and outputs the progress and results.</p>
+ * <p>
+ * You can change the test data and parameters of GA grid used in this example and re-run it to explore
+ * this functionality further.</p>
+ * <p>
+ * How to run from command line:</p>
+ * <p>
+ * {@code mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.movie.MovieGAExample"
+ * -DGENRES=Action,Comedy}</p>
+ * <p>
+ * Remote nodes should always be started with special configuration file which enables P2P class loading: {@code
+ * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p>
+ * <p>
+ * Alternatively you can run ExampleNodeStartup in another JVM which will start node with
+ * {@code examples/config/example-ignite.xml} configuration.</p>
  */
 public class MovieGAExample {
-    /** Ignite instance */
-    private static Ignite ignite = null;
-    /** GAGrid */
-    private static GAGrid gaGrid = null;
-    /** GAConfiguration */
-    private static GAConfiguration gaConfig = null;
-
     /**
      * Executes example.
      *
-     * Specify value for -DGENRES JVM system variable
+     * Specify value for {@code -DGENRES} JVM system variable.
      *
      * @param args Command line arguments, none required.
      */
-
     public static void main(String args[]) {
-        System.setProperty("IGNITE_QUIET", "false");
+        System.out.println(">>> Movie GA grid example started.");
 
-        List genres = new ArrayList();
+        List<String> genres = new ArrayList<>();
         String sGenres = "Action,Comedy,Romance";
 
-        StringBuffer sbErrorMessage = new StringBuffer();
-        sbErrorMessage.append("GENRES System property not set. Please provide GENRES information.");
-        sbErrorMessage.append(" ");
-        sbErrorMessage.append("IE: -DGENRES=Action,Comedy,Romance");
-        sbErrorMessage.append("\n");
-        sbErrorMessage.append("Using default value: Action,Comedy,Romance");
+        StringBuffer sbErrorMsg = new StringBuffer();
+        sbErrorMsg.append("GENRES System property not set. Please provide GENRES information.");
+        sbErrorMsg.append(" ");
+        sbErrorMsg.append("IE: -DGENRES=Action,Comedy,Romance");
+        sbErrorMsg.append("\n");
+        sbErrorMsg.append("Using default value: Action,Comedy,Romance");
 
-        if (System.getProperty("GENRES") == null) {
-            System.out.println(sbErrorMessage);
-
-        }
-        else {
+        if (System.getProperty("GENRES") == null)
+            System.out.println(sbErrorMsg);
+        else
             sGenres = System.getProperty("GENRES");
-        }
 
         StringTokenizer st = new StringTokenizer(sGenres, ",");
 
@@ -87,58 +83,59 @@
             genres.add(genre);
         }
 
-        // Create GAConfiguration
-        gaConfig = new GAConfiguration();
+        // Create GAConfiguration.
+        GAConfiguration gaCfg = new GAConfiguration();
 
-        // set Gene Pool
+        // Set Gene Pool.
         List<Gene> genes = getGenePool();
 
-        // Define Chromosome
-        gaConfig.setChromosomeLen(3);
-        gaConfig.setPopulationSize(100);
-        gaConfig.setGenePool(genes);
-        gaConfig.setTruncateRate(.10);
-        gaConfig.setCrossOverRate(.50);
-        gaConfig.setMutationRate(.50);
-        gaConfig.setSelectionMtd(GAGridConstants.SELECTION_METHOD.SELECTION_METHOD_TRUNCATION);
+        // Define Chromosome.
+        gaCfg.setChromosomeLen(3);
+        gaCfg.setPopulationSize(100);
+        gaCfg.setGenePool(genes);
+        gaCfg.setTruncateRate(.10);
+        gaCfg.setCrossOverRate(.50);
+        gaCfg.setMutationRate(.50);
+        gaCfg.setSelectionMtd(GAGridConstants.SELECTION_METHOD.SELECTION_METHOD_TRUNCATION);
 
-        //Create fitness function
+        // Create fitness function.
         MovieFitnessFunction function = new MovieFitnessFunction(genres);
 
-        //set fitness function
-        gaConfig.setFitnessFunction(function);
+        // Set fitness function.
+        gaCfg.setFitnessFunction(function);
 
         try {
+            // Create an Ignite instance as you would in any other use case.
+            Ignite ignite = Ignition.start("examples/config/example-ignite.xml");
 
-            //Create an Ignite instance as you would in any other use case.
-            ignite = Ignition.start("examples/config/example-ignite.xml");
+            MovieTerminateCriteria termCriteria = new MovieTerminateCriteria(ignite, System.out::println);
 
-            MovieTerminateCriteria termCriteria = new MovieTerminateCriteria(ignite);
+            gaCfg.setTerminateCriteria(termCriteria);
 
-            gaConfig.setTerminateCriteria(termCriteria);
+            GAGrid gaGrid = new GAGrid(gaCfg, ignite);
 
-            gaGrid = new GAGrid(gaConfig, ignite);
+            Chromosome chromosome = gaGrid.evolve();
 
-            ignite.log();
-            Chromosome fittestChromosome = gaGrid.evolve();
+            System.out.println(">>> Evolution result: " + chromosome);
 
             Ignition.stop(true);
-            ignite = null;
 
+            System.out.println(">>> Movie GA grid example completed.");
         }
         catch (Exception e) {
-            System.out.println(e);
+            System.out.println(e.getMessage());
+            e.printStackTrace();
         }
-
     }
 
+    /** */
     private static List<Gene> getGenePool() {
-        List list = new ArrayList();
+        List<Gene> list = new ArrayList<>();
 
         Movie movie1 = new Movie();
         movie1.setName("The Matrix");
         movie1.setImdbRating(7);
-        List genre1 = new ArrayList();
+        List<String> genre1 = new ArrayList<>();
         genre1.add("SciFi");
         genre1.add("Action");
         movie1.setGenre(genre1);
@@ -150,7 +147,7 @@
         Movie movie2 = new Movie();
         movie2.setName("The Dark Knight");
         movie2.setImdbRating(9.6);
-        List genre2 = new ArrayList();
+        List<String> genre2 = new ArrayList<>();
         genre2.add("Action");
         movie2.setGenre(genre2);
         movie2.setRating("PG-13");
@@ -163,7 +160,7 @@
         movie3.setImdbRating(9.6);
         movie3.setYear("2012");
 
-        List genre3 = new ArrayList();
+        List<String> genre3 = new ArrayList<>();
         genre3.add("Action");
         movie3.setGenre(genre3);
         movie3.setRating("PG-13");
@@ -173,7 +170,7 @@
         Movie movie4 = new Movie();
         movie4.setName("The Hangover");
         movie4.setImdbRating(7.6);
-        List genre4 = new ArrayList();
+        List<String> genre4 = new ArrayList<>();
         genre4.add("Comedy");
         movie4.setGenre(genre4);
         movie4.setRating("R");
@@ -184,7 +181,7 @@
         Movie movie5 = new Movie();
         movie5.setName("The Hangover 2");
         movie5.setImdbRating(9.6);
-        List genre5 = new ArrayList();
+        List<String> genre5 = new ArrayList<>();
         genre5.add("Comedy");
         movie5.setGenre(genre5);
         movie5.setRating("R");
@@ -195,7 +192,7 @@
         Movie movie6 = new Movie();
         movie6.setName("This Means War");
         movie6.setImdbRating(6.4);
-        List genre6 = new ArrayList();
+        List<String> genre6 = new ArrayList<>();
         genre6.add("Comedy");
         genre6.add("Action");
         genre6.add("Romance");
@@ -208,7 +205,7 @@
         Movie movie7 = new Movie();
         movie7.setName("Hitch");
         movie7.setImdbRating(10);
-        List genre7 = new ArrayList();
+        List<String> genre7 = new ArrayList<>();
         genre7.add("Comedy");
         genre7.add("Romance");
         movie7.setGenre(genre7);
@@ -220,7 +217,7 @@
         Movie movie8 = new Movie();
         movie8.setName("21 Jump Street");
         movie8.setImdbRating(6.7);
-        List genre8 = new ArrayList();
+        List<String> genre8 = new ArrayList<>();
         genre8.add("Comedy");
         genre8.add("Action");
         movie8.setGenre(genre8);
@@ -232,7 +229,7 @@
         Movie movie9 = new Movie();
         movie9.setName("Killers");
         movie9.setImdbRating(5.1);
-        List genre9 = new ArrayList();
+        List<String> genre9 = new ArrayList<>();
         genre9.add("Comedy");
         genre9.add("Action");
         genre9.add("Romance");
@@ -245,7 +242,7 @@
         Movie movie10 = new Movie();
         movie10.setName("What to Expect When You're Expecting");
         movie10.setImdbRating(5.1);
-        List genre10 = new ArrayList();
+        List<String> genre10 = new ArrayList<>();
         genre10.add("Comedy");
         genre10.add("Romance");
         movie10.setGenre(genre10);
@@ -266,7 +263,5 @@
         list.add(gene10);
 
         return list;
-
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java
index 43804b7..68c577f 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java
@@ -18,69 +18,78 @@
 package org.apache.ignite.examples.ml.genetic.movie;
 
 import java.util.List;
+import java.util.function.Consumer;
 import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.ml.genetic.Chromosome;
 import org.apache.ignite.ml.genetic.Gene;
 import org.apache.ignite.ml.genetic.parameter.ITerminateCriteria;
 import org.apache.ignite.ml.genetic.utils.GAGridUtils;
 
 /**
- * Represents the terminate condition for Movie Genetic algorithm  <br/>
- *
- * Class terminates Genetic algorithm when fitnessScore > 32  <br/>
+ * Represents the terminate condition for {@link MovieGAExample}.
+ * <p>
+ * Class terminates Genetic algorithm when fitness score is more than 32.</p>
  */
 public class MovieTerminateCriteria implements ITerminateCriteria {
-    /** Ignite logger */
-    private IgniteLogger igniteLogger = null;
-    /** Ignite instance */
-    private Ignite ignite = null;
+    /** Ignite instance. */
+    private final Ignite ignite;
+
+    /** */
+    private final Consumer<String> logConsumer;
 
     /**
-     * @param ignite
+     * Create class instance.
+     *
+     * @param ignite Ignite instance.
+     * @param logConsumer Logging consumer.
      */
-    public MovieTerminateCriteria(Ignite ignite) {
+    MovieTerminateCriteria(Ignite ignite, Consumer<String> logConsumer) {
         this.ignite = ignite;
-        this.igniteLogger = ignite.log();
+        this.logConsumer = logConsumer;
 
     }
 
     /**
-     * @param fittestChromosome Most fit chromosome at for the nth generation
-     * @param averageFitnessScore Average fitness score as of the nth generation
-     * @param currentGeneration Current generation
-     * @return Boolean value
+     * Check whether termination condition is met.
+     *
+     * @param fittestChromosome Most fit chromosome at for the nth generation.
+     * @param averageFitnessScore Average fitness score as of the nth generation.
+     * @param currGeneration Current generation.
+     * @return Status whether condition is met or not.
      */
     public boolean isTerminationConditionMet(Chromosome fittestChromosome, double averageFitnessScore,
-        int currentGeneration) {
+        int currGeneration) {
         boolean isTerminate = true;
 
-        igniteLogger.info("##########################################################################################");
-        igniteLogger.info("Generation: " + currentGeneration);
-        igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome);
-        igniteLogger.info("Chromsome: " + fittestChromosome);
-        printMovies(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome));
-        igniteLogger.info("##########################################################################################");
+        logConsumer.accept(
+            "\n##########################################################################################"
+                + "\n Generation: " + currGeneration
+                + "\n Fittest is Chromosome Key: " + fittestChromosome
+                + "\nChromosome: " + fittestChromosome
+                + "\n" + reportMovies(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))
+                + "\n##########################################################################################");
 
-        if (!(fittestChromosome.getFitnessScore() > 32)) {
+        if (!(fittestChromosome.getFitnessScore() > 32))
             isTerminate = false;
-        }
 
         return isTerminate;
     }
 
     /**
-     * Helper to print change detail
+     * Helper to print movies details.
      *
-     * @param genes List of Genes
+     * @param genes List of Genes.
+     * @return Movies details.
      */
-    private void printMovies(List<Gene> genes) {
+    private String reportMovies(List<Gene> genes) {
+        StringBuilder sb = new StringBuilder();
+
         for (Gene gene : genes) {
-            igniteLogger.info("Name: " + ((Movie)gene.getVal()).getName().toString());
-            igniteLogger.info("Genres: " + ((Movie)gene.getVal()).getGenre().toString());
-            igniteLogger.info("IMDB Rating: " + ((Movie)gene.getVal()).getImdbRating());
+            sb.append("\nName: ").append(((Movie)gene.getVal()).getName())
+                .append("\nGenres: ").append(((Movie)gene.getVal()).getGenre().toString())
+                .append("\nIMDB Rating: ").append(((Movie)gene.getVal()).getImdbRating());
         }
 
+        return sb.toString();
     }
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/knn/ANNClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/knn/ANNClassificationExample.java
new file mode 100644
index 0000000..8a2d786
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/knn/ANNClassificationExample.java
@@ -0,0 +1,317 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.knn;
+
+import java.util.Arrays;
+import java.util.UUID;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.ml.knn.NNClassificationModel;
+import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.distances.ManhattanDistance;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.thread.IgniteThread;
+
+/**
+ * Run ANN multi-class classification trainer ({@link ANNClassificationTrainer}) over distributed dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the model based on the specified data using
+ * <a href="https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm">kNN</a> algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster
+ * does this point belong to, and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class ANNClassificationExample {
+    /** Run example. */
+    public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+
+            IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+                ANNClassificationExample.class.getSimpleName(), () -> {
+                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+
+                ANNClassificationTrainer trainer = new ANNClassificationTrainer()
+                    .withDistance(new ManhattanDistance())
+                    .withK(50)
+                    .withMaxIterations(1000)
+                    .withSeed(1234L)
+                    .withEpsilon(1e-2);
+
+                long startTrainingTime = System.currentTimeMillis();
+
+                NNClassificationModel knnMdl = trainer.fit(
+                    ignite,
+                    dataCache,
+                    (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+                    (k, v) -> v[0]
+                ).withK(5)
+                    .withDistanceMeasure(new EuclideanDistance())
+                    .withStrategy(NNStrategy.WEIGHTED);
+
+                long endTrainingTime = System.currentTimeMillis();
+
+                System.out.println(">>> ---------------------------------");
+                System.out.println(">>> | Prediction\t| Ground Truth\t|");
+                System.out.println(">>> ---------------------------------");
+
+                int amountOfErrors = 0;
+                int totalAmount = 0;
+
+                long totalPredictionTime = 0L;
+
+                try (QueryCursor<Cache.Entry<Integer, double[]>> observations = dataCache.query(new ScanQuery<>())) {
+                    for (Cache.Entry<Integer, double[]> observation : observations) {
+                        double[] val = observation.getValue();
+                        double[] inputs = Arrays.copyOfRange(val, 1, val.length);
+                        double groundTruth = val[0];
+
+                        long startPredictionTime = System.currentTimeMillis();
+                        double prediction = knnMdl.apply(new DenseVector(inputs));
+                        long endPredictionTime = System.currentTimeMillis();
+
+                        totalPredictionTime += (endPredictionTime - startPredictionTime);
+
+                        totalAmount++;
+                        if (groundTruth != prediction)
+                            amountOfErrors++;
+
+                        System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth);
+                    }
+
+                    System.out.println(">>> ---------------------------------");
+
+                    System.out.println("Training costs = " + (endTrainingTime - startTrainingTime));
+                    System.out.println("Prediction costs = " + totalPredictionTime);
+
+                    System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
+                    System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double) totalAmount));
+                    System.out.println(totalAmount);
+
+                    System.out.println(">>> ANN multi-class classification algorithm over cached dataset usage example completed.");
+                }
+            });
+
+            igniteThread.start();
+            igniteThread.join();
+        }
+    }
+
+    /**
+     * Fills cache with data and returns it.
+     *
+     * @param ignite Ignite instance.
+     * @return Filled Ignite Cache.
+     */
+    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
+        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
+        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
+        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
+
+        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
+
+        for (int k = 0; k < 10; k++) { // multiplies the Iris dataset k times.
+            for (int i = 0; i < data.length; i++)
+                cache.put(k * 10000 + i, mutate(data[i], k));
+        }
+
+        return cache;
+    }
+
+    /**
+     * Tiny changing of data depending on k parameter.
+     * @param datum The vector data.
+     * @param k The passed parameter.
+     * @return The changed vector data.
+     */
+    private static double[] mutate(double[] datum, int k) {
+        for (int i = 0; i < datum.length; i++) datum[i] += k / 100000;
+        return datum;
+    }
+
+    /** The Iris dataset. */
+    private static final double[][] data = {
+        {1, 5.1, 3.5, 1.4, 0.2},
+        {1, 4.9, 3, 1.4, 0.2},
+        {1, 4.7, 3.2, 1.3, 0.2},
+        {1, 4.6, 3.1, 1.5, 0.2},
+        {1, 5, 3.6, 1.4, 0.2},
+        {1, 5.4, 3.9, 1.7, 0.4},
+        {1, 4.6, 3.4, 1.4, 0.3},
+        {1, 5, 3.4, 1.5, 0.2},
+        {1, 4.4, 2.9, 1.4, 0.2},
+        {1, 4.9, 3.1, 1.5, 0.1},
+        {1, 5.4, 3.7, 1.5, 0.2},
+        {1, 4.8, 3.4, 1.6, 0.2},
+        {1, 4.8, 3, 1.4, 0.1},
+        {1, 4.3, 3, 1.1, 0.1},
+        {1, 5.8, 4, 1.2, 0.2},
+        {1, 5.7, 4.4, 1.5, 0.4},
+        {1, 5.4, 3.9, 1.3, 0.4},
+        {1, 5.1, 3.5, 1.4, 0.3},
+        {1, 5.7, 3.8, 1.7, 0.3},
+        {1, 5.1, 3.8, 1.5, 0.3},
+        {1, 5.4, 3.4, 1.7, 0.2},
+        {1, 5.1, 3.7, 1.5, 0.4},
+        {1, 4.6, 3.6, 1, 0.2},
+        {1, 5.1, 3.3, 1.7, 0.5},
+        {1, 4.8, 3.4, 1.9, 0.2},
+        {1, 5, 3, 1.6, 0.2},
+        {1, 5, 3.4, 1.6, 0.4},
+        {1, 5.2, 3.5, 1.5, 0.2},
+        {1, 5.2, 3.4, 1.4, 0.2},
+        {1, 4.7, 3.2, 1.6, 0.2},
+        {1, 4.8, 3.1, 1.6, 0.2},
+        {1, 5.4, 3.4, 1.5, 0.4},
+        {1, 5.2, 4.1, 1.5, 0.1},
+        {1, 5.5, 4.2, 1.4, 0.2},
+        {1, 4.9, 3.1, 1.5, 0.1},
+        {1, 5, 3.2, 1.2, 0.2},
+        {1, 5.5, 3.5, 1.3, 0.2},
+        {1, 4.9, 3.1, 1.5, 0.1},
+        {1, 4.4, 3, 1.3, 0.2},
+        {1, 5.1, 3.4, 1.5, 0.2},
+        {1, 5, 3.5, 1.3, 0.3},
+        {1, 4.5, 2.3, 1.3, 0.3},
+        {1, 4.4, 3.2, 1.3, 0.2},
+        {1, 5, 3.5, 1.6, 0.6},
+        {1, 5.1, 3.8, 1.9, 0.4},
+        {1, 4.8, 3, 1.4, 0.3},
+        {1, 5.1, 3.8, 1.6, 0.2},
+        {1, 4.6, 3.2, 1.4, 0.2},
+        {1, 5.3, 3.7, 1.5, 0.2},
+        {1, 5, 3.3, 1.4, 0.2},
+        {2, 7, 3.2, 4.7, 1.4},
+        {2, 6.4, 3.2, 4.5, 1.5},
+        {2, 6.9, 3.1, 4.9, 1.5},
+        {2, 5.5, 2.3, 4, 1.3},
+        {2, 6.5, 2.8, 4.6, 1.5},
+        {2, 5.7, 2.8, 4.5, 1.3},
+        {2, 6.3, 3.3, 4.7, 1.6},
+        {2, 4.9, 2.4, 3.3, 1},
+        {2, 6.6, 2.9, 4.6, 1.3},
+        {2, 5.2, 2.7, 3.9, 1.4},
+        {2, 5, 2, 3.5, 1},
+        {2, 5.9, 3, 4.2, 1.5},
+        {2, 6, 2.2, 4, 1},
+        {2, 6.1, 2.9, 4.7, 1.4},
+        {2, 5.6, 2.9, 3.6, 1.3},
+        {2, 6.7, 3.1, 4.4, 1.4},
+        {2, 5.6, 3, 4.5, 1.5},
+        {2, 5.8, 2.7, 4.1, 1},
+        {2, 6.2, 2.2, 4.5, 1.5},
+        {2, 5.6, 2.5, 3.9, 1.1},
+        {2, 5.9, 3.2, 4.8, 1.8},
+        {2, 6.1, 2.8, 4, 1.3},
+        {2, 6.3, 2.5, 4.9, 1.5},
+        {2, 6.1, 2.8, 4.7, 1.2},
+        {2, 6.4, 2.9, 4.3, 1.3},
+        {2, 6.6, 3, 4.4, 1.4},
+        {2, 6.8, 2.8, 4.8, 1.4},
+        {2, 6.7, 3, 5, 1.7},
+        {2, 6, 2.9, 4.5, 1.5},
+        {2, 5.7, 2.6, 3.5, 1},
+        {2, 5.5, 2.4, 3.8, 1.1},
+        {2, 5.5, 2.4, 3.7, 1},
+        {2, 5.8, 2.7, 3.9, 1.2},
+        {2, 6, 2.7, 5.1, 1.6},
+        {2, 5.4, 3, 4.5, 1.5},
+        {2, 6, 3.4, 4.5, 1.6},
+        {2, 6.7, 3.1, 4.7, 1.5},
+        {2, 6.3, 2.3, 4.4, 1.3},
+        {2, 5.6, 3, 4.1, 1.3},
+        {2, 5.5, 2.5, 4, 1.3},
+        {2, 5.5, 2.6, 4.4, 1.2},
+        {2, 6.1, 3, 4.6, 1.4},
+        {2, 5.8, 2.6, 4, 1.2},
+        {2, 5, 2.3, 3.3, 1},
+        {2, 5.6, 2.7, 4.2, 1.3},
+        {2, 5.7, 3, 4.2, 1.2},
+        {2, 5.7, 2.9, 4.2, 1.3},
+        {2, 6.2, 2.9, 4.3, 1.3},
+        {2, 5.1, 2.5, 3, 1.1},
+        {2, 5.7, 2.8, 4.1, 1.3},
+        {3, 6.3, 3.3, 6, 2.5},
+        {3, 5.8, 2.7, 5.1, 1.9},
+        {3, 7.1, 3, 5.9, 2.1},
+        {3, 6.3, 2.9, 5.6, 1.8},
+        {3, 6.5, 3, 5.8, 2.2},
+        {3, 7.6, 3, 6.6, 2.1},
+        {3, 4.9, 2.5, 4.5, 1.7},
+        {3, 7.3, 2.9, 6.3, 1.8},
+        {3, 6.7, 2.5, 5.8, 1.8},
+        {3, 7.2, 3.6, 6.1, 2.5},
+        {3, 6.5, 3.2, 5.1, 2},
+        {3, 6.4, 2.7, 5.3, 1.9},
+        {3, 6.8, 3, 5.5, 2.1},
+        {3, 5.7, 2.5, 5, 2},
+        {3, 5.8, 2.8, 5.1, 2.4},
+        {3, 6.4, 3.2, 5.3, 2.3},
+        {3, 6.5, 3, 5.5, 1.8},
+        {3, 7.7, 3.8, 6.7, 2.2},
+        {3, 7.7, 2.6, 6.9, 2.3},
+        {3, 6, 2.2, 5, 1.5},
+        {3, 6.9, 3.2, 5.7, 2.3},
+        {3, 5.6, 2.8, 4.9, 2},
+        {3, 7.7, 2.8, 6.7, 2},
+        {3, 6.3, 2.7, 4.9, 1.8},
+        {3, 6.7, 3.3, 5.7, 2.1},
+        {3, 7.2, 3.2, 6, 1.8},
+        {3, 6.2, 2.8, 4.8, 1.8},
+        {3, 6.1, 3, 4.9, 1.8},
+        {3, 6.4, 2.8, 5.6, 2.1},
+        {3, 7.2, 3, 5.8, 1.6},
+        {3, 7.4, 2.8, 6.1, 1.9},
+        {3, 7.9, 3.8, 6.4, 2},
+        {3, 6.4, 2.8, 5.6, 2.2},
+        {3, 6.3, 2.8, 5.1, 1.5},
+        {3, 6.1, 2.6, 5.6, 1.4},
+        {3, 7.7, 3, 6.1, 2.3},
+        {3, 6.3, 3.4, 5.6, 2.4},
+        {3, 6.4, 3.1, 5.5, 1.8},
+        {3, 6, 3, 4.8, 1.8},
+        {3, 6.9, 3.1, 5.4, 2.1},
+        {3, 6.7, 3.1, 5.6, 2.4},
+        {3, 6.9, 3.1, 5.1, 2.3},
+        {3, 5.8, 2.7, 5.1, 1.9},
+        {3, 6.8, 3.2, 5.9, 2.3},
+        {3, 6.7, 3.3, 5.7, 2.5},
+        {3, 6.7, 3, 5.2, 2.3},
+        {3, 6.3, 2.5, 5, 1.9},
+        {3, 6.5, 3, 5.2, 2},
+        {3, 6.2, 3.4, 5.4, 2.3},
+        {3, 5.9, 3, 5.1, 1.8}
+    };
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java
index d12fc1d..cf285a4 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java
@@ -18,27 +18,34 @@
 package org.apache.ignite.examples.ml.knn;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
+import org.apache.ignite.examples.ml.util.TestCache;
+import org.apache.ignite.ml.knn.NNClassificationModel;
 import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
 import org.apache.ignite.ml.math.distances.EuclideanDistance;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run kNN multi-class classification trainer over distributed dataset.
- *
- * @see KNNClassificationTrainer
+ * Run kNN multi-class classification trainer ({@link KNNClassificationTrainer}) over distributed dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the model based on the specified data using
+ * <a href="https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm">kNN</a> algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster
+ * does this point belong to, and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class KNNClassificationExample {
     /** Run example. */
@@ -51,18 +58,18 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 KNNClassificationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
-                KNNClassificationModel knnMdl = trainer.fit(
+                NNClassificationModel knnMdl = trainer.fit(
                     ignite,
                     dataCache,
                     (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
                     (k, v) -> v[0]
                 ).withK(3)
                     .withDistanceMeasure(new EuclideanDistance())
-                    .withStrategy(KNNStrategy.WEIGHTED);
+                    .withStrategy(NNStrategy.WEIGHTED);
 
                 System.out.println(">>> ---------------------------------");
                 System.out.println(">>> | Prediction\t| Ground Truth\t|");
@@ -90,6 +97,8 @@
 
                     System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
                     System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double) totalAmount));
+
+                    System.out.println(">>> kNN multi-class classification algorithm over cached dataset usage example completed.");
                 }
             });
 
@@ -98,25 +107,6 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
     /** The Iris dataset. */
     private static final double[][] data = {
         {1, 5.1, 3.5, 1.4, 0.2},
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java
index d1e5055..78f38c8 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java
@@ -18,17 +18,14 @@
 package org.apache.ignite.examples.ml.knn;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
+import org.apache.ignite.examples.ml.util.TestCache;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
 import org.apache.ignite.ml.knn.regression.KNNRegressionModel;
 import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer;
 import org.apache.ignite.ml.math.distances.ManhattanDistance;
@@ -37,9 +34,19 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run kNN regression trainer over distributed dataset.
- *
- * @see KNNClassificationTrainer
+ * Run kNN regression trainer ({@link KNNRegressionTrainer}) over distributed dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the model based on the specified data using
+ * <a href="https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm">kNN</a> regression algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster
+ * does this point belong to, and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example or trainer object settings and re-run it to explore
+ * this algorithm further.</p>
  */
 public class KNNRegressionExample {
     /** Run example. */
@@ -52,7 +59,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 KNNRegressionExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 KNNRegressionTrainer trainer = new KNNRegressionTrainer();
 
@@ -63,7 +70,11 @@
                     (k, v) -> v[0]
                 ).withK(5)
                     .withDistanceMeasure(new ManhattanDistance())
-                    .withStrategy(KNNStrategy.WEIGHTED);
+                    .withStrategy(NNStrategy.WEIGHTED);
+
+                System.out.println(">>> ---------------------------------");
+                System.out.println(">>> | Prediction\t| Ground Truth\t|");
+                System.out.println(">>> ---------------------------------");
 
                 int totalAmount = 0;
                 // Calculate mean squared error (MSE)
@@ -83,13 +94,19 @@
                         mae += Math.abs(prediction - groundTruth);
 
                         totalAmount++;
+
+                        System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth);
                     }
 
+                    System.out.println(">>> ---------------------------------");
+
                     mse = mse / totalAmount;
                     System.out.println("\n>>> Mean squared error (MSE) " + mse);
 
                     mae = mae / totalAmount;
                     System.out.println("\n>>> Mean absolute error (MAE) " + mae);
+
+                    System.out.println(">>> kNN regression over cached dataset usage example completed.");
                 }
             });
 
@@ -98,25 +115,6 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
     /** The Iris dataset. */
     private static final double[][] data = {
         {199, 125, 256, 6000, 256, 16, 128},
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java
index 7873b12..3e5a98c 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java
@@ -39,6 +39,17 @@
 /**
  * Example of using distributed {@link MultilayerPerceptron}.
  * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines a layered architecture and a
+ * <a href="https://en.wikipedia.org/wiki/Neural_network">neural network</a> trainer, trains neural network
+ * and obtains multilayer perceptron model.</p>
+ * <p>
+ * Finally, this example loops over the test set, applies the trained model to predict the value and
+ * compares prediction to expected outcome.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
+ * <p>
  * Remote nodes should always be started with special configuration file which
  * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}.</p>
  * <p>
@@ -112,7 +123,11 @@
                 for (int i = 0; i < 4; i++) {
                     LabeledPoint pnt = trainingSet.get(i);
                     Matrix predicted = mlp.apply(new DenseMatrix(new double[][] {{pnt.x, pnt.y}}));
-                    failCnt += Math.abs(predicted.get(0, 0) - pnt.lb) < 0.5 ? 0 : 1;
+
+                    double predictedVal = predicted.get(0, 0);
+                    double lbl = pnt.lb;
+                    System.out.printf(">>> key: %d\t\t predicted: %.4f\t\tlabel: %.4f\n", i, predictedVal, lbl);
+                    failCnt += Math.abs(predictedVal - lbl) < 0.5 ? 0 : 1;
                 }
 
                 double failRatio = (double)failCnt / totalCnt;
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/BinarizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/BinarizationExample.java
index 4c873d9..a1e7672 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/BinarizationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/BinarizationExample.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.examples.ml.preprocessing;
 
-import java.util.Arrays;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
 import org.apache.ignite.ml.dataset.DatasetFactory;
 import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
@@ -33,6 +33,15 @@
 
 /**
  * Example that shows how to use binarization preprocessor to binarize data.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and normalize features.</p>
+ * <p>
+ * Finally, it creates the dataset based on the processed data and uses Dataset API to find and output
+ * various statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
  */
 public class BinarizationExample {
     /** Run example. */
@@ -54,25 +63,7 @@
 
             // Creates a cache based simple dataset containing features and providing standard dataset API.
             try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
+                new DatasetHelper(dataset).describe();
             }
 
             System.out.println(">>> Binarization example completed.");
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExample.java
index 3ea52d8..eefe063 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExample.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.examples.ml.preprocessing;
 
-import java.util.Arrays;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
 import org.apache.ignite.ml.dataset.DatasetFactory;
 import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
@@ -32,7 +32,17 @@
 import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
 
 /**
- * Example that shows how to use Imputing preprocessor to impute the missing value in the given data.
+ * Example that shows how to use <a href="https://en.wikipedia.org/wiki/Imputation_(statistics)">Imputing</a>
+ * preprocessor to impute the missing value in the given data.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and impute missing values.</p>
+ * <p>
+ * Finally, it creates the dataset based on the processed data and uses Dataset API to find and output
+ * various statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
  */
 public class ImputingExample {
     /** Run example. */
@@ -54,25 +64,7 @@
 
             // Creates a cache based simple dataset containing features and providing standard dataset API.
             try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
+                new DatasetHelper(dataset).describe();
             }
 
             System.out.println(">>> Imputing example completed.");
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExampleWithMostFrequentValues.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExampleWithMostFrequentValues.java
deleted file mode 100644
index 10344bc..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingExampleWithMostFrequentValues.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ml.preprocessing;
-
-import java.util.Arrays;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.examples.ml.dataset.model.Person;
-import org.apache.ignite.ml.dataset.DatasetFactory;
-import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
-import org.apache.ignite.ml.math.functions.IgniteBiFunction;
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
-import org.apache.ignite.ml.preprocessing.imputing.ImputingStrategy;
-
-/**
- * Example that shows how to use Imputing preprocessor to impute the missing values in the given data.
- */
-public class ImputingExampleWithMostFrequentValues {
-    /** Run example. */
-    public static void main(String[] args) throws Exception {
-        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
-            System.out.println(">>> Imputing example started.");
-
-            IgniteCache<Integer, Person> persons = createCache(ignite);
-
-            // Defines first preprocessor that extracts features from an upstream data.
-            IgniteBiFunction<Integer, Person, Vector> featureExtractor = (k, v) -> VectorUtils.of(
-                v.getAge(),
-                v.getSalary()
-            );
-
-            // Defines second preprocessor that normalizes features.
-            IgniteBiFunction<Integer, Person, Vector> preprocessor = new ImputerTrainer<Integer, Person>()
-                .withImputingStrategy(ImputingStrategy.MOST_FREQUENT)
-                .fit(ignite, persons, featureExtractor);
-
-            // Creates a cache based simple dataset containing features and providing standard dataset API.
-            try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
-            }
-
-            System.out.println(">>> Imputing example completed.");
-        }
-    }
-
-    /** */
-    private static IgniteCache<Integer, Person> createCache(Ignite ignite) {
-        CacheConfiguration<Integer, Person> cacheConfiguration = new CacheConfiguration<>();
-
-        cacheConfiguration.setName("PERSONS");
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 2));
-
-        IgniteCache<Integer, Person> persons = ignite.createCache(cacheConfiguration);
-
-        persons.put(1, new Person("Mike", 10, 1));
-        persons.put(2, new Person("John", 20, 2));
-        persons.put(3, new Person("George", 15, 1));
-        persons.put(4, new Person("Piter", 25, Double.NaN));
-        persons.put(5, new Person("Karl", Double.NaN, 1));
-        persons.put(6, new Person("Gustaw", 20, 2));
-        persons.put(7, new Person("Alex", 20, 3));
-        return persons;
-    }
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingWithMostFrequentValuesExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingWithMostFrequentValuesExample.java
new file mode 100644
index 0000000..8e39409
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/ImputingWithMostFrequentValuesExample.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.preprocessing;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
+import org.apache.ignite.ml.dataset.DatasetFactory;
+import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
+import org.apache.ignite.ml.preprocessing.imputing.ImputingStrategy;
+
+/**
+ * Example that shows how to use <a href="https://en.wikipedia.org/wiki/Imputation_(statistics)">Imputing</a>
+ * preprocessor to impute the missing values in the given data with most frequent values.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and impute missing values.</p>
+ * <p>
+ * Finally, it creates the dataset based on the processed data and uses Dataset API to find and output
+ * various statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
+ */
+public class ImputingWithMostFrequentValuesExample {
+    /** Run example. */
+    public static void main(String[] args) throws Exception {
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Imputing with most frequent values example started.");
+
+            IgniteCache<Integer, Person> persons = createCache(ignite);
+
+            // Defines first preprocessor that extracts features from an upstream data.
+            IgniteBiFunction<Integer, Person, Vector> featureExtractor = (k, v) -> VectorUtils.of(
+                v.getAge(),
+                v.getSalary()
+            );
+
+            // Defines second preprocessor that normalizes features.
+            IgniteBiFunction<Integer, Person, Vector> preprocessor = new ImputerTrainer<Integer, Person>()
+                .withImputingStrategy(ImputingStrategy.MOST_FREQUENT)
+                .fit(ignite, persons, featureExtractor);
+
+            // Creates a cache based simple dataset containing features and providing standard dataset API.
+            try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
+                new DatasetHelper(dataset).describe();
+            }
+
+            System.out.println(">>> Imputing with most frequent values example completed.");
+        }
+    }
+
+    /** */
+    private static IgniteCache<Integer, Person> createCache(Ignite ignite) {
+        CacheConfiguration<Integer, Person> cacheConfiguration = new CacheConfiguration<>();
+
+        cacheConfiguration.setName("PERSONS");
+        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 2));
+
+        IgniteCache<Integer, Person> persons = ignite.createCache(cacheConfiguration);
+
+        persons.put(1, new Person("Mike", 10, 1));
+        persons.put(2, new Person("John", 20, 2));
+        persons.put(3, new Person("George", 15, 1));
+        persons.put(4, new Person("Piter", 25, Double.NaN));
+        persons.put(5, new Person("Karl", Double.NaN, 1));
+        persons.put(6, new Person("Gustaw", 20, 2));
+        persons.put(7, new Person("Alex", 20, 3));
+        return persons;
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MaxAbsScalerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MaxAbsScalerExample.java
new file mode 100644
index 0000000..955702a
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MaxAbsScalerExample.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.preprocessing;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
+import org.apache.ignite.ml.dataset.DatasetFactory;
+import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.preprocessing.maxabsscaling.MaxAbsScalerTrainer;
+
+/**
+ * Example that shows how to use MaxAbsScaler preprocessor to scale the given data.
+ *
+ * Machine learning preprocessors are built as a chain. Most often a first preprocessor is a feature extractor as shown
+ * in this example. The second preprocessor here is a MinMaxScaler preprocessor which is built on top of the feature
+ * extractor and represents a chain of itself and the underlying feature extractor.
+ */
+public class MaxAbsScalerExample {
+    /** Run example. */
+    public static void main(String[] args) throws Exception {
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Max abs example started.");
+
+            IgniteCache<Integer, Person> persons = createCache(ignite);
+
+            // Defines first preprocessor that extracts features from an upstream data.
+            IgniteBiFunction<Integer, Person, Vector> featureExtractor = (k, v) -> VectorUtils.of(
+                v.getAge(),
+                v.getSalary()
+            );
+
+            // Defines second preprocessor that processes features.
+            IgniteBiFunction<Integer, Person, Vector> preprocessor = new MaxAbsScalerTrainer<Integer, Person>()
+                .fit(ignite, persons, featureExtractor);
+
+            // Creates a cache based simple dataset containing features and providing standard dataset API.
+            try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
+                new DatasetHelper(dataset).describe();
+            }
+
+            System.out.println(">>> Max abs example completed.");
+        }
+    }
+
+    /** */
+    private static IgniteCache<Integer, Person> createCache(Ignite ignite) {
+        CacheConfiguration<Integer, Person> cacheConfiguration = new CacheConfiguration<>();
+
+        cacheConfiguration.setName("PERSONS");
+        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 2));
+
+        IgniteCache<Integer, Person> persons = ignite.createCache(cacheConfiguration);
+
+        persons.put(1, new Person("Mike", 42, 10000));
+        persons.put(2, new Person("John", 32, 64000));
+        persons.put(3, new Person("George", 53, 120000));
+        persons.put(4, new Person("Karl", 24, 70000));
+
+        return persons;
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MinMaxScalerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MinMaxScalerExample.java
index 3f3b0d6..f73228f 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MinMaxScalerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/MinMaxScalerExample.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.examples.ml.preprocessing;
 
-import java.util.Arrays;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
 import org.apache.ignite.ml.dataset.DatasetFactory;
 import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
@@ -33,16 +33,25 @@
 
 /**
  * Example that shows how to use MinMaxScaler preprocessor to scale the given data.
- *
+ * <p>
  * Machine learning preprocessors are built as a chain. Most often a first preprocessor is a feature extractor as shown
  * in this example. The second preprocessor here is a MinMaxScaler preprocessor which is built on top of the feature
- * extractor and represents a chain of itself and the underlying feature extractor.
+ * extractor and represents a chain of itself and the underlying feature extractor.</p>
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and normalize their values.</p>
+ * <p>
+ * Finally, it creates the dataset based on the processed data and uses Dataset API to find and output various
+ * statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
  */
 public class MinMaxScalerExample {
     /** Run example. */
     public static void main(String[] args) throws Exception {
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
-            System.out.println(">>> Normalization example started.");
+            System.out.println(">>> MinMax preprocessing example started.");
 
             IgniteCache<Integer, Person> persons = createCache(ignite);
 
@@ -58,28 +67,10 @@
 
             // Creates a cache based simple dataset containing features and providing standard dataset API.
             try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
+                new DatasetHelper(dataset).describe();
             }
 
-            System.out.println(">>> Normalization example completed.");
+            System.out.println(">>> MinMax preprocessing example completed.");
         }
     }
 
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java
index b8581d0..3159845 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java
@@ -17,13 +17,13 @@
 
 package org.apache.ignite.examples.ml.preprocessing;
 
-import java.util.Arrays;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.examples.ml.dataset.model.Person;
+import org.apache.ignite.examples.ml.util.DatasetHelper;
 import org.apache.ignite.ml.dataset.DatasetFactory;
 import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
@@ -33,6 +33,15 @@
 
 /**
  * Example that shows how to use normalization preprocessor to normalize each vector in the given data.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and normalize their values.</p>
+ * <p>
+ * Finally, it creates the dataset based on the processed data and uses Dataset API to find and output
+ * various statistical metrics of the data.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this functionality further.</p>
  */
 public class NormalizationExample {
     /** Run example. */
@@ -55,25 +64,7 @@
 
             // Creates a cache based simple dataset containing features and providing standard dataset API.
             try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) {
-                // Calculation of the mean value. This calculation will be performed in map-reduce manner.
-                double[] mean = dataset.mean();
-                System.out.println("Mean \n\t" + Arrays.toString(mean));
-
-                // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
-                double[] std = dataset.std();
-                System.out.println("Standard deviation \n\t" + Arrays.toString(std));
-
-                // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
-                double[][] cov = dataset.cov();
-                System.out.println("Covariance matrix ");
-                for (double[] row : cov)
-                    System.out.println("\t" + Arrays.toString(row));
-
-                // Calculation of the correlation matrix.  This calculation will be performed in map-reduce manner.
-                double[][] corr = dataset.corr();
-                System.out.println("Correlation matrix ");
-                for (double[] row : corr)
-                    System.out.println("\t" + Arrays.toString(row));
+                new DatasetHelper(dataset).describe();
             }
 
             System.out.println(">>> Normalization example completed.");
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java
index 47cbb76..6ac445c 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.regression.linear;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer;
@@ -34,9 +32,17 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run linear regression model over cached dataset.
- *
- * @see LinearRegressionLSQRTrainer
+ * Run linear regression model based on <a href="http://web.stanford.edu/group/SOL/software/lsqr/">LSQR algorithm</a>
+ * ({@link LinearRegressionLSQRTrainer}) over cached dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it trains the linear regression model based on the specified data.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value
+ * and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class LinearRegressionLSQRTrainerExample {
     /** */
@@ -106,7 +112,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 LinearRegressionLSQRTrainerExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 System.out.println(">>> Create new linear regression trainer object.");
                 LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer();
@@ -138,6 +144,8 @@
                 }
 
                 System.out.println(">>> ---------------------------------");
+
+                System.out.println(">>> Linear regression model over cache based dataset usage example completed.");
             });
 
             igniteThread.start();
@@ -145,23 +153,4 @@
             igniteThread.join();
         }
     }
-
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithMinMaxScalerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithMinMaxScalerExample.java
index 03c82ef..320d464 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithMinMaxScalerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithMinMaxScalerExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.regression.linear;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
@@ -37,11 +35,20 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run linear regression model over cached dataset.
- *
- * @see LinearRegressionLSQRTrainer
- * @see MinMaxScalerTrainer
- * @see MinMaxScalerPreprocessor
+ * Run linear regression model based on <a href="http://web.stanford.edu/group/SOL/software/lsqr/">LSQR algorithm</a>
+ * ({@link LinearRegressionLSQRTrainer}) over cached dataset that was created using
+ * a minmaxscaling preprocessor ({@link MinMaxScalerTrainer}, {@link MinMaxScalerPreprocessor}).
+ * <p>
+ * Code in this example launches Ignite grid, fills the cache with simple test data, and defines minmaxscaling
+ * trainer and preprocessor.</p>
+ * <p>
+ * After that it trains the linear regression model based on the specified data that has been processed
+ * using minmaxscaling.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict predict the target
+ * value and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class LinearRegressionLSQRTrainerWithMinMaxScalerExample {
     /** */
@@ -104,14 +111,14 @@
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
         System.out.println();
-        System.out.println(">>> Linear regression model over cached dataset usage example started.");
+        System.out.println(">>> Linear regression model with minmaxscaling preprocessor over cached dataset usage example started.");
         // Start ignite grid.
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             System.out.println(">>> Ignite grid started.");
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 LinearRegressionLSQRTrainerWithMinMaxScalerExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, Vector> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, Vector> dataCache = new TestCache(ignite).getVectors(data);
 
                 System.out.println(">>> Create new minmaxscaling trainer object.");
                 MinMaxScalerTrainer<Integer, Vector> normalizationTrainer = new MinMaxScalerTrainer<>();
@@ -151,6 +158,8 @@
                 }
 
                 System.out.println(">>> ---------------------------------");
+
+                System.out.println(">>> Linear regression model with minmaxscaling preprocessor over cache based dataset usage example completed.");
             });
 
             igniteThread.start();
@@ -158,23 +167,4 @@
             igniteThread.join();
         }
     }
-
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, Vector> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, Vector> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, Vector> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, VectorUtils.of(data[i]));
-
-        return cache;
-    }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java
index a518c87..9fdc0df 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.regression.linear;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.nn.UpdatesStrategy;
@@ -37,9 +35,19 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run linear regression model over cached dataset.
- *
- * @see LinearRegressionSGDTrainer
+ * Run linear regression model based on  based on
+ * <a href="https://en.wikipedia.org/wiki/Stochastic_gradient_descent">stochastic gradient descent</a> algorithm
+ * ({@link LinearRegressionSGDTrainer}) over cached dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it trains the linear regression model based on stochastic gradient descent algorithm using
+ * the specified data.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value
+ * and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class LinearRegressionSGDTrainerExample {
     /** */
@@ -109,7 +117,7 @@
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 LinearRegressionSGDTrainerExample.class.getSimpleName(), () -> {
 
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 System.out.println(">>> Create new linear regression trainer object.");
                 LinearRegressionSGDTrainer<?> trainer = new LinearRegressionSGDTrainer<>(new UpdatesStrategy<>(
@@ -145,6 +153,8 @@
                 }
 
                 System.out.println(">>> ---------------------------------");
+
+                System.out.println(">>> Linear regression model over cache based dataset usage example completed.");
             });
 
             igniteThread.start();
@@ -152,23 +162,4 @@
             igniteThread.join();
         }
     }
-
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerExample.java
new file mode 100644
index 0000000..0a6ff01
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerExample.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.regression.logistic.binary;
+
+import java.util.Arrays;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.examples.ml.util.TestCache;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.nn.UpdatesStrategy;
+import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate;
+import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionSGDTrainer;
+import org.apache.ignite.thread.IgniteThread;
+
+/**
+ * Run logistic regression model based on <a href="https://en.wikipedia.org/wiki/Stochastic_gradient_descent">
+ * stochastic gradient descent</a> algorithm ({@link LogisticRegressionSGDTrainer}) over distributed cache.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the logistic regression model based on the specified data.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value,
+ * compares prediction to expected outcome (ground truth), and builds
+ * <a href="https://en.wikipedia.org/wiki/Confusion_matrix">confusion matrix</a>.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
+ */
+public class LogisticRegressionSGDTrainerExample {
+    /** Run example. */
+    public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Logistic regression model over partitioned dataset usage example started.");
+        // Start ignite grid.
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            System.out.println(">>> Ignite grid started.");
+            IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+                LogisticRegressionSGDTrainerExample.class.getSimpleName(), () -> {
+
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
+
+                System.out.println(">>> Create new logistic regression trainer object.");
+                LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
+                    new SimpleGDUpdateCalculator(0.2),
+                    SimpleGDParameterUpdate::sumLocal,
+                    SimpleGDParameterUpdate::avg
+                ), 100000,  10, 100, 123L);
+
+                System.out.println(">>> Perform the training to get the model.");
+                LogisticRegressionModel mdl = trainer.fit(
+                    ignite,
+                    dataCache,
+                    (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+                    (k, v) -> v[0]
+                );
+
+                System.out.println(">>> Logistic regression model: " + mdl);
+
+                int amountOfErrors = 0;
+                int totalAmount = 0;
+
+                // Build confusion matrix. See https://en.wikipedia.org/wiki/Confusion_matrix
+                int[][] confusionMtx = {{0, 0}, {0, 0}};
+
+                try (QueryCursor<Cache.Entry<Integer, double[]>> observations = dataCache.query(new ScanQuery<>())) {
+                    for (Cache.Entry<Integer, double[]> observation : observations) {
+                        double[] val = observation.getValue();
+                        double[] inputs = Arrays.copyOfRange(val, 1, val.length);
+                        double groundTruth = val[0];
+
+                        double prediction = mdl.apply(new DenseVector(inputs));
+
+                        totalAmount++;
+                        if(groundTruth != prediction)
+                            amountOfErrors++;
+
+                        int idx1 = (int)prediction;
+                        int idx2 = (int)groundTruth;
+
+                        confusionMtx[idx1][idx2]++;
+
+                        System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth);
+                    }
+
+                    System.out.println(">>> ---------------------------------");
+
+                    System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
+                    System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double)totalAmount));
+                }
+
+                System.out.println("\n>>> Confusion matrix is " + Arrays.deepToString(confusionMtx));
+                System.out.println(">>> ---------------------------------");
+
+                System.out.println(">>> Logistic regression model over partitioned dataset usage example completed.");
+            });
+
+            igniteThread.start();
+
+            igniteThread.join();
+        }
+    }
+
+    /** The 1st and 2nd classes from the Iris dataset. */
+    private static final double[][] data = {
+        {0, 5.1, 3.5, 1.4, 0.2},
+        {0, 4.9, 3, 1.4, 0.2},
+        {0, 4.7, 3.2, 1.3, 0.2},
+        {0, 4.6, 3.1, 1.5, 0.2},
+        {0, 5, 3.6, 1.4, 0.2},
+        {0, 5.4, 3.9, 1.7, 0.4},
+        {0, 4.6, 3.4, 1.4, 0.3},
+        {0, 5, 3.4, 1.5, 0.2},
+        {0, 4.4, 2.9, 1.4, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 5.4, 3.7, 1.5, 0.2},
+        {0, 4.8, 3.4, 1.6, 0.2},
+        {0, 4.8, 3, 1.4, 0.1},
+        {0, 4.3, 3, 1.1, 0.1},
+        {0, 5.8, 4, 1.2, 0.2},
+        {0, 5.7, 4.4, 1.5, 0.4},
+        {0, 5.4, 3.9, 1.3, 0.4},
+        {0, 5.1, 3.5, 1.4, 0.3},
+        {0, 5.7, 3.8, 1.7, 0.3},
+        {0, 5.1, 3.8, 1.5, 0.3},
+        {0, 5.4, 3.4, 1.7, 0.2},
+        {0, 5.1, 3.7, 1.5, 0.4},
+        {0, 4.6, 3.6, 1, 0.2},
+        {0, 5.1, 3.3, 1.7, 0.5},
+        {0, 4.8, 3.4, 1.9, 0.2},
+        {0, 5, 3, 1.6, 0.2},
+        {0, 5, 3.4, 1.6, 0.4},
+        {0, 5.2, 3.5, 1.5, 0.2},
+        {0, 5.2, 3.4, 1.4, 0.2},
+        {0, 4.7, 3.2, 1.6, 0.2},
+        {0, 4.8, 3.1, 1.6, 0.2},
+        {0, 5.4, 3.4, 1.5, 0.4},
+        {0, 5.2, 4.1, 1.5, 0.1},
+        {0, 5.5, 4.2, 1.4, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 5, 3.2, 1.2, 0.2},
+        {0, 5.5, 3.5, 1.3, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 4.4, 3, 1.3, 0.2},
+        {0, 5.1, 3.4, 1.5, 0.2},
+        {0, 5, 3.5, 1.3, 0.3},
+        {0, 4.5, 2.3, 1.3, 0.3},
+        {0, 4.4, 3.2, 1.3, 0.2},
+        {0, 5, 3.5, 1.6, 0.6},
+        {0, 5.1, 3.8, 1.9, 0.4},
+        {0, 4.8, 3, 1.4, 0.3},
+        {0, 5.1, 3.8, 1.6, 0.2},
+        {0, 4.6, 3.2, 1.4, 0.2},
+        {0, 5.3, 3.7, 1.5, 0.2},
+        {0, 5, 3.3, 1.4, 0.2},
+        {1, 7, 3.2, 4.7, 1.4},
+        {1, 6.4, 3.2, 4.5, 1.5},
+        {1, 6.9, 3.1, 4.9, 1.5},
+        {1, 5.5, 2.3, 4, 1.3},
+        {1, 6.5, 2.8, 4.6, 1.5},
+        {1, 5.7, 2.8, 4.5, 1.3},
+        {1, 6.3, 3.3, 4.7, 1.6},
+        {1, 4.9, 2.4, 3.3, 1},
+        {1, 6.6, 2.9, 4.6, 1.3},
+        {1, 5.2, 2.7, 3.9, 1.4},
+        {1, 5, 2, 3.5, 1},
+        {1, 5.9, 3, 4.2, 1.5},
+        {1, 6, 2.2, 4, 1},
+        {1, 6.1, 2.9, 4.7, 1.4},
+        {1, 5.6, 2.9, 3.6, 1.3},
+        {1, 6.7, 3.1, 4.4, 1.4},
+        {1, 5.6, 3, 4.5, 1.5},
+        {1, 5.8, 2.7, 4.1, 1},
+        {1, 6.2, 2.2, 4.5, 1.5},
+        {1, 5.6, 2.5, 3.9, 1.1},
+        {1, 5.9, 3.2, 4.8, 1.8},
+        {1, 6.1, 2.8, 4, 1.3},
+        {1, 6.3, 2.5, 4.9, 1.5},
+        {1, 6.1, 2.8, 4.7, 1.2},
+        {1, 6.4, 2.9, 4.3, 1.3},
+        {1, 6.6, 3, 4.4, 1.4},
+        {1, 6.8, 2.8, 4.8, 1.4},
+        {1, 6.7, 3, 5, 1.7},
+        {1, 6, 2.9, 4.5, 1.5},
+        {1, 5.7, 2.6, 3.5, 1},
+        {1, 5.5, 2.4, 3.8, 1.1},
+        {1, 5.5, 2.4, 3.7, 1},
+        {1, 5.8, 2.7, 3.9, 1.2},
+        {1, 6, 2.7, 5.1, 1.6},
+        {1, 5.4, 3, 4.5, 1.5},
+        {1, 6, 3.4, 4.5, 1.6},
+        {1, 6.7, 3.1, 4.7, 1.5},
+        {1, 6.3, 2.3, 4.4, 1.3},
+        {1, 5.6, 3, 4.1, 1.3},
+        {1, 5.5, 2.5, 4, 1.3},
+        {1, 5.5, 2.6, 4.4, 1.2},
+        {1, 6.1, 3, 4.6, 1.4},
+        {1, 5.8, 2.6, 4, 1.2},
+        {1, 5, 2.3, 3.3, 1},
+        {1, 5.6, 2.7, 4.2, 1.3},
+        {1, 5.7, 3, 4.2, 1.2},
+        {1, 5.7, 2.9, 4.2, 1.3},
+        {1, 6.2, 2.9, 4.3, 1.3},
+        {1, 5.1, 2.5, 3, 1.1},
+        {1, 5.7, 2.8, 4.1, 1.3},
+    };
+
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerSample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerSample.java
deleted file mode 100644
index 9648bbd..0000000
--- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/binary/LogisticRegressionSGDTrainerSample.java
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.examples.ml.regression.logistic.binary;
-
-import java.util.Arrays;
-import java.util.UUID;
-import javax.cache.Cache;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.cache.query.QueryCursor;
-import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
-import org.apache.ignite.ml.nn.UpdatesStrategy;
-import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate;
-import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator;
-import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
-import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionSGDTrainer;
-import org.apache.ignite.thread.IgniteThread;
-
-/**
- * Run logistic regression model over distributed cache.
- *
- * @see LogisticRegressionSGDTrainer
- */
-public class LogisticRegressionSGDTrainerSample {
-    /** Run example. */
-    public static void main(String[] args) throws InterruptedException {
-        System.out.println();
-        System.out.println(">>> Logistic regression model over partitioned dataset usage example started.");
-        // Start ignite grid.
-        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
-            System.out.println(">>> Ignite grid started.");
-            IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
-                LogisticRegressionSGDTrainerSample.class.getSimpleName(), () -> {
-
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
-
-                System.out.println(">>> Create new logistic regression trainer object.");
-                LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
-                    new SimpleGDUpdateCalculator(0.2),
-                    SimpleGDParameterUpdate::sumLocal,
-                    SimpleGDParameterUpdate::avg
-                ), 100000,  10, 100, 123L);
-
-                System.out.println(">>> Perform the training to get the model.");
-                LogisticRegressionModel mdl = trainer.fit(
-                    ignite,
-                    dataCache,
-                    (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
-                    (k, v) -> v[0]
-                ).withRawLabels(true);
-
-                System.out.println(">>> Logistic regression model: " + mdl);
-
-                int amountOfErrors = 0;
-                int totalAmount = 0;
-
-                // Build confusion matrix. See https://en.wikipedia.org/wiki/Confusion_matrix
-                int[][] confusionMtx = {{0, 0}, {0, 0}};
-
-                try (QueryCursor<Cache.Entry<Integer, double[]>> observations = dataCache.query(new ScanQuery<>())) {
-                    for (Cache.Entry<Integer, double[]> observation : observations) {
-                        double[] val = observation.getValue();
-                        double[] inputs = Arrays.copyOfRange(val, 1, val.length);
-                        double groundTruth = val[0];
-
-                        double prediction = mdl.apply(new DenseVector(inputs));
-
-                        totalAmount++;
-                        if(groundTruth != prediction)
-                            amountOfErrors++;
-
-                        int idx1 = (int)prediction;
-                        int idx2 = (int)groundTruth;
-
-                        confusionMtx[idx1][idx2]++;
-
-                        System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth);
-                    }
-
-                    System.out.println(">>> ---------------------------------");
-
-                    System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
-                    System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double)totalAmount));
-                }
-
-                System.out.println("\n>>> Confusion matrix is " + Arrays.deepToString(confusionMtx));
-                System.out.println(">>> ---------------------------------");
-            });
-
-            igniteThread.start();
-
-            igniteThread.join();
-        }
-    }
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
-
-    /** The 1st and 2nd classes from the Iris dataset. */
-    private static final double[][] data = {
-        {0, 5.1, 3.5, 1.4, 0.2},
-        {0, 4.9, 3, 1.4, 0.2},
-        {0, 4.7, 3.2, 1.3, 0.2},
-        {0, 4.6, 3.1, 1.5, 0.2},
-        {0, 5, 3.6, 1.4, 0.2},
-        {0, 5.4, 3.9, 1.7, 0.4},
-        {0, 4.6, 3.4, 1.4, 0.3},
-        {0, 5, 3.4, 1.5, 0.2},
-        {0, 4.4, 2.9, 1.4, 0.2},
-        {0, 4.9, 3.1, 1.5, 0.1},
-        {0, 5.4, 3.7, 1.5, 0.2},
-        {0, 4.8, 3.4, 1.6, 0.2},
-        {0, 4.8, 3, 1.4, 0.1},
-        {0, 4.3, 3, 1.1, 0.1},
-        {0, 5.8, 4, 1.2, 0.2},
-        {0, 5.7, 4.4, 1.5, 0.4},
-        {0, 5.4, 3.9, 1.3, 0.4},
-        {0, 5.1, 3.5, 1.4, 0.3},
-        {0, 5.7, 3.8, 1.7, 0.3},
-        {0, 5.1, 3.8, 1.5, 0.3},
-        {0, 5.4, 3.4, 1.7, 0.2},
-        {0, 5.1, 3.7, 1.5, 0.4},
-        {0, 4.6, 3.6, 1, 0.2},
-        {0, 5.1, 3.3, 1.7, 0.5},
-        {0, 4.8, 3.4, 1.9, 0.2},
-        {0, 5, 3, 1.6, 0.2},
-        {0, 5, 3.4, 1.6, 0.4},
-        {0, 5.2, 3.5, 1.5, 0.2},
-        {0, 5.2, 3.4, 1.4, 0.2},
-        {0, 4.7, 3.2, 1.6, 0.2},
-        {0, 4.8, 3.1, 1.6, 0.2},
-        {0, 5.4, 3.4, 1.5, 0.4},
-        {0, 5.2, 4.1, 1.5, 0.1},
-        {0, 5.5, 4.2, 1.4, 0.2},
-        {0, 4.9, 3.1, 1.5, 0.1},
-        {0, 5, 3.2, 1.2, 0.2},
-        {0, 5.5, 3.5, 1.3, 0.2},
-        {0, 4.9, 3.1, 1.5, 0.1},
-        {0, 4.4, 3, 1.3, 0.2},
-        {0, 5.1, 3.4, 1.5, 0.2},
-        {0, 5, 3.5, 1.3, 0.3},
-        {0, 4.5, 2.3, 1.3, 0.3},
-        {0, 4.4, 3.2, 1.3, 0.2},
-        {0, 5, 3.5, 1.6, 0.6},
-        {0, 5.1, 3.8, 1.9, 0.4},
-        {0, 4.8, 3, 1.4, 0.3},
-        {0, 5.1, 3.8, 1.6, 0.2},
-        {0, 4.6, 3.2, 1.4, 0.2},
-        {0, 5.3, 3.7, 1.5, 0.2},
-        {0, 5, 3.3, 1.4, 0.2},
-        {1, 7, 3.2, 4.7, 1.4},
-        {1, 6.4, 3.2, 4.5, 1.5},
-        {1, 6.9, 3.1, 4.9, 1.5},
-        {1, 5.5, 2.3, 4, 1.3},
-        {1, 6.5, 2.8, 4.6, 1.5},
-        {1, 5.7, 2.8, 4.5, 1.3},
-        {1, 6.3, 3.3, 4.7, 1.6},
-        {1, 4.9, 2.4, 3.3, 1},
-        {1, 6.6, 2.9, 4.6, 1.3},
-        {1, 5.2, 2.7, 3.9, 1.4},
-        {1, 5, 2, 3.5, 1},
-        {1, 5.9, 3, 4.2, 1.5},
-        {1, 6, 2.2, 4, 1},
-        {1, 6.1, 2.9, 4.7, 1.4},
-        {1, 5.6, 2.9, 3.6, 1.3},
-        {1, 6.7, 3.1, 4.4, 1.4},
-        {1, 5.6, 3, 4.5, 1.5},
-        {1, 5.8, 2.7, 4.1, 1},
-        {1, 6.2, 2.2, 4.5, 1.5},
-        {1, 5.6, 2.5, 3.9, 1.1},
-        {1, 5.9, 3.2, 4.8, 1.8},
-        {1, 6.1, 2.8, 4, 1.3},
-        {1, 6.3, 2.5, 4.9, 1.5},
-        {1, 6.1, 2.8, 4.7, 1.2},
-        {1, 6.4, 2.9, 4.3, 1.3},
-        {1, 6.6, 3, 4.4, 1.4},
-        {1, 6.8, 2.8, 4.8, 1.4},
-        {1, 6.7, 3, 5, 1.7},
-        {1, 6, 2.9, 4.5, 1.5},
-        {1, 5.7, 2.6, 3.5, 1},
-        {1, 5.5, 2.4, 3.8, 1.1},
-        {1, 5.5, 2.4, 3.7, 1},
-        {1, 5.8, 2.7, 3.9, 1.2},
-        {1, 6, 2.7, 5.1, 1.6},
-        {1, 5.4, 3, 4.5, 1.5},
-        {1, 6, 3.4, 4.5, 1.6},
-        {1, 6.7, 3.1, 4.7, 1.5},
-        {1, 6.3, 2.3, 4.4, 1.3},
-        {1, 5.6, 3, 4.1, 1.3},
-        {1, 5.5, 2.5, 4, 1.3},
-        {1, 5.5, 2.6, 4.4, 1.2},
-        {1, 6.1, 3, 4.6, 1.4},
-        {1, 5.8, 2.6, 4, 1.2},
-        {1, 5, 2.3, 3.3, 1},
-        {1, 5.6, 2.7, 4.2, 1.3},
-        {1, 5.7, 3, 4.2, 1.2},
-        {1, 5.7, 2.9, 4.2, 1.3},
-        {1, 6.2, 2.9, 4.3, 1.3},
-        {1, 5.1, 2.5, 3, 1.1},
-        {1, 5.7, 2.8, 4.1, 1.3},
-    };
-
-}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/multiclass/LogRegressionMultiClassClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/multiclass/LogRegressionMultiClassClassificationExample.java
index 351f1c6..e670f01 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/multiclass/LogRegressionMultiClassClassificationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/logistic/multiclass/LogRegressionMultiClassClassificationExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.regression.logistic.multiclass;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
@@ -37,14 +35,23 @@
 import org.apache.ignite.ml.preprocessing.minmaxscaling.MinMaxScalerTrainer;
 import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassModel;
 import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassTrainer;
-import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationModel;
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run Logistic Regression multi-class classification trainer over distributed dataset to build two models:
- * one with minmaxscaling and one without minmaxscaling.
- *
- * @see SVMLinearMultiClassClassificationModel
+ * Run Logistic Regression multi-class classification trainer ({@link LogRegressionMultiClassModel}) over distributed
+ * dataset to build two models: one with minmaxscaling and one without minmaxscaling.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (preprocessed
+ * <a href="https://archive.ics.uci.edu/ml/datasets/Glass+Identification">Glass dataset</a>).</p>
+ * <p>
+ * After that it trains two logistic regression models based on the specified data - one model is with minmaxscaling
+ * and one without minmaxscaling.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained models to predict the target value,
+ * compares prediction to expected outcome (ground truth), and builds
+ * <a href="https://en.wikipedia.org/wiki/Confusion_matrix">confusion matrices</a>.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class LogRegressionMultiClassClassificationExample {
     /** Run example. */
@@ -57,7 +64,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 LogRegressionMultiClassClassificationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, Vector> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, Vector> dataCache = new TestCache(ignite).getVectors(data);
 
                 LogRegressionMultiClassTrainer<?> trainer = new LogRegressionMultiClassTrainer<>()
                     .withUpdatesStgy(new UpdatesStrategy<>(
@@ -157,6 +164,8 @@
                     System.out.println("\n>>> Absolute amount of errors " + amountOfErrorsWithNormalization);
                     System.out.println("\n>>> Accuracy " + (1 - amountOfErrorsWithNormalization / (double)totalAmount));
                     System.out.println("\n>>> Confusion matrix is " + Arrays.deepToString(confusionMtxWithNormalization));
+
+                    System.out.println(">>> Logistic Regression Multi-class classification model over cached dataset usage example completed.");
                 }
             });
 
@@ -165,25 +174,6 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, Vector> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, Vector> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, Vector> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, VectorUtils.of(data[i]));
-
-        return cache;
-    }
-
     /** The preprocessed Glass dataset from the Machine Learning Repository https://archive.ics.uci.edu/ml/datasets/Glass+Identification
      *  There are 3 classes with labels: 1 {building_windows_float_processed}, 3 {vehicle_windows_float_processed}, 7 {headlamps}.
      *  Feature names: 'Na-Sodium', 'Mg-Magnesium', 'Al-Aluminum', 'Ba-Barium', 'Fe-Iron'.
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java
index 83656c5..eb4c8f3 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/selection/cv/CrossValidationExample.java
@@ -33,9 +33,13 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run decision tree classification with cross validation.
- *
- * @see CrossValidation
+ * Run <a href="https://en.wikipedia.org/wiki/Decision_tree">decision tree</a> classification with
+ * <a href="https://en.wikipedia.org/wiki/Cross-validation_(statistics)">cross validation</a> ({@link CrossValidation}).
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with pseudo random training data points.</p>
+ * <p>
+ * After that it creates classification trainer ({@link DecisionTreeClassificationTrainer}) and computes cross-validated
+ * metrics based on the training set.</p>
  */
 public class CrossValidationExample {
     /**
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/selection/split/TrainTestDatasetSplitterExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/selection/split/TrainTestDatasetSplitterExample.java
index e310ded..fa1c2ca 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/selection/split/TrainTestDatasetSplitterExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/selection/split/TrainTestDatasetSplitterExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.selection.split;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer;
@@ -36,9 +34,18 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run linear regression model over dataset splitted on train and test subsets.
- *
- * @see TrainTestDatasetSplitter
+ * Run linear regression model over dataset split on train and test subsets ({@link TrainTestDatasetSplitter}).
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with simple test data.</p>
+ * <p>
+ * After that it creates dataset splitter and trains the linear regression model based on the specified data using
+ * this splitter.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict the target value
+ * and compares prediction to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data and split parameters used in this example and re-run it to explore this functionality
+ * further.</p>
  */
 public class TrainTestDatasetSplitterExample {
     /** */
@@ -108,11 +115,12 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 TrainTestDatasetSplitterExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 System.out.println(">>> Create new linear regression trainer object.");
                 LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer();
 
+                System.out.println(">>> Create new training dataset splitter object.");
                 TrainTestSplit<Integer, double[]> split = new TrainTestDatasetSplitter<Integer, double[]>()
                     .split(0.75);
 
@@ -147,6 +155,8 @@
                 }
 
                 System.out.println(">>> ---------------------------------");
+
+                System.out.println(">>> Linear regression model over cache based dataset usage example completed.");
             });
 
             igniteThread.start();
@@ -154,23 +164,4 @@
             igniteThread.join();
         }
     }
-
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 3));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java
index 855517d..f71db2d 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.svm.binary;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationModel;
@@ -34,9 +32,18 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run SVM binary-class classification model over distributed dataset.
- *
- * @see SVMLinearBinaryClassificationModel
+ * Run SVM binary-class classification model ({@link SVMLinearBinaryClassificationModel}) over distributed dataset.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://en.wikipedia.org/wiki/Iris_flower_data_set"></a>Iris dataset</a>).</p>
+ * <p>
+ * After that it trains the model based on the specified data using KMeans algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained model to predict what cluster
+ * does this point belong to, compares prediction to expected outcome (ground truth), and builds
+ * <a href="https://en.wikipedia.org/wiki/Confusion_matrix">confusion matrix</a>.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class SVMBinaryClassificationExample {
     /** Run example. */
@@ -49,7 +56,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 SVMBinaryClassificationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
                 SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer();
 
@@ -84,8 +91,8 @@
                         if(groundTruth != prediction)
                             amountOfErrors++;
 
-                        int idx1 = (int)prediction == -1.0 ? 0 : 1;
-                        int idx2 = (int)groundTruth == -1.0 ? 0 : 1;
+                        int idx1 = prediction == 0.0 ? 0 : 1;
+                        int idx2 = groundTruth == 0.0 ? 0 : 1;
 
                         confusionMtx[idx1][idx2]++;
 
@@ -99,6 +106,8 @@
                 }
 
                 System.out.println("\n>>> Confusion matrix is " + Arrays.deepToString(confusionMtx));
+
+                System.out.println(">>> Linear regression model over cache based dataset usage example completed.");
             });
 
             igniteThread.start();
@@ -106,78 +115,58 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
-
     /** The 1st and 2nd classes from the Iris dataset. */
     private static final double[][] data = {
-        {-1, 5.1, 3.5, 1.4, 0.2},
-        {-1, 4.9, 3, 1.4, 0.2},
-        {-1, 4.7, 3.2, 1.3, 0.2},
-        {-1, 4.6, 3.1, 1.5, 0.2},
-        {-1, 5, 3.6, 1.4, 0.2},
-        {-1, 5.4, 3.9, 1.7, 0.4},
-        {-1, 4.6, 3.4, 1.4, 0.3},
-        {-1, 5, 3.4, 1.5, 0.2},
-        {-1, 4.4, 2.9, 1.4, 0.2},
-        {-1, 4.9, 3.1, 1.5, 0.1},
-        {-1, 5.4, 3.7, 1.5, 0.2},
-        {-1, 4.8, 3.4, 1.6, 0.2},
-        {-1, 4.8, 3, 1.4, 0.1},
-        {-1, 4.3, 3, 1.1, 0.1},
-        {-1, 5.8, 4, 1.2, 0.2},
-        {-1, 5.7, 4.4, 1.5, 0.4},
-        {-1, 5.4, 3.9, 1.3, 0.4},
-        {-1, 5.1, 3.5, 1.4, 0.3},
-        {-1, 5.7, 3.8, 1.7, 0.3},
-        {-1, 5.1, 3.8, 1.5, 0.3},
-        {-1, 5.4, 3.4, 1.7, 0.2},
-        {-1, 5.1, 3.7, 1.5, 0.4},
-        {-1, 4.6, 3.6, 1, 0.2},
-        {-1, 5.1, 3.3, 1.7, 0.5},
-        {-1, 4.8, 3.4, 1.9, 0.2},
-        {-1, 5, 3, 1.6, 0.2},
-        {-1, 5, 3.4, 1.6, 0.4},
-        {-1, 5.2, 3.5, 1.5, 0.2},
-        {-1, 5.2, 3.4, 1.4, 0.2},
-        {-1, 4.7, 3.2, 1.6, 0.2},
-        {-1, 4.8, 3.1, 1.6, 0.2},
-        {-1, 5.4, 3.4, 1.5, 0.4},
-        {-1, 5.2, 4.1, 1.5, 0.1},
-        {-1, 5.5, 4.2, 1.4, 0.2},
-        {-1, 4.9, 3.1, 1.5, 0.1},
-        {-1, 5, 3.2, 1.2, 0.2},
-        {-1, 5.5, 3.5, 1.3, 0.2},
-        {-1, 4.9, 3.1, 1.5, 0.1},
-        {-1, 4.4, 3, 1.3, 0.2},
-        {-1, 5.1, 3.4, 1.5, 0.2},
-        {-1, 5, 3.5, 1.3, 0.3},
-        {-1, 4.5, 2.3, 1.3, 0.3},
-        {-1, 4.4, 3.2, 1.3, 0.2},
-        {-1, 5, 3.5, 1.6, 0.6},
-        {-1, 5.1, 3.8, 1.9, 0.4},
-        {-1, 4.8, 3, 1.4, 0.3},
-        {-1, 5.1, 3.8, 1.6, 0.2},
-        {-1, 4.6, 3.2, 1.4, 0.2},
-        {-1, 5.3, 3.7, 1.5, 0.2},
-        {-1, 5, 3.3, 1.4, 0.2},
+        {0, 5.1, 3.5, 1.4, 0.2},
+        {0, 4.9, 3, 1.4, 0.2},
+        {0, 4.7, 3.2, 1.3, 0.2},
+        {0, 4.6, 3.1, 1.5, 0.2},
+        {0, 5, 3.6, 1.4, 0.2},
+        {0, 5.4, 3.9, 1.7, 0.4},
+        {0, 4.6, 3.4, 1.4, 0.3},
+        {0, 5, 3.4, 1.5, 0.2},
+        {0, 4.4, 2.9, 1.4, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 5.4, 3.7, 1.5, 0.2},
+        {0, 4.8, 3.4, 1.6, 0.2},
+        {0, 4.8, 3, 1.4, 0.1},
+        {0, 4.3, 3, 1.1, 0.1},
+        {0, 5.8, 4, 1.2, 0.2},
+        {0, 5.7, 4.4, 1.5, 0.4},
+        {0, 5.4, 3.9, 1.3, 0.4},
+        {0, 5.1, 3.5, 1.4, 0.3},
+        {0, 5.7, 3.8, 1.7, 0.3},
+        {0, 5.1, 3.8, 1.5, 0.3},
+        {0, 5.4, 3.4, 1.7, 0.2},
+        {0, 5.1, 3.7, 1.5, 0.4},
+        {0, 4.6, 3.6, 1, 0.2},
+        {0, 5.1, 3.3, 1.7, 0.5},
+        {0, 4.8, 3.4, 1.9, 0.2},
+        {0, 5, 3, 1.6, 0.2},
+        {0, 5, 3.4, 1.6, 0.4},
+        {0, 5.2, 3.5, 1.5, 0.2},
+        {0, 5.2, 3.4, 1.4, 0.2},
+        {0, 4.7, 3.2, 1.6, 0.2},
+        {0, 4.8, 3.1, 1.6, 0.2},
+        {0, 5.4, 3.4, 1.5, 0.4},
+        {0, 5.2, 4.1, 1.5, 0.1},
+        {0, 5.5, 4.2, 1.4, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 5, 3.2, 1.2, 0.2},
+        {0, 5.5, 3.5, 1.3, 0.2},
+        {0, 4.9, 3.1, 1.5, 0.1},
+        {0, 4.4, 3, 1.3, 0.2},
+        {0, 5.1, 3.4, 1.5, 0.2},
+        {0, 5, 3.5, 1.3, 0.3},
+        {0, 4.5, 2.3, 1.3, 0.3},
+        {0, 4.4, 3.2, 1.3, 0.2},
+        {0, 5, 3.5, 1.6, 0.6},
+        {0, 5.1, 3.8, 1.9, 0.4},
+        {0, 4.8, 3, 1.4, 0.3},
+        {0, 5.1, 3.8, 1.6, 0.2},
+        {0, 4.6, 3.2, 1.4, 0.2},
+        {0, 5.3, 3.7, 1.5, 0.2},
+        {0, 5, 3.3, 1.4, 0.2},
         {1, 7, 3.2, 4.7, 1.4},
         {1, 6.4, 3.2, 4.5, 1.5},
         {1, 6.9, 3.1, 4.9, 1.5},
@@ -229,5 +218,4 @@
         {1, 5.1, 2.5, 3, 1.1},
         {1, 5.7, 2.8, 4.1, 1.3},
     };
-
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java
index b9e24c0..b7ca448 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java
@@ -18,15 +18,13 @@
 package org.apache.ignite.examples.ml.svm.multiclass;
 
 import java.util.Arrays;
-import java.util.UUID;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
@@ -37,10 +35,20 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Run SVM multi-class classification trainer over distributed dataset to build two models:
- * one with minmaxscaling and one without minmaxscaling.
- *
- * @see SVMLinearMultiClassClassificationModel
+ * Run SVM multi-class classification trainer ({@link SVMLinearMultiClassClassificationModel}) over distributed dataset
+ * to build two models: one with minmaxscaling and one without minmaxscaling.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (preprocessed
+ * <a href="https://archive.ics.uci.edu/ml/datasets/Glass+Identification">Glass dataset</a>).</p>
+ * <p>
+ * After that it trains two SVM multi-class models based on the specified data - one model is with minmaxscaling
+ * and one without minmaxscaling.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, applies the trained models to predict what cluster
+ * does this point belong to, compares prediction to expected outcome (ground truth), and builds
+ * <a href="https://en.wikipedia.org/wiki/Confusion_matrix">confusion matrix</a>.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class SVMMultiClassClassificationExample {
     /** Run example. */
@@ -53,7 +61,7 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 SVMMultiClassClassificationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, Vector> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, Vector> dataCache = new TestCache(ignite).getVectors(data);
 
                 SVMLinearMultiClassClassificationTrainer trainer = new SVMLinearMultiClassClassificationTrainer();
 
@@ -144,6 +152,8 @@
                     System.out.println("\n>>> Absolute amount of errors " + amountOfErrorsWithNormalization);
                     System.out.println("\n>>> Accuracy " + (1 - amountOfErrorsWithNormalization / (double)totalAmount));
                     System.out.println("\n>>> Confusion matrix is " + Arrays.deepToString(confusionMtxWithNormalization));
+
+                    System.out.println(">>> Linear regression model over cache based dataset usage example completed.");
                 }
             });
 
@@ -152,25 +162,6 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, Vector> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, Vector> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, Vector> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, VectorUtils.of(data[i]));
-
-        return cache;
-    }
-
     /** The preprocessed Glass dataset from the Machine Learning Repository https://archive.ics.uci.edu/ml/datasets/Glass+Identification
      *  There are 3 classes with labels: 1 {building_windows_float_processed}, 3 {vehicle_windows_float_processed}, 7 {headlamps}.
      *  Feature names: 'Na-Sodium', 'Mg-Magnesium', 'Al-Aluminum', 'Ba-Barium', 'Fe-Iron'.
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java
index f5a804d..28a5fbc 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java
@@ -30,6 +30,15 @@
 
 /**
  * Example of using distributed {@link DecisionTreeClassificationTrainer}.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with pseudo random training data points.</p>
+ * <p>
+ * After that it creates classification trainer and uses it to train the model on the training set.</p>
+ * <p>
+ * Finally, this example loops over the pseudo randomly generated test set of data points, applies the trained model,
+ * and compares prediction to expected outcome.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class DecisionTreeClassificationTrainerExample {
     /**
@@ -71,14 +80,20 @@
                     (k, v) -> v.lb
                 );
 
+                System.out.println(">>> Decision tree classification model: " + mdl);
+
                 // Calculate score.
                 int correctPredictions = 0;
                 for (int i = 0; i < 1000; i++) {
                     LabeledPoint pnt = generatePoint(rnd);
 
                     double prediction = mdl.apply(VectorUtils.of(pnt.x, pnt.y));
+                    double lbl = pnt.lb;
 
-                    if (prediction == pnt.lb)
+                    if (i %50 == 1)
+                        System.out.printf(">>> test #: %d\t\t predicted: %.4f\t\tlabel: %.4f\n", i, prediction, lbl);
+
+                    if (prediction == lbl)
                         correctPredictions++;
                 }
 
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java
index 3ebc56a..301df10 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java
@@ -29,6 +29,16 @@
 
 /**
  * Example of using distributed {@link DecisionTreeRegressionTrainer}.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with generated test data points ({@code sin(x)}
+ * on interval {@code [0, 10)}).</p>
+ * <p>
+ * After that it creates classification trainer and uses it to train the model on the training set.</p>
+ * <p>
+ * Finally, this example loops over the test data points, applies the trained model, and compares prediction
+ * to expected outcome (ground truth).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class DecisionTreeRegressionTrainerExample {
     /**
@@ -92,7 +102,7 @@
     }
 
     /**
-     * Generates {@code sin(x)} on interval [0, 10) and loads into the specified cache.
+     * Generates {@code sin(x)} on interval {@code [0, 10)} and loads into the specified cache.
      */
     private static void generatePoints(IgniteCache<Integer, Point> trainingSet) {
         for (int i = 0; i < 1000; i++) {
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java
index 384d2d9..e092e5c 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesClassificationTrainerExample.java
@@ -22,8 +22,8 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.ml.Model;
-import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.trainers.DatasetTrainer;
 import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer;
@@ -32,10 +32,10 @@
 
 /**
  * Example represents a solution for the task of classification learning based on
- * Gradient Boosting on trees implementation. It shows an initialization of {@link org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer},
+ * Gradient Boosting on trees implementation. It shows an initialization of {@link GDBBinaryClassifierOnTreesTrainer},
  * initialization of Ignite Cache, learning step and comparing of predicted and real values.
- *
- * In this example dataset is creating automatically by meander function f(x) = [sin(x) > 0].
+ * <p>
+ * In this example dataset is created automatically by meander function {@code f(x) = [sin(x) > 0]}.</p>
  */
 public class GDBOnTreesClassificationTrainerExample {
     /**
@@ -44,6 +44,8 @@
      * @param args Command line arguments, none required.
      */
     public static void main(String... args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> GDB classification trainer example started.");
         // Start ignite grid.
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             System.out.println(">>> Ignite grid started.");
@@ -56,10 +58,11 @@
                 IgniteCache<Integer, double[]> trainingSet = fillTrainingData(ignite, trainingSetCfg);
 
                 // Create regression trainer.
-                DatasetTrainer<Model<Vector, Double>, Double> trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.);
+                DatasetTrainer<ModelsComposition, Double> trainer = new GDBBinaryClassifierOnTreesTrainer(1.0, 300, 2, 0.)
+                    .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1));
 
                 // Train decision tree model.
-                Model<Vector, Double> mdl = trainer.fit(
+                ModelsComposition mdl = trainer.fit(
                     ignite,
                     trainingSet,
                     (k, v) -> VectorUtils.of(v[0]),
@@ -78,6 +81,8 @@
                 }
 
                 System.out.println(">>> ---------------------------------");
+                System.out.println(">>> Count of trees = " + mdl.getModels().size());
+                System.out.println(">>> ---------------------------------");
 
                 System.out.println(">>> GDB classification trainer example completed.");
             });
@@ -100,7 +105,7 @@
     /**
      * Fill meander-like training data.
      *
-     * @param ignite Ignite.
+     * @param ignite Ignite instance.
      * @param trainingSetCfg Training set config.
      */
     @NotNull private static IgniteCache<Integer, double[]> fillTrainingData(Ignite ignite,
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java
index fa7a0d4..3662973 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/boosting/GDBOnTreesRegressionTrainerExample.java
@@ -23,6 +23,8 @@
 import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.trainers.DatasetTrainer;
@@ -32,10 +34,10 @@
 
 /**
  * Example represents a solution for the task of regression learning based on
- * Gradient Boosting on trees implementation. It shows an initialization of {@link org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer},
+ * Gradient Boosting on trees implementation. It shows an initialization of {@link GDBRegressionOnTreesTrainer},
  * initialization of Ignite Cache, learning step and comparing of predicted and real values.
- *
- * In this example dataset is creating automatically by parabolic function f(x) = x^2.
+ * <p>
+ * In this example dataset is created automatically by parabolic function {@code f(x) = x^2}.</p>
  */
 public class GDBOnTreesRegressionTrainerExample {
     /**
@@ -44,6 +46,8 @@
      * @param args Command line arguments, none required.
      */
     public static void main(String... args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> GDB regression trainer example started.");
         // Start ignite grid.
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             System.out.println(">>> Ignite grid started.");
@@ -56,7 +60,8 @@
                 IgniteCache<Integer, double[]> trainingSet = fillTrainingData(ignite, trainingSetCfg);
 
                 // Create regression trainer.
-                DatasetTrainer<Model<Vector, Double>, Double> trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.);
+                DatasetTrainer<ModelsComposition, Double> trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 1, 0.)
+                    .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.001));
 
                 // Train decision tree model.
                 Model<Vector, Double> mdl = trainer.fit(
@@ -79,7 +84,7 @@
 
                 System.out.println(">>> ---------------------------------");
 
-                System.out.println(">>> GDB Regression trainer example completed.");
+                System.out.println(">>> GDB regression trainer example completed.");
             });
 
             igniteThread.start();
@@ -98,9 +103,9 @@
     }
 
     /**
-     * Fill parabola training data.
+     * Fill parabolic training data.
      *
-     * @param ignite Ignite.
+     * @param ignite Ignite instance.
      * @param trainingSetCfg Training set config.
      */
     @NotNull private static IgniteCache<Integer, double[]> fillTrainingData(Ignite ignite,
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestClassificationExample.java
index 33c3a5f..6194153 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestClassificationExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestClassificationExample.java
@@ -18,29 +18,38 @@
 package org.apache.ignite.examples.ml.tree.randomforest;
 
 import java.util.Arrays;
-import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.tree.randomforest.RandomForestClassifierTrainer;
-import org.apache.ignite.ml.tree.randomforest.RandomForestTrainer;
+import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies;
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Example represents a solution for the task of wine classification based on RandomForest
- * implementation for multi-classification. It shows an initialization of {@link RandomForestTrainer} with
- * thread pool for multi-thread learning, initialization of Ignite Cache,
- * learning step and evaluation of accuracy of model.
- *
- * Dataset url: https://archive.ics.uci.edu/ml/machine-learning-databases/wine/
- * @see RandomForestClassifierTrainer
+ * Example represents a solution for the task of wine classification based on a
+ *  <a href ="https://en.wikipedia.org/wiki/Random_forest">Random Forest</a> implementation for
+ * multi-classification.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/wine/">Wine recognition dataset</a>).</p>
+ * <p>
+ * After that it initializes the  {@link RandomForestClassifierTrainer} with thread pool for multi-thread learning
+ * and trains the model based on the specified data using random forest regression algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, compares prediction of the trained model to the
+ * expected outcome (ground truth), and evaluates accuracy of the model.</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class RandomForestClassificationExample {
     /**
@@ -54,16 +63,29 @@
             System.out.println(">>> Ignite grid started.");
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
-                    RandomForestClassificationExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                RandomForestClassificationExample.class.getSimpleName(), () -> {
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
-                RandomForestClassifierTrainer trainer = new RandomForestClassifierTrainer(13, 4, 101, 0.3, 2, 0);
+                AtomicInteger idx = new AtomicInteger(0);
+                RandomForestClassifierTrainer classifier = new RandomForestClassifierTrainer(
+                    IntStream.range(0, data[0].length - 1).mapToObj(
+                        x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList())
+                ).withCountOfTrees(101)
+                    .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD)
+                    .withMaxDepth(4)
+                    .withMinImpurityDelta(0.)
+                    .withSubSampleSize(0.3)
+                    .withSeed(0);
 
-                ModelsComposition randomForest = trainer.fit(ignite, dataCache,
-                        (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
-                        (k, v) -> v[0]
+                System.out.println(">>> Configured trainer: " + classifier.getClass().getSimpleName());
+
+                ModelsComposition randomForest = classifier.fit(ignite, dataCache,
+                    (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+                    (k, v) -> v[0]
                 );
 
+                System.out.println(">>> Trained model: " + randomForest.toString(true));
+
                 int amountOfErrors = 0;
                 int totalAmount = 0;
 
@@ -81,8 +103,11 @@
 
                     }
 
+                    System.out.println("\n>>> Evaluated model on " + totalAmount + " data points.");
+
                     System.out.println("\n>>> Absolute amount of errors " + amountOfErrors);
                     System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double) totalAmount));
+                    System.out.println(">>> Random Forest multi-class classification algorithm over cached dataset usage example completed.");
                 }
             });
 
@@ -91,206 +116,185 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
-    /**
-     * The Wine dataset.
-     */
+    /** The Wine recognition dataset. */
     private static final double[][] data = {
-            {1, 14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.06, .28, 2.29, 5.64, 1.04, 3.92, 1065},
-            {1, 13.2, 1.78, 2.14, 11.2, 100, 2.65, 2.76, .26, 1.28, 4.38, 1.05, 3.4, 1050},
-            {1, 13.16, 2.36, 2.67, 18.6, 101, 2.8, 3.24, .3, 2.81, 5.68, 1.03, 3.17, 1185},
-            {1, 14.37, 1.95, 2.5, 16.8, 113, 3.85, 3.49, .24, 2.18, 7.8, .86, 3.45, 1480},
-            {1, 13.24, 2.59, 2.87, 21, 118, 2.8, 2.69, .39, 1.82, 4.32, 1.04, 2.93, 735},
-            {1, 14.2, 1.76, 2.45, 15.2, 112, 3.27, 3.39, .34, 1.97, 6.75, 1.05, 2.85, 1450},
-            {1, 14.39, 1.87, 2.45, 14.6, 96, 2.5, 2.52, .3, 1.98, 5.25, 1.02, 3.58, 1290},
-            {1, 14.06, 2.15, 2.61, 17.6, 121, 2.6, 2.51, .31, 1.25, 5.05, 1.06, 3.58, 1295},
-            {1, 14.83, 1.64, 2.17, 14, 97, 2.8, 2.98, .29, 1.98, 5.2, 1.08, 2.85, 1045},
-            {1, 13.86, 1.35, 2.27, 16, 98, 2.98, 3.15, .22, 1.85, 7.22, 1.01, 3.55, 1045},
-            {1, 14.1, 2.16, 2.3, 18, 105, 2.95, 3.32, .22, 2.38, 5.75, 1.25, 3.17, 1510},
-            {1, 14.12, 1.48, 2.32, 16.8, 95, 2.2, 2.43, .26, 1.57, 5, 1.17, 2.82, 1280},
-            {1, 13.75, 1.73, 2.41, 16, 89, 2.6, 2.76, .29, 1.81, 5.6, 1.15, 2.9, 1320},
-            {1, 14.75, 1.73, 2.39, 11.4, 91, 3.1, 3.69, .43, 2.81, 5.4, 1.25, 2.73, 1150},
-            {1, 14.38, 1.87, 2.38, 12, 102, 3.3, 3.64, .29, 2.96, 7.5, 1.2, 3, 1547},
-            {1, 13.63, 1.81, 2.7, 17.2, 112, 2.85, 2.91, .3, 1.46, 7.3, 1.28, 2.88, 1310},
-            {1, 14.3, 1.92, 2.72, 20, 120, 2.8, 3.14, .33, 1.97, 6.2, 1.07, 2.65, 1280},
-            {1, 13.83, 1.57, 2.62, 20, 115, 2.95, 3.4, .4, 1.72, 6.6, 1.13, 2.57, 1130},
-            {1, 14.19, 1.59, 2.48, 16.5, 108, 3.3, 3.93, .32, 1.86, 8.7, 1.23, 2.82, 1680},
-            {1, 13.64, 3.1, 2.56, 15.2, 116, 2.7, 3.03, .17, 1.66, 5.1, .96, 3.36, 845},
-            {1, 14.06, 1.63, 2.28, 16, 126, 3, 3.17, .24, 2.1, 5.65, 1.09, 3.71, 780},
-            {1, 12.93, 3.8, 2.65, 18.6, 102, 2.41, 2.41, .25, 1.98, 4.5, 1.03, 3.52, 770},
-            {1, 13.71, 1.86, 2.36, 16.6, 101, 2.61, 2.88, .27, 1.69, 3.8, 1.11, 4, 1035},
-            {1, 12.85, 1.6, 2.52, 17.8, 95, 2.48, 2.37, .26, 1.46, 3.93, 1.09, 3.63, 1015},
-            {1, 13.5, 1.81, 2.61, 20, 96, 2.53, 2.61, .28, 1.66, 3.52, 1.12, 3.82, 845},
-            {1, 13.05, 2.05, 3.22, 25, 124, 2.63, 2.68, .47, 1.92, 3.58, 1.13, 3.2, 830},
-            {1, 13.39, 1.77, 2.62, 16.1, 93, 2.85, 2.94, .34, 1.45, 4.8, .92, 3.22, 1195},
-            {1, 13.3, 1.72, 2.14, 17, 94, 2.4, 2.19, .27, 1.35, 3.95, 1.02, 2.77, 1285},
-            {1, 13.87, 1.9, 2.8, 19.4, 107, 2.95, 2.97, .37, 1.76, 4.5, 1.25, 3.4, 915},
-            {1, 14.02, 1.68, 2.21, 16, 96, 2.65, 2.33, .26, 1.98, 4.7, 1.04, 3.59, 1035},
-            {1, 13.73, 1.5, 2.7, 22.5, 101, 3, 3.25, .29, 2.38, 5.7, 1.19, 2.71, 1285},
-            {1, 13.58, 1.66, 2.36, 19.1, 106, 2.86, 3.19, .22, 1.95, 6.9, 1.09, 2.88, 1515},
-            {1, 13.68, 1.83, 2.36, 17.2, 104, 2.42, 2.69, .42, 1.97, 3.84, 1.23, 2.87, 990},
-            {1, 13.76, 1.53, 2.7, 19.5, 132, 2.95, 2.74, .5, 1.35, 5.4, 1.25, 3, 1235},
-            {1, 13.51, 1.8, 2.65, 19, 110, 2.35, 2.53, .29, 1.54, 4.2, 1.1, 2.87, 1095},
-            {1, 13.48, 1.81, 2.41, 20.5, 100, 2.7, 2.98, .26, 1.86, 5.1, 1.04, 3.47, 920},
-            {1, 13.28, 1.64, 2.84, 15.5, 110, 2.6, 2.68, .34, 1.36, 4.6, 1.09, 2.78, 880},
-            {1, 13.05, 1.65, 2.55, 18, 98, 2.45, 2.43, .29, 1.44, 4.25, 1.12, 2.51, 1105},
-            {1, 13.07, 1.5, 2.1, 15.5, 98, 2.4, 2.64, .28, 1.37, 3.7, 1.18, 2.69, 1020},
-            {1, 14.22, 3.99, 2.51, 13.2, 128, 3, 3.04, .2, 2.08, 5.1, .89, 3.53, 760},
-            {1, 13.56, 1.71, 2.31, 16.2, 117, 3.15, 3.29, .34, 2.34, 6.13, .95, 3.38, 795},
-            {1, 13.41, 3.84, 2.12, 18.8, 90, 2.45, 2.68, .27, 1.48, 4.28, .91, 3, 1035},
-            {1, 13.88, 1.89, 2.59, 15, 101, 3.25, 3.56, .17, 1.7, 5.43, .88, 3.56, 1095},
-            {1, 13.24, 3.98, 2.29, 17.5, 103, 2.64, 2.63, .32, 1.66, 4.36, .82, 3, 680},
-            {1, 13.05, 1.77, 2.1, 17, 107, 3, 3, .28, 2.03, 5.04, .88, 3.35, 885},
-            {1, 14.21, 4.04, 2.44, 18.9, 111, 2.85, 2.65, .3, 1.25, 5.24, .87, 3.33, 1080},
-            {1, 14.38, 3.59, 2.28, 16, 102, 3.25, 3.17, .27, 2.19, 4.9, 1.04, 3.44, 1065},
-            {1, 13.9, 1.68, 2.12, 16, 101, 3.1, 3.39, .21, 2.14, 6.1, .91, 3.33, 985},
-            {1, 14.1, 2.02, 2.4, 18.8, 103, 2.75, 2.92, .32, 2.38, 6.2, 1.07, 2.75, 1060},
-            {1, 13.94, 1.73, 2.27, 17.4, 108, 2.88, 3.54, .32, 2.08, 8.90, 1.12, 3.1, 1260},
-            {1, 13.05, 1.73, 2.04, 12.4, 92, 2.72, 3.27, .17, 2.91, 7.2, 1.12, 2.91, 1150},
-            {1, 13.83, 1.65, 2.6, 17.2, 94, 2.45, 2.99, .22, 2.29, 5.6, 1.24, 3.37, 1265},
-            {1, 13.82, 1.75, 2.42, 14, 111, 3.88, 3.74, .32, 1.87, 7.05, 1.01, 3.26, 1190},
-            {1, 13.77, 1.9, 2.68, 17.1, 115, 3, 2.79, .39, 1.68, 6.3, 1.13, 2.93, 1375},
-            {1, 13.74, 1.67, 2.25, 16.4, 118, 2.6, 2.9, .21, 1.62, 5.85, .92, 3.2, 1060},
-            {1, 13.56, 1.73, 2.46, 20.5, 116, 2.96, 2.78, .2, 2.45, 6.25, .98, 3.03, 1120},
-            {1, 14.22, 1.7, 2.3, 16.3, 118, 3.2, 3, .26, 2.03, 6.38, .94, 3.31, 970},
-            {1, 13.29, 1.97, 2.68, 16.8, 102, 3, 3.23, .31, 1.66, 6, 1.07, 2.84, 1270},
-            {1, 13.72, 1.43, 2.5, 16.7, 108, 3.4, 3.67, .19, 2.04, 6.8, .89, 2.87, 1285},
-            {2, 12.37, .94, 1.36, 10.6, 88, 1.98, .57, .28, .42, 1.95, 1.05, 1.82, 520},
-            {2, 12.33, 1.1, 2.28, 16, 101, 2.05, 1.09, .63, .41, 3.27, 1.25, 1.67, 680},
-            {2, 12.64, 1.36, 2.02, 16.8, 100, 2.02, 1.41, .53, .62, 5.75, .98, 1.59, 450},
-            {2, 13.67, 1.25, 1.92, 18, 94, 2.1, 1.79, .32, .73, 3.8, 1.23, 2.46, 630},
-            {2, 12.37, 1.13, 2.16, 19, 87, 3.5, 3.1, .19, 1.87, 4.45, 1.22, 2.87, 420},
-            {2, 12.17, 1.45, 2.53, 19, 104, 1.89, 1.75, .45, 1.03, 2.95, 1.45, 2.23, 355},
-            {2, 12.37, 1.21, 2.56, 18.1, 98, 2.42, 2.65, .37, 2.08, 4.6, 1.19, 2.3, 678},
-            {2, 13.11, 1.01, 1.7, 15, 78, 2.98, 3.18, .26, 2.28, 5.3, 1.12, 3.18, 502},
-            {2, 12.37, 1.17, 1.92, 19.6, 78, 2.11, 2, .27, 1.04, 4.68, 1.12, 3.48, 510},
-            {2, 13.34, .94, 2.36, 17, 110, 2.53, 1.3, .55, .42, 3.17, 1.02, 1.93, 750},
-            {2, 12.21, 1.19, 1.75, 16.8, 151, 1.85, 1.28, .14, 2.5, 2.85, 1.28, 3.07, 718},
-            {2, 12.29, 1.61, 2.21, 20.4, 103, 1.1, 1.02, .37, 1.46, 3.05, .906, 1.82, 870},
-            {2, 13.86, 1.51, 2.67, 25, 86, 2.95, 2.86, .21, 1.87, 3.38, 1.36, 3.16, 410},
-            {2, 13.49, 1.66, 2.24, 24, 87, 1.88, 1.84, .27, 1.03, 3.74, .98, 2.78, 472},
-            {2, 12.99, 1.67, 2.6, 30, 139, 3.3, 2.89, .21, 1.96, 3.35, 1.31, 3.5, 985},
-            {2, 11.96, 1.09, 2.3, 21, 101, 3.38, 2.14, .13, 1.65, 3.21, .99, 3.13, 886},
-            {2, 11.66, 1.88, 1.92, 16, 97, 1.61, 1.57, .34, 1.15, 3.8, 1.23, 2.14, 428},
-            {2, 13.03, .9, 1.71, 16, 86, 1.95, 2.03, .24, 1.46, 4.6, 1.19, 2.48, 392},
-            {2, 11.84, 2.89, 2.23, 18, 112, 1.72, 1.32, .43, .95, 2.65, .96, 2.52, 500},
-            {2, 12.33, .99, 1.95, 14.8, 136, 1.9, 1.85, .35, 2.76, 3.4, 1.06, 2.31, 750},
-            {2, 12.7, 3.87, 2.4, 23, 101, 2.83, 2.55, .43, 1.95, 2.57, 1.19, 3.13, 463},
-            {2, 12, .92, 2, 19, 86, 2.42, 2.26, .3, 1.43, 2.5, 1.38, 3.12, 278},
-            {2, 12.72, 1.81, 2.2, 18.8, 86, 2.2, 2.53, .26, 1.77, 3.9, 1.16, 3.14, 714},
-            {2, 12.08, 1.13, 2.51, 24, 78, 2, 1.58, .4, 1.4, 2.2, 1.31, 2.72, 630},
-            {2, 13.05, 3.86, 2.32, 22.5, 85, 1.65, 1.59, .61, 1.62, 4.8, .84, 2.01, 515},
-            {2, 11.84, .89, 2.58, 18, 94, 2.2, 2.21, .22, 2.35, 3.05, .79, 3.08, 520},
-            {2, 12.67, .98, 2.24, 18, 99, 2.2, 1.94, .3, 1.46, 2.62, 1.23, 3.16, 450},
-            {2, 12.16, 1.61, 2.31, 22.8, 90, 1.78, 1.69, .43, 1.56, 2.45, 1.33, 2.26, 495},
-            {2, 11.65, 1.67, 2.62, 26, 88, 1.92, 1.61, .4, 1.34, 2.6, 1.36, 3.21, 562},
-            {2, 11.64, 2.06, 2.46, 21.6, 84, 1.95, 1.69, .48, 1.35, 2.8, 1, 2.75, 680},
-            {2, 12.08, 1.33, 2.3, 23.6, 70, 2.2, 1.59, .42, 1.38, 1.74, 1.07, 3.21, 625},
-            {2, 12.08, 1.83, 2.32, 18.5, 81, 1.6, 1.5, .52, 1.64, 2.4, 1.08, 2.27, 480},
-            {2, 12, 1.51, 2.42, 22, 86, 1.45, 1.25, .5, 1.63, 3.6, 1.05, 2.65, 450},
-            {2, 12.69, 1.53, 2.26, 20.7, 80, 1.38, 1.46, .58, 1.62, 3.05, .96, 2.06, 495},
-            {2, 12.29, 2.83, 2.22, 18, 88, 2.45, 2.25, .25, 1.99, 2.15, 1.15, 3.3, 290},
-            {2, 11.62, 1.99, 2.28, 18, 98, 3.02, 2.26, .17, 1.35, 3.25, 1.16, 2.96, 345},
-            {2, 12.47, 1.52, 2.2, 19, 162, 2.5, 2.27, .32, 3.28, 2.6, 1.16, 2.63, 937},
-            {2, 11.81, 2.12, 2.74, 21.5, 134, 1.6, .99, .14, 1.56, 2.5, .95, 2.26, 625},
-            {2, 12.29, 1.41, 1.98, 16, 85, 2.55, 2.5, .29, 1.77, 2.9, 1.23, 2.74, 428},
-            {2, 12.37, 1.07, 2.1, 18.5, 88, 3.52, 3.75, .24, 1.95, 4.5, 1.04, 2.77, 660},
-            {2, 12.29, 3.17, 2.21, 18, 88, 2.85, 2.99, .45, 2.81, 2.3, 1.42, 2.83, 406},
-            {2, 12.08, 2.08, 1.7, 17.5, 97, 2.23, 2.17, .26, 1.4, 3.3, 1.27, 2.96, 710},
-            {2, 12.6, 1.34, 1.9, 18.5, 88, 1.45, 1.36, .29, 1.35, 2.45, 1.04, 2.77, 562},
-            {2, 12.34, 2.45, 2.46, 21, 98, 2.56, 2.11, .34, 1.31, 2.8, .8, 3.38, 438},
-            {2, 11.82, 1.72, 1.88, 19.5, 86, 2.5, 1.64, .37, 1.42, 2.06, .94, 2.44, 415},
-            {2, 12.51, 1.73, 1.98, 20.5, 85, 2.2, 1.92, .32, 1.48, 2.94, 1.04, 3.57, 672},
-            {2, 12.42, 2.55, 2.27, 22, 90, 1.68, 1.84, .66, 1.42, 2.7, .86, 3.3, 315},
-            {2, 12.25, 1.73, 2.12, 19, 80, 1.65, 2.03, .37, 1.63, 3.4, 1, 3.17, 510},
-            {2, 12.72, 1.75, 2.28, 22.5, 84, 1.38, 1.76, .48, 1.63, 3.3, .88, 2.42, 488},
-            {2, 12.22, 1.29, 1.94, 19, 92, 2.36, 2.04, .39, 2.08, 2.7, .86, 3.02, 312},
-            {2, 11.61, 1.35, 2.7, 20, 94, 2.74, 2.92, .29, 2.49, 2.65, .96, 3.26, 680},
-            {2, 11.46, 3.74, 1.82, 19.5, 107, 3.18, 2.58, .24, 3.58, 2.9, .75, 2.81, 562},
-            {2, 12.52, 2.43, 2.17, 21, 88, 2.55, 2.27, .26, 1.22, 2, .9, 2.78, 325},
-            {2, 11.76, 2.68, 2.92, 20, 103, 1.75, 2.03, .6, 1.05, 3.8, 1.23, 2.5, 607},
-            {2, 11.41, .74, 2.5, 21, 88, 2.48, 2.01, .42, 1.44, 3.08, 1.1, 2.31, 434},
-            {2, 12.08, 1.39, 2.5, 22.5, 84, 2.56, 2.29, .43, 1.04, 2.9, .93, 3.19, 385},
-            {2, 11.03, 1.51, 2.2, 21.5, 85, 2.46, 2.17, .52, 2.01, 1.9, 1.71, 2.87, 407},
-            {2, 11.82, 1.47, 1.99, 20.8, 86, 1.98, 1.6, .3, 1.53, 1.95, .95, 3.33, 495},
-            {2, 12.42, 1.61, 2.19, 22.5, 108, 2, 2.09, .34, 1.61, 2.06, 1.06, 2.96, 345},
-            {2, 12.77, 3.43, 1.98, 16, 80, 1.63, 1.25, .43, .83, 3.4, .7, 2.12, 372},
-            {2, 12, 3.43, 2, 19, 87, 2, 1.64, .37, 1.87, 1.28, .93, 3.05, 564},
-            {2, 11.45, 2.4, 2.42, 20, 96, 2.9, 2.79, .32, 1.83, 3.25, .8, 3.39, 625},
-            {2, 11.56, 2.05, 3.23, 28.5, 119, 3.18, 5.08, .47, 1.87, 6, .93, 3.69, 465},
-            {2, 12.42, 4.43, 2.73, 26.5, 102, 2.2, 2.13, .43, 1.71, 2.08, .92, 3.12, 365},
-            {2, 13.05, 5.8, 2.13, 21.5, 86, 2.62, 2.65, .3, 2.01, 2.6, .73, 3.1, 380},
-            {2, 11.87, 4.31, 2.39, 21, 82, 2.86, 3.03, .21, 2.91, 2.8, .75, 3.64, 380},
-            {2, 12.07, 2.16, 2.17, 21, 85, 2.6, 2.65, .37, 1.35, 2.76, .86, 3.28, 378},
-            {2, 12.43, 1.53, 2.29, 21.5, 86, 2.74, 3.15, .39, 1.77, 3.94, .69, 2.84, 352},
-            {2, 11.79, 2.13, 2.78, 28.5, 92, 2.13, 2.24, .58, 1.76, 3, .97, 2.44, 466},
-            {2, 12.37, 1.63, 2.3, 24.5, 88, 2.22, 2.45, .4, 1.9, 2.12, .89, 2.78, 342},
-            {2, 12.04, 4.3, 2.38, 22, 80, 2.1, 1.75, .42, 1.35, 2.6, .79, 2.57, 580},
-            {3, 12.86, 1.35, 2.32, 18, 122, 1.51, 1.25, .21, .94, 4.1, .76, 1.29, 630},
-            {3, 12.88, 2.99, 2.4, 20, 104, 1.3, 1.22, .24, .83, 5.4, .74, 1.42, 530},
-            {3, 12.81, 2.31, 2.4, 24, 98, 1.15, 1.09, .27, .83, 5.7, .66, 1.36, 560},
-            {3, 12.7, 3.55, 2.36, 21.5, 106, 1.7, 1.2, .17, .84, 5, .78, 1.29, 600},
-            {3, 12.51, 1.24, 2.25, 17.5, 85, 2, .58, .6, 1.25, 5.45, .75, 1.51, 650},
-            {3, 12.6, 2.46, 2.2, 18.5, 94, 1.62, .66, .63, .94, 7.1, .73, 1.58, 695},
-            {3, 12.25, 4.72, 2.54, 21, 89, 1.38, .47, .53, .8, 3.85, .75, 1.27, 720},
-            {3, 12.53, 5.51, 2.64, 25, 96, 1.79, .6, .63, 1.1, 5, .82, 1.69, 515},
-            {3, 13.49, 3.59, 2.19, 19.5, 88, 1.62, .48, .58, .88, 5.7, .81, 1.82, 580},
-            {3, 12.84, 2.96, 2.61, 24, 101, 2.32, .6, .53, .81, 4.92, .89, 2.15, 590},
-            {3, 12.93, 2.81, 2.7, 21, 96, 1.54, .5, .53, .75, 4.6, .77, 2.31, 600},
-            {3, 13.36, 2.56, 2.35, 20, 89, 1.4, .5, .37, .64, 5.6, .7, 2.47, 780},
-            {3, 13.52, 3.17, 2.72, 23.5, 97, 1.55, .52, .5, .55, 4.35, .89, 2.06, 520},
-            {3, 13.62, 4.95, 2.35, 20, 92, 2, .8, .47, 1.02, 4.4, .91, 2.05, 550},
-            {3, 12.25, 3.88, 2.2, 18.5, 112, 1.38, .78, .29, 1.14, 8.21, .65, 2, 855},
-            {3, 13.16, 3.57, 2.15, 21, 102, 1.5, .55, .43, 1.3, 4, .6, 1.68, 830},
-            {3, 13.88, 5.04, 2.23, 20, 80, .98, .34, .4, .68, 4.9, .58, 1.33, 415},
-            {3, 12.87, 4.61, 2.48, 21.5, 86, 1.7, .65, .47, .86, 7.65, .54, 1.86, 625},
-            {3, 13.32, 3.24, 2.38, 21.5, 92, 1.93, .76, .45, 1.25, 8.42, .55, 1.62, 650},
-            {3, 13.08, 3.9, 2.36, 21.5, 113, 1.41, 1.39, .34, 1.14, 9.40, .57, 1.33, 550},
-            {3, 13.5, 3.12, 2.62, 24, 123, 1.4, 1.57, .22, 1.25, 8.60, .59, 1.3, 500},
-            {3, 12.79, 2.67, 2.48, 22, 112, 1.48, 1.36, .24, 1.26, 10.8, .48, 1.47, 480},
-            {3, 13.11, 1.9, 2.75, 25.5, 116, 2.2, 1.28, .26, 1.56, 7.1, .61, 1.33, 425},
-            {3, 13.23, 3.3, 2.28, 18.5, 98, 1.8, .83, .61, 1.87, 10.52, .56, 1.51, 675},
-            {3, 12.58, 1.29, 2.1, 20, 103, 1.48, .58, .53, 1.4, 7.6, .58, 1.55, 640},
-            {3, 13.17, 5.19, 2.32, 22, 93, 1.74, .63, .61, 1.55, 7.9, .6, 1.48, 725},
-            {3, 13.84, 4.12, 2.38, 19.5, 89, 1.8, .83, .48, 1.56, 9.01, .57, 1.64, 480},
-            {3, 12.45, 3.03, 2.64, 27, 97, 1.9, .58, .63, 1.14, 7.5, .67, 1.73, 880},
-            {3, 14.34, 1.68, 2.7, 25, 98, 2.8, 1.31, .53, 2.7, 13, .57, 1.96, 660},
-            {3, 13.48, 1.67, 2.64, 22.5, 89, 2.6, 1.1, .52, 2.29, 11.75, .57, 1.78, 620},
-            {3, 12.36, 3.83, 2.38, 21, 88, 2.3, .92, .5, 1.04, 7.65, .56, 1.58, 520},
-            {3, 13.69, 3.26, 2.54, 20, 107, 1.83, .56, .5, .8, 5.88, .96, 1.82, 680},
-            {3, 12.85, 3.27, 2.58, 22, 106, 1.65, .6, .6, .96, 5.58, .87, 2.11, 570},
-            {3, 12.96, 3.45, 2.35, 18.5, 106, 1.39, .7, .4, .94, 5.28, .68, 1.75, 675},
-            {3, 13.78, 2.76, 2.3, 22, 90, 1.35, .68, .41, 1.03, 9.58, .7, 1.68, 615},
-            {3, 13.73, 4.36, 2.26, 22.5, 88, 1.28, .47, .52, 1.15, 6.62, .78, 1.75, 520},
-            {3, 13.45, 3.7, 2.6, 23, 111, 1.7, .92, .43, 1.46, 10.68, .85, 1.56, 695},
-            {3, 12.82, 3.37, 2.3, 19.5, 88, 1.48, .66, .4, .97, 10.26, .72, 1.75, 685},
-            {3, 13.58, 2.58, 2.69, 24.5, 105, 1.55, .84, .39, 1.54, 8.66, .74, 1.8, 750},
-            {3, 13.4, 4.6, 2.86, 25, 112, 1.98, .96, .27, 1.11, 8.5, .67, 1.92, 630},
-            {3, 12.2, 3.03, 2.32, 19, 96, 1.25, .49, .4, .73, 5.5, .66, 1.83, 510},
-            {3, 12.77, 2.39, 2.28, 19.5, 86, 1.39, .51, .48, .64, 9.899999, .57, 1.63, 470},
-            {3, 14.16, 2.51, 2.48, 20, 91, 1.68, .7, .44, 1.24, 9.7, .62, 1.71, 660},
-            {3, 13.71, 5.65, 2.45, 20.5, 95, 1.68, .61, .52, 1.06, 7.7, .64, 1.74, 740},
-            {3, 13.4, 3.91, 2.48, 23, 102, 1.8, .75, .43, 1.41, 7.3, .7, 1.56, 750},
-            {3, 13.27, 4.28, 2.26, 20, 120, 1.59, .69, .43, 1.35, 10.2, .59, 1.56, 835},
-            {3, 13.17, 2.59, 2.37, 20, 120, 1.65, .68, .53, 1.46, 9.3, .6, 1.62, 840},
-            {3, 14.13, 4.1, 2.74, 24.5, 96, 2.05, .76, .56, 1.35, 9.2, .61, 1.6, 560}
+        {1, 14.23, 1.71, 2.43, 15.6, 127, 2.8, 3.06, .28, 2.29, 5.64, 1.04, 3.92, 1065},
+        {1, 13.2, 1.78, 2.14, 11.2, 100, 2.65, 2.76, .26, 1.28, 4.38, 1.05, 3.4, 1050},
+        {1, 13.16, 2.36, 2.67, 18.6, 101, 2.8, 3.24, .3, 2.81, 5.68, 1.03, 3.17, 1185},
+        {1, 14.37, 1.95, 2.5, 16.8, 113, 3.85, 3.49, .24, 2.18, 7.8, .86, 3.45, 1480},
+        {1, 13.24, 2.59, 2.87, 21, 118, 2.8, 2.69, .39, 1.82, 4.32, 1.04, 2.93, 735},
+        {1, 14.2, 1.76, 2.45, 15.2, 112, 3.27, 3.39, .34, 1.97, 6.75, 1.05, 2.85, 1450},
+        {1, 14.39, 1.87, 2.45, 14.6, 96, 2.5, 2.52, .3, 1.98, 5.25, 1.02, 3.58, 1290},
+        {1, 14.06, 2.15, 2.61, 17.6, 121, 2.6, 2.51, .31, 1.25, 5.05, 1.06, 3.58, 1295},
+        {1, 14.83, 1.64, 2.17, 14, 97, 2.8, 2.98, .29, 1.98, 5.2, 1.08, 2.85, 1045},
+        {1, 13.86, 1.35, 2.27, 16, 98, 2.98, 3.15, .22, 1.85, 7.22, 1.01, 3.55, 1045},
+        {1, 14.1, 2.16, 2.3, 18, 105, 2.95, 3.32, .22, 2.38, 5.75, 1.25, 3.17, 1510},
+        {1, 14.12, 1.48, 2.32, 16.8, 95, 2.2, 2.43, .26, 1.57, 5, 1.17, 2.82, 1280},
+        {1, 13.75, 1.73, 2.41, 16, 89, 2.6, 2.76, .29, 1.81, 5.6, 1.15, 2.9, 1320},
+        {1, 14.75, 1.73, 2.39, 11.4, 91, 3.1, 3.69, .43, 2.81, 5.4, 1.25, 2.73, 1150},
+        {1, 14.38, 1.87, 2.38, 12, 102, 3.3, 3.64, .29, 2.96, 7.5, 1.2, 3, 1547},
+        {1, 13.63, 1.81, 2.7, 17.2, 112, 2.85, 2.91, .3, 1.46, 7.3, 1.28, 2.88, 1310},
+        {1, 14.3, 1.92, 2.72, 20, 120, 2.8, 3.14, .33, 1.97, 6.2, 1.07, 2.65, 1280},
+        {1, 13.83, 1.57, 2.62, 20, 115, 2.95, 3.4, .4, 1.72, 6.6, 1.13, 2.57, 1130},
+        {1, 14.19, 1.59, 2.48, 16.5, 108, 3.3, 3.93, .32, 1.86, 8.7, 1.23, 2.82, 1680},
+        {1, 13.64, 3.1, 2.56, 15.2, 116, 2.7, 3.03, .17, 1.66, 5.1, .96, 3.36, 845},
+        {1, 14.06, 1.63, 2.28, 16, 126, 3, 3.17, .24, 2.1, 5.65, 1.09, 3.71, 780},
+        {1, 12.93, 3.8, 2.65, 18.6, 102, 2.41, 2.41, .25, 1.98, 4.5, 1.03, 3.52, 770},
+        {1, 13.71, 1.86, 2.36, 16.6, 101, 2.61, 2.88, .27, 1.69, 3.8, 1.11, 4, 1035},
+        {1, 12.85, 1.6, 2.52, 17.8, 95, 2.48, 2.37, .26, 1.46, 3.93, 1.09, 3.63, 1015},
+        {1, 13.5, 1.81, 2.61, 20, 96, 2.53, 2.61, .28, 1.66, 3.52, 1.12, 3.82, 845},
+        {1, 13.05, 2.05, 3.22, 25, 124, 2.63, 2.68, .47, 1.92, 3.58, 1.13, 3.2, 830},
+        {1, 13.39, 1.77, 2.62, 16.1, 93, 2.85, 2.94, .34, 1.45, 4.8, .92, 3.22, 1195},
+        {1, 13.3, 1.72, 2.14, 17, 94, 2.4, 2.19, .27, 1.35, 3.95, 1.02, 2.77, 1285},
+        {1, 13.87, 1.9, 2.8, 19.4, 107, 2.95, 2.97, .37, 1.76, 4.5, 1.25, 3.4, 915},
+        {1, 14.02, 1.68, 2.21, 16, 96, 2.65, 2.33, .26, 1.98, 4.7, 1.04, 3.59, 1035},
+        {1, 13.73, 1.5, 2.7, 22.5, 101, 3, 3.25, .29, 2.38, 5.7, 1.19, 2.71, 1285},
+        {1, 13.58, 1.66, 2.36, 19.1, 106, 2.86, 3.19, .22, 1.95, 6.9, 1.09, 2.88, 1515},
+        {1, 13.68, 1.83, 2.36, 17.2, 104, 2.42, 2.69, .42, 1.97, 3.84, 1.23, 2.87, 990},
+        {1, 13.76, 1.53, 2.7, 19.5, 132, 2.95, 2.74, .5, 1.35, 5.4, 1.25, 3, 1235},
+        {1, 13.51, 1.8, 2.65, 19, 110, 2.35, 2.53, .29, 1.54, 4.2, 1.1, 2.87, 1095},
+        {1, 13.48, 1.81, 2.41, 20.5, 100, 2.7, 2.98, .26, 1.86, 5.1, 1.04, 3.47, 920},
+        {1, 13.28, 1.64, 2.84, 15.5, 110, 2.6, 2.68, .34, 1.36, 4.6, 1.09, 2.78, 880},
+        {1, 13.05, 1.65, 2.55, 18, 98, 2.45, 2.43, .29, 1.44, 4.25, 1.12, 2.51, 1105},
+        {1, 13.07, 1.5, 2.1, 15.5, 98, 2.4, 2.64, .28, 1.37, 3.7, 1.18, 2.69, 1020},
+        {1, 14.22, 3.99, 2.51, 13.2, 128, 3, 3.04, .2, 2.08, 5.1, .89, 3.53, 760},
+        {1, 13.56, 1.71, 2.31, 16.2, 117, 3.15, 3.29, .34, 2.34, 6.13, .95, 3.38, 795},
+        {1, 13.41, 3.84, 2.12, 18.8, 90, 2.45, 2.68, .27, 1.48, 4.28, .91, 3, 1035},
+        {1, 13.88, 1.89, 2.59, 15, 101, 3.25, 3.56, .17, 1.7, 5.43, .88, 3.56, 1095},
+        {1, 13.24, 3.98, 2.29, 17.5, 103, 2.64, 2.63, .32, 1.66, 4.36, .82, 3, 680},
+        {1, 13.05, 1.77, 2.1, 17, 107, 3, 3, .28, 2.03, 5.04, .88, 3.35, 885},
+        {1, 14.21, 4.04, 2.44, 18.9, 111, 2.85, 2.65, .3, 1.25, 5.24, .87, 3.33, 1080},
+        {1, 14.38, 3.59, 2.28, 16, 102, 3.25, 3.17, .27, 2.19, 4.9, 1.04, 3.44, 1065},
+        {1, 13.9, 1.68, 2.12, 16, 101, 3.1, 3.39, .21, 2.14, 6.1, .91, 3.33, 985},
+        {1, 14.1, 2.02, 2.4, 18.8, 103, 2.75, 2.92, .32, 2.38, 6.2, 1.07, 2.75, 1060},
+        {1, 13.94, 1.73, 2.27, 17.4, 108, 2.88, 3.54, .32, 2.08, 8.90, 1.12, 3.1, 1260},
+        {1, 13.05, 1.73, 2.04, 12.4, 92, 2.72, 3.27, .17, 2.91, 7.2, 1.12, 2.91, 1150},
+        {1, 13.83, 1.65, 2.6, 17.2, 94, 2.45, 2.99, .22, 2.29, 5.6, 1.24, 3.37, 1265},
+        {1, 13.82, 1.75, 2.42, 14, 111, 3.88, 3.74, .32, 1.87, 7.05, 1.01, 3.26, 1190},
+        {1, 13.77, 1.9, 2.68, 17.1, 115, 3, 2.79, .39, 1.68, 6.3, 1.13, 2.93, 1375},
+        {1, 13.74, 1.67, 2.25, 16.4, 118, 2.6, 2.9, .21, 1.62, 5.85, .92, 3.2, 1060},
+        {1, 13.56, 1.73, 2.46, 20.5, 116, 2.96, 2.78, .2, 2.45, 6.25, .98, 3.03, 1120},
+        {1, 14.22, 1.7, 2.3, 16.3, 118, 3.2, 3, .26, 2.03, 6.38, .94, 3.31, 970},
+        {1, 13.29, 1.97, 2.68, 16.8, 102, 3, 3.23, .31, 1.66, 6, 1.07, 2.84, 1270},
+        {1, 13.72, 1.43, 2.5, 16.7, 108, 3.4, 3.67, .19, 2.04, 6.8, .89, 2.87, 1285},
+        {2, 12.37, .94, 1.36, 10.6, 88, 1.98, .57, .28, .42, 1.95, 1.05, 1.82, 520},
+        {2, 12.33, 1.1, 2.28, 16, 101, 2.05, 1.09, .63, .41, 3.27, 1.25, 1.67, 680},
+        {2, 12.64, 1.36, 2.02, 16.8, 100, 2.02, 1.41, .53, .62, 5.75, .98, 1.59, 450},
+        {2, 13.67, 1.25, 1.92, 18, 94, 2.1, 1.79, .32, .73, 3.8, 1.23, 2.46, 630},
+        {2, 12.37, 1.13, 2.16, 19, 87, 3.5, 3.1, .19, 1.87, 4.45, 1.22, 2.87, 420},
+        {2, 12.17, 1.45, 2.53, 19, 104, 1.89, 1.75, .45, 1.03, 2.95, 1.45, 2.23, 355},
+        {2, 12.37, 1.21, 2.56, 18.1, 98, 2.42, 2.65, .37, 2.08, 4.6, 1.19, 2.3, 678},
+        {2, 13.11, 1.01, 1.7, 15, 78, 2.98, 3.18, .26, 2.28, 5.3, 1.12, 3.18, 502},
+        {2, 12.37, 1.17, 1.92, 19.6, 78, 2.11, 2, .27, 1.04, 4.68, 1.12, 3.48, 510},
+        {2, 13.34, .94, 2.36, 17, 110, 2.53, 1.3, .55, .42, 3.17, 1.02, 1.93, 750},
+        {2, 12.21, 1.19, 1.75, 16.8, 151, 1.85, 1.28, .14, 2.5, 2.85, 1.28, 3.07, 718},
+        {2, 12.29, 1.61, 2.21, 20.4, 103, 1.1, 1.02, .37, 1.46, 3.05, .906, 1.82, 870},
+        {2, 13.86, 1.51, 2.67, 25, 86, 2.95, 2.86, .21, 1.87, 3.38, 1.36, 3.16, 410},
+        {2, 13.49, 1.66, 2.24, 24, 87, 1.88, 1.84, .27, 1.03, 3.74, .98, 2.78, 472},
+        {2, 12.99, 1.67, 2.6, 30, 139, 3.3, 2.89, .21, 1.96, 3.35, 1.31, 3.5, 985},
+        {2, 11.96, 1.09, 2.3, 21, 101, 3.38, 2.14, .13, 1.65, 3.21, .99, 3.13, 886},
+        {2, 11.66, 1.88, 1.92, 16, 97, 1.61, 1.57, .34, 1.15, 3.8, 1.23, 2.14, 428},
+        {2, 13.03, .9, 1.71, 16, 86, 1.95, 2.03, .24, 1.46, 4.6, 1.19, 2.48, 392},
+        {2, 11.84, 2.89, 2.23, 18, 112, 1.72, 1.32, .43, .95, 2.65, .96, 2.52, 500},
+        {2, 12.33, .99, 1.95, 14.8, 136, 1.9, 1.85, .35, 2.76, 3.4, 1.06, 2.31, 750},
+        {2, 12.7, 3.87, 2.4, 23, 101, 2.83, 2.55, .43, 1.95, 2.57, 1.19, 3.13, 463},
+        {2, 12, .92, 2, 19, 86, 2.42, 2.26, .3, 1.43, 2.5, 1.38, 3.12, 278},
+        {2, 12.72, 1.81, 2.2, 18.8, 86, 2.2, 2.53, .26, 1.77, 3.9, 1.16, 3.14, 714},
+        {2, 12.08, 1.13, 2.51, 24, 78, 2, 1.58, .4, 1.4, 2.2, 1.31, 2.72, 630},
+        {2, 13.05, 3.86, 2.32, 22.5, 85, 1.65, 1.59, .61, 1.62, 4.8, .84, 2.01, 515},
+        {2, 11.84, .89, 2.58, 18, 94, 2.2, 2.21, .22, 2.35, 3.05, .79, 3.08, 520},
+        {2, 12.67, .98, 2.24, 18, 99, 2.2, 1.94, .3, 1.46, 2.62, 1.23, 3.16, 450},
+        {2, 12.16, 1.61, 2.31, 22.8, 90, 1.78, 1.69, .43, 1.56, 2.45, 1.33, 2.26, 495},
+        {2, 11.65, 1.67, 2.62, 26, 88, 1.92, 1.61, .4, 1.34, 2.6, 1.36, 3.21, 562},
+        {2, 11.64, 2.06, 2.46, 21.6, 84, 1.95, 1.69, .48, 1.35, 2.8, 1, 2.75, 680},
+        {2, 12.08, 1.33, 2.3, 23.6, 70, 2.2, 1.59, .42, 1.38, 1.74, 1.07, 3.21, 625},
+        {2, 12.08, 1.83, 2.32, 18.5, 81, 1.6, 1.5, .52, 1.64, 2.4, 1.08, 2.27, 480},
+        {2, 12, 1.51, 2.42, 22, 86, 1.45, 1.25, .5, 1.63, 3.6, 1.05, 2.65, 450},
+        {2, 12.69, 1.53, 2.26, 20.7, 80, 1.38, 1.46, .58, 1.62, 3.05, .96, 2.06, 495},
+        {2, 12.29, 2.83, 2.22, 18, 88, 2.45, 2.25, .25, 1.99, 2.15, 1.15, 3.3, 290},
+        {2, 11.62, 1.99, 2.28, 18, 98, 3.02, 2.26, .17, 1.35, 3.25, 1.16, 2.96, 345},
+        {2, 12.47, 1.52, 2.2, 19, 162, 2.5, 2.27, .32, 3.28, 2.6, 1.16, 2.63, 937},
+        {2, 11.81, 2.12, 2.74, 21.5, 134, 1.6, .99, .14, 1.56, 2.5, .95, 2.26, 625},
+        {2, 12.29, 1.41, 1.98, 16, 85, 2.55, 2.5, .29, 1.77, 2.9, 1.23, 2.74, 428},
+        {2, 12.37, 1.07, 2.1, 18.5, 88, 3.52, 3.75, .24, 1.95, 4.5, 1.04, 2.77, 660},
+        {2, 12.29, 3.17, 2.21, 18, 88, 2.85, 2.99, .45, 2.81, 2.3, 1.42, 2.83, 406},
+        {2, 12.08, 2.08, 1.7, 17.5, 97, 2.23, 2.17, .26, 1.4, 3.3, 1.27, 2.96, 710},
+        {2, 12.6, 1.34, 1.9, 18.5, 88, 1.45, 1.36, .29, 1.35, 2.45, 1.04, 2.77, 562},
+        {2, 12.34, 2.45, 2.46, 21, 98, 2.56, 2.11, .34, 1.31, 2.8, .8, 3.38, 438},
+        {2, 11.82, 1.72, 1.88, 19.5, 86, 2.5, 1.64, .37, 1.42, 2.06, .94, 2.44, 415},
+        {2, 12.51, 1.73, 1.98, 20.5, 85, 2.2, 1.92, .32, 1.48, 2.94, 1.04, 3.57, 672},
+        {2, 12.42, 2.55, 2.27, 22, 90, 1.68, 1.84, .66, 1.42, 2.7, .86, 3.3, 315},
+        {2, 12.25, 1.73, 2.12, 19, 80, 1.65, 2.03, .37, 1.63, 3.4, 1, 3.17, 510},
+        {2, 12.72, 1.75, 2.28, 22.5, 84, 1.38, 1.76, .48, 1.63, 3.3, .88, 2.42, 488},
+        {2, 12.22, 1.29, 1.94, 19, 92, 2.36, 2.04, .39, 2.08, 2.7, .86, 3.02, 312},
+        {2, 11.61, 1.35, 2.7, 20, 94, 2.74, 2.92, .29, 2.49, 2.65, .96, 3.26, 680},
+        {2, 11.46, 3.74, 1.82, 19.5, 107, 3.18, 2.58, .24, 3.58, 2.9, .75, 2.81, 562},
+        {2, 12.52, 2.43, 2.17, 21, 88, 2.55, 2.27, .26, 1.22, 2, .9, 2.78, 325},
+        {2, 11.76, 2.68, 2.92, 20, 103, 1.75, 2.03, .6, 1.05, 3.8, 1.23, 2.5, 607},
+        {2, 11.41, .74, 2.5, 21, 88, 2.48, 2.01, .42, 1.44, 3.08, 1.1, 2.31, 434},
+        {2, 12.08, 1.39, 2.5, 22.5, 84, 2.56, 2.29, .43, 1.04, 2.9, .93, 3.19, 385},
+        {2, 11.03, 1.51, 2.2, 21.5, 85, 2.46, 2.17, .52, 2.01, 1.9, 1.71, 2.87, 407},
+        {2, 11.82, 1.47, 1.99, 20.8, 86, 1.98, 1.6, .3, 1.53, 1.95, .95, 3.3423, 495},
+        {2, 12.42, 1.61, 2.19, 22.5, 108, 2, 2.09, .34, 1.61, 2.06, 1.06, 2.96, 345},
+        {2, 12.77, 3.43, 1.98, 16, 80, 1.63, 1.25, .43, .83, 3.4, .7, 2.12, 372},
+        {2, 12, 3.43, 2, 19, 87, 2, 1.64, .37, 1.87, 1.28, .93, 3.05, 564},
+        {2, 11.45, 2.4, 2.42, 20, 96, 2.9, 2.79, .32, 1.83, 3.25, .8, 3.39, 625},
+        {2, 11.56, 2.05, 3.23, 28.5, 119, 3.18, 5.08, .47, 1.87, 6, .93, 3.69, 465},
+        {2, 12.42, 4.43, 2.73, 26.5, 102, 2.2, 2.13, .43, 1.71, 2.08, .92, 3.12, 365},
+        {2, 13.05, 5.8, 2.13, 21.5, 86, 2.62, 2.65, .3, 2.01, 2.6, .73, 3.1, 380},
+        {2, 11.87, 4.31, 2.39, 21, 82, 2.86, 3.03, .21, 2.91, 2.8, .75, 3.64, 380},
+        {2, 12.07, 2.16, 2.17, 21, 85, 2.6, 2.65, .37, 1.35, 2.76, .86, 3.28, 378},
+        {2, 12.43, 1.53, 2.29, 21.5, 86, 2.74, 3.15, .39, 1.77, 3.94, .69, 2.84, 352},
+        {2, 11.79, 2.13, 2.78, 28.5, 92, 2.13, 2.24, .58, 1.76, 3, .97, 2.44, 466},
+        {2, 12.37, 1.63, 2.3, 24.5, 88, 2.22, 2.45, .4, 1.9, 2.12, .89, 2.78, 342},
+        {2, 12.04, 4.3, 2.38, 22, 80, 2.1, 1.75, .42, 1.35, 2.6, .79, 2.57, 580},
+        {3, 12.86, 1.35, 2.32, 18, 122, 1.51, 1.25, .21, .94, 4.1, .76, 1.29, 630},
+        {3, 12.88, 2.99, 2.4, 20, 104, 1.3, 1.22, .24, .83, 5.4, .74, 1.42, 530},
+        {3, 12.81, 2.31, 2.4, 24, 98, 1.15, 1.09, .27, .83, 5.7, .66, 1.36, 560},
+        {3, 12.7, 3.55, 2.36, 21.5, 106, 1.7, 1.2, .17, .84, 5, .78, 1.29, 600},
+        {3, 12.51, 1.24, 2.25, 17.5, 85, 2, .58, .6, 1.25, 5.45, .75, 1.51, 650},
+        {3, 12.6, 2.46, 2.2, 18.5, 94, 1.62, .66, .63, .94, 7.1, .73, 1.58, 695},
+        {3, 12.25, 4.72, 2.54, 21, 89, 1.38, .47, .53, .8, 3.85, .75, 1.27, 720},
+        {3, 12.53, 5.51, 2.64, 25, 96, 1.79, .6, .63, 1.1, 5, .82, 1.69, 515},
+        {3, 13.49, 3.59, 2.19, 19.5, 88, 1.62, .48, .58, .88, 5.7, .81, 1.82, 580},
+        {3, 12.84, 2.96, 2.61, 24, 101, 2.32, .6, .53, .81, 4.92, .89, 2.15, 590},
+        {3, 12.93, 2.81, 2.7, 21, 96, 1.54, .5, .53, .75, 4.6, .77, 2.31, 600},
+        {3, 13.36, 2.56, 2.35, 20, 89, 1.4, .5, .37, .64, 5.6, .7, 2.47, 780},
+        {3, 13.52, 3.17, 2.72, 23.5, 97, 1.55, .52, .5, .55, 4.35, .89, 2.06, 520},
+        {3, 13.62, 4.95, 2.35, 20, 92, 2, .8, .47, 1.02, 4.4, .91, 2.05, 550},
+        {3, 12.25, 3.88, 2.2, 18.5, 112, 1.38, .78, .29, 1.14, 8.21, .65, 2, 855},
+        {3, 13.16, 3.57, 2.15, 21, 102, 1.5, .55, .43, 1.3, 4, .6, 1.68, 830},
+        {3, 13.88, 5.04, 2.23, 20, 80, .98, .34, .4, .68, 4.9, .58, 1.33, 415},
+        {3, 12.87, 4.61, 2.48, 21.5, 86, 1.7, .65, .47, .86, 7.65, .54, 1.86, 625},
+        {3, 13.32, 3.24, 2.38, 21.5, 92, 1.93, .76, .45, 1.25, 8.42, .55, 1.62, 650},
+        {3, 13.08, 3.9, 2.36, 21.5, 113, 1.41, 1.39, .34, 1.14, 9.40, .57, 1.33, 550},
+        {3, 13.5, 3.12, 2.62, 24, 123, 1.4, 1.57, .22, 1.25, 8.60, .59, 1.3, 500},
+        {3, 12.79, 2.67, 2.48, 22, 112, 1.48, 1.36, .24, 1.26, 10.8, .48, 1.47, 480},
+        {3, 13.11, 1.9, 2.75, 25.5, 116, 2.2, 1.28, .26, 1.56, 7.1, .61, 1.33, 425},
+        {3, 13.23, 3.3, 2.28, 18.5, 98, 1.8, .83, .61, 1.87, 10.52, .56, 1.51, 675},
+        {3, 12.58, 1.29, 2.1, 20, 103, 1.48, .58, .53, 1.4, 7.6, .58, 1.55, 640},
+        {3, 13.17, 5.19, 2.32, 22, 93, 1.74, .63, .61, 1.55, 7.9, .6, 1.48, 725},
+        {3, 13.84, 4.12, 2.38, 19.5, 89, 1.8, .83, .48, 1.56, 9.01, .57, 1.64, 480},
+        {3, 12.45, 3.03, 2.64, 27, 97, 1.9, .58, .63, 1.14, 7.5, .67, 1.73, 880},
+        {3, 14.34, 1.68, 2.7, 25, 98, 2.8, 1.31, .53, 2.7, 13, .57, 1.96, 660},
+        {3, 13.48, 1.67, 2.64, 22.5, 89, 2.6, 1.1, .52, 2.29, 11.75, .57, 1.78, 620},
+        {3, 12.36, 3.83, 2.38, 21, 88, 2.3, .92, .5, 1.04, 7.65, .56, 1.58, 520},
+        {3, 13.69, 3.26, 2.54, 20, 107, 1.83, .56, .5, .8, 5.88, .96, 1.82, 680},
+        {3, 12.85, 3.27, 2.58, 22, 106, 1.65, .6, .6, .96, 5.58, .87, 2.11, 570},
+        {3, 12.96, 3.45, 2.35, 18.5, 106, 1.39, .7, .4, .94, 5.28, .68, 1.75, 675},
+        {3, 13.78, 2.76, 2.3, 22, 90, 1.35, .68, .41, 1.03, 9.58, .7, 1.68, 615},
+        {3, 13.73, 4.36, 2.26, 22.5, 88, 1.28, .47, .52, 1.15, 6.62, .78, 1.75, 520},
+        {3, 13.45, 3.7, 2.6, 23, 111, 1.7, .92, .43, 1.46, 10.68, .85, 1.56, 695},
+        {3, 12.82, 3.37, 2.3, 19.5, 88, 1.48, .66, .4, .97, 10.26, .72, 1.75, 685},
+        {3, 13.58, 2.58, 2.69, 24.5, 105, 1.55, .84, .39, 1.54, 8.66, .74, 1.8, 750},
+        {3, 13.4, 4.6, 2.86, 25, 112, 1.98, .96, .27, 1.11, 8.5, .67, 1.92, 630},
+        {3, 12.2, 3.03, 2.32, 19, 96, 1.25, .49, .4, .73, 5.5, .66, 1.83, 510},
+        {3, 12.77, 2.39, 2.28, 19.5, 86, 1.39, .51, .48, .64, 9.899999, .57, 1.63, 470},
+        {3, 14.16, 2.51, 2.48, 20, 91, 1.68, .7, .44, 1.24, 9.7, .62, 1.71, 660},
+        {3, 13.71, 5.65, 2.45, 20.5, 95, 1.68, .61, .52, 1.06, 7.7, .64, 1.74, 740},
+        {3, 13.4, 3.91, 2.48, 23, 102, 1.8, .75, .43, 1.41, 7.3, .7, 1.56, 750},
+        {3, 13.27, 4.28, 2.26, 20, 120, 1.59, .69, .43, 1.35, 10.2, .59, 1.56, 835},
+        {3, 13.17, 2.59, 2.37, 20, 120, 1.65, .68, .53, 1.46, 9.3, .6, 1.62, 840},
+        {3, 14.13, 4.1, 2.74, 24.5, 96, 2.05, .76, .56, 1.35, 9.2, .61, 1.6, 560}
     };
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestRegressionExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestRegressionExample.java
index c803354..5f010f2 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestRegressionExample.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/randomforest/RandomForestRegressionExample.java
@@ -18,34 +18,42 @@
 package org.apache.ignite.examples.ml.tree.randomforest;
 
 import java.util.Arrays;
-import java.util.UUID;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
-import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.examples.ml.util.TestCache;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
 import org.apache.ignite.ml.environment.LearningEnvironment;
 import org.apache.ignite.ml.environment.logging.ConsoleLogger;
 import org.apache.ignite.ml.environment.logging.MLLogger;
 import org.apache.ignite.ml.environment.parallelism.ParallelismStrategy;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.tree.randomforest.RandomForestRegressionTrainer;
-import org.apache.ignite.ml.tree.randomforest.RandomForestTrainer;
+import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies;
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Example represents a solution for the task of price predictions for houses in Boston based on RandomForest
- * implementation for regression. It shows an initialization of {@link RandomForestTrainer}, +initialization of Ignite
- * Cache, learning step and evaluation of model quality in terms of Mean Squared Error (MSE) and Mean Absolute Error
- * (MAE).
- *
- * Dataset url: https://archive.ics.uci.edu/ml/machine-learning-databases/housing/
- *
- * @see RandomForestRegressionTrainer
+ * Example represents a solution for the task of price predictions for houses in Boston based on a
+ * <a href ="https://en.wikipedia.org/wiki/Random_forest">Random Forest</a> implementation for regression.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data points (based on the
+ * <a href="https://archive.ics.uci.edu/ml/machine-learning-databases/housing/">Boston Housing dataset</a>).</p>
+ * <p>
+ * After that it initializes the {@link RandomForestRegressionTrainer} and trains the model based on the specified data
+ * using random forest regression algorithm.</p>
+ * <p>
+ * Finally, this example loops over the test set of data points, compares prediction of the trained model to the
+ * expected outcome (ground truth), and evaluates model quality in terms of Mean Squared Error (MSE) and
+ * Mean Absolute Error (MAE).</p>
+ * <p>
+ * You can change the test data used in this example and re-run it to explore this algorithm further.</p>
  */
 public class RandomForestRegressionExample {
     /**
@@ -60,20 +68,34 @@
 
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 RandomForestRegressionExample.class.getSimpleName(), () -> {
-                IgniteCache<Integer, double[]> dataCache = getTestCache(ignite);
+                IgniteCache<Integer, double[]> dataCache = new TestCache(ignite).fillCacheWith(data);
 
-                RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(13, 4, 101, 0.3, 2, 0);
+                AtomicInteger idx = new AtomicInteger(0);
+                RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(
+                    IntStream.range(0, data[0].length - 1).mapToObj(
+                        x -> new FeatureMeta("", idx.getAndIncrement(), false)).collect(Collectors.toList())
+                ).withCountOfTrees(101)
+                    .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD)
+                    .withMaxDepth(4)
+                    .withMinImpurityDelta(0.)
+                    .withSubSampleSize(0.3)
+                    .withSeed(0);
+
                 trainer.setEnvironment(LearningEnvironment.builder()
                     .withParallelismStrategy(ParallelismStrategy.Type.ON_DEFAULT_POOL)
                     .withLoggingFactory(ConsoleLogger.factory(MLLogger.VerboseLevel.LOW))
                     .build()
                 );
 
+                System.out.println(">>> Configured trainer: " + trainer.getClass().getSimpleName());
+
                 ModelsComposition randomForest = trainer.fit(ignite, dataCache,
-                        (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
-                        (k, v) -> v[v.length - 1]
+                    (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+                    (k, v) -> v[v.length - 1]
                 );
 
+                System.out.println(">>> Trained model: " + randomForest.toString(true));
+
                 double mse = 0.0;
                 double mae = 0.0;
                 int totalAmount = 0;
@@ -92,11 +114,15 @@
                         totalAmount++;
                     }
 
+                    System.out.println("\n>>> Evaluated model on " + totalAmount + " data points.");
+
                     mse = mse / totalAmount;
                     System.out.println("\n>>> Mean squared error (MSE) " + mse);
 
                     mae = mae / totalAmount;
                     System.out.println("\n>>> Mean absolute error (MAE) " + mae);
+
+                    System.out.println(">>> Random Forest regression algorithm over cached dataset usage example completed.");
                 }
             });
 
@@ -105,533 +131,512 @@
         }
     }
 
-    /**
-     * Fills cache with data and returns it.
-     *
-     * @param ignite Ignite instance.
-     * @return Filled Ignite Cache.
-     */
-    private static IgniteCache<Integer, double[]> getTestCache(Ignite ignite) {
-        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
-        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
-        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
-
-        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
-
-        for (int i = 0; i < data.length; i++)
-            cache.put(i, data[i]);
-
-        return cache;
-    }
-
-    /**
-     * The Boston housing dataset.
-     */
+    /** The Boston housing dataset. */
     private static final double[][] data = {
-            {0.02731,0.00,7.070,0,0.4690,6.4210,78.90,4.9671,2,242.0,17.80,396.90,9.14,21.60},
-            {0.02729,0.00,7.070,0,0.4690,7.1850,61.10,4.9671,2,242.0,17.80,392.83,4.03,34.70},
-            {0.03237,0.00,2.180,0,0.4580,6.9980,45.80,6.0622,3,222.0,18.70,394.63,2.94,33.40},
-            {0.06905,0.00,2.180,0,0.4580,7.1470,54.20,6.0622,3,222.0,18.70,396.90,5.33,36.20},
-            {0.02985,0.00,2.180,0,0.4580,6.4300,58.70,6.0622,3,222.0,18.70,394.12,5.21,28.70},
-            {0.08829,12.50,7.870,0,0.5240,6.0120,66.60,5.5605,5,311.0,15.20,395.60,12.43,22.90},
-            {0.14455,12.50,7.870,0,0.5240,6.1720,96.10,5.9505,5,311.0,15.20,396.90,19.15,27.10},
-            {0.21124,12.50,7.870,0,0.5240,5.6310,100.00,6.0821,5,311.0,15.20,386.63,29.93,16.50},
-            {0.17004,12.50,7.870,0,0.5240,6.0040,85.90,6.5921,5,311.0,15.20,386.71,17.10,18.90},
-            {0.22489,12.50,7.870,0,0.5240,6.3770,94.30,6.3467,5,311.0,15.20,392.52,20.45,15.00},
-            {0.11747,12.50,7.870,0,0.5240,6.0090,82.90,6.2267,5,311.0,15.20,396.90,13.27,18.90},
-            {0.09378,12.50,7.870,0,0.5240,5.8890,39.00,5.4509,5,311.0,15.20,390.50,15.71,21.70},
-            {0.62976,0.00,8.140,0,0.5380,5.9490,61.80,4.7075,4,307.0,21.00,396.90,8.26,20.40},
-            {0.63796,0.00,8.140,0,0.5380,6.0960,84.50,4.4619,4,307.0,21.00,380.02,10.26,18.20},
-            {0.62739,0.00,8.140,0,0.5380,5.8340,56.50,4.4986,4,307.0,21.00,395.62,8.47,19.90},
-            {1.05393,0.00,8.140,0,0.5380,5.9350,29.30,4.4986,4,307.0,21.00,386.85,6.58,23.10},
-            {0.78420,0.00,8.140,0,0.5380,5.9900,81.70,4.2579,4,307.0,21.00,386.75,14.67,17.50},
-            {0.80271,0.00,8.140,0,0.5380,5.4560,36.60,3.7965,4,307.0,21.00,288.99,11.69,20.20},
-            {0.72580,0.00,8.140,0,0.5380,5.7270,69.50,3.7965,4,307.0,21.00,390.95,11.28,18.20},
-            {1.25179,0.00,8.140,0,0.5380,5.5700,98.10,3.7979,4,307.0,21.00,376.57,21.02,13.60},
-            {0.85204,0.00,8.140,0,0.5380,5.9650,89.20,4.0123,4,307.0,21.00,392.53,13.83,19.60},
-            {1.23247,0.00,8.140,0,0.5380,6.1420,91.70,3.9769,4,307.0,21.00,396.90,18.72,15.20},
-            {0.98843,0.00,8.140,0,0.5380,5.8130,100.00,4.0952,4,307.0,21.00,394.54,19.88,14.50},
-            {0.75026,0.00,8.140,0,0.5380,5.9240,94.10,4.3996,4,307.0,21.00,394.33,16.30,15.60},
-            {0.84054,0.00,8.140,0,0.5380,5.5990,85.70,4.4546,4,307.0,21.00,303.42,16.51,13.90},
-            {0.67191,0.00,8.140,0,0.5380,5.8130,90.30,4.6820,4,307.0,21.00,376.88,14.81,16.60},
-            {0.95577,0.00,8.140,0,0.5380,6.0470,88.80,4.4534,4,307.0,21.00,306.38,17.28,14.80},
-            {0.77299,0.00,8.140,0,0.5380,6.4950,94.40,4.4547,4,307.0,21.00,387.94,12.80,18.40},
-            {1.00245,0.00,8.140,0,0.5380,6.6740,87.30,4.2390,4,307.0,21.00,380.23,11.98,21.00},
-            {1.13081,0.00,8.140,0,0.5380,5.7130,94.10,4.2330,4,307.0,21.00,360.17,22.60,12.70},
-            {1.35472,0.00,8.140,0,0.5380,6.0720,100.00,4.1750,4,307.0,21.00,376.73,13.04,14.50},
-            {1.38799,0.00,8.140,0,0.5380,5.9500,82.00,3.9900,4,307.0,21.00,232.60,27.71,13.20},
-            {1.15172,0.00,8.140,0,0.5380,5.7010,95.00,3.7872,4,307.0,21.00,358.77,18.35,13.10},
-            {1.61282,0.00,8.140,0,0.5380,6.0960,96.90,3.7598,4,307.0,21.00,248.31,20.34,13.50},
-            {0.06417,0.00,5.960,0,0.4990,5.9330,68.20,3.3603,5,279.0,19.20,396.90,9.68,18.90},
-            {0.09744,0.00,5.960,0,0.4990,5.8410,61.40,3.3779,5,279.0,19.20,377.56,11.41,20.00},
-            {0.08014,0.00,5.960,0,0.4990,5.8500,41.50,3.9342,5,279.0,19.20,396.90,8.77,21.00},
-            {0.17505,0.00,5.960,0,0.4990,5.9660,30.20,3.8473,5,279.0,19.20,393.43,10.13,24.70},
-            {0.02763,75.00,2.950,0,0.4280,6.5950,21.80,5.4011,3,252.0,18.30,395.63,4.32,30.80},
-            {0.03359,75.00,2.950,0,0.4280,7.0240,15.80,5.4011,3,252.0,18.30,395.62,1.98,34.90},
-            {0.12744,0.00,6.910,0,0.4480,6.7700,2.90,5.7209,3,233.0,17.90,385.41,4.84,26.60},
-            {0.14150,0.00,6.910,0,0.4480,6.1690,6.60,5.7209,3,233.0,17.90,383.37,5.81,25.30},
-            {0.15936,0.00,6.910,0,0.4480,6.2110,6.50,5.7209,3,233.0,17.90,394.46,7.44,24.70},
-            {0.12269,0.00,6.910,0,0.4480,6.0690,40.00,5.7209,3,233.0,17.90,389.39,9.55,21.20},
-            {0.17142,0.00,6.910,0,0.4480,5.6820,33.80,5.1004,3,233.0,17.90,396.90,10.21,19.30},
-            {0.18836,0.00,6.910,0,0.4480,5.7860,33.30,5.1004,3,233.0,17.90,396.90,14.15,20.00},
-            {0.22927,0.00,6.910,0,0.4480,6.0300,85.50,5.6894,3,233.0,17.90,392.74,18.80,16.60},
-            {0.25387,0.00,6.910,0,0.4480,5.3990,95.30,5.8700,3,233.0,17.90,396.90,30.81,14.40},
-            {0.21977,0.00,6.910,0,0.4480,5.6020,62.00,6.0877,3,233.0,17.90,396.90,16.20,19.40},
-            {0.08873,21.00,5.640,0,0.4390,5.9630,45.70,6.8147,4,243.0,16.80,395.56,13.45,19.70},
-            {0.04337,21.00,5.640,0,0.4390,6.1150,63.00,6.8147,4,243.0,16.80,393.97,9.43,20.50},
-            {0.05360,21.00,5.640,0,0.4390,6.5110,21.10,6.8147,4,243.0,16.80,396.90,5.28,25.00},
-            {0.04981,21.00,5.640,0,0.4390,5.9980,21.40,6.8147,4,243.0,16.80,396.90,8.43,23.40},
-            {0.01360,75.00,4.000,0,0.4100,5.8880,47.60,7.3197,3,469.0,21.10,396.90,14.80,18.90},
-            {0.01311,90.00,1.220,0,0.4030,7.2490,21.90,8.6966,5,226.0,17.90,395.93,4.81,35.40},
-            {0.02055,85.00,0.740,0,0.4100,6.3830,35.70,9.1876,2,313.0,17.30,396.90,5.77,24.70},
-            {0.01432,100.00,1.320,0,0.4110,6.8160,40.50,8.3248,5,256.0,15.10,392.90,3.95,31.60},
-            {0.15445,25.00,5.130,0,0.4530,6.1450,29.20,7.8148,8,284.0,19.70,390.68,6.86,23.30},
-            {0.10328,25.00,5.130,0,0.4530,5.9270,47.20,6.9320,8,284.0,19.70,396.90,9.22,19.60},
-            {0.14932,25.00,5.130,0,0.4530,5.7410,66.20,7.2254,8,284.0,19.70,395.11,13.15,18.70},
-            {0.17171,25.00,5.130,0,0.4530,5.9660,93.40,6.8185,8,284.0,19.70,378.08,14.44,16.00},
-            {0.11027,25.00,5.130,0,0.4530,6.4560,67.80,7.2255,8,284.0,19.70,396.90,6.73,22.20},
-            {0.12650,25.00,5.130,0,0.4530,6.7620,43.40,7.9809,8,284.0,19.70,395.58,9.50,25.00},
-            {0.01951,17.50,1.380,0,0.4161,7.1040,59.50,9.2229,3,216.0,18.60,393.24,8.05,33.00},
-            {0.03584,80.00,3.370,0,0.3980,6.2900,17.80,6.6115,4,337.0,16.10,396.90,4.67,23.50},
-            {0.04379,80.00,3.370,0,0.3980,5.7870,31.10,6.6115,4,337.0,16.10,396.90,10.24,19.40},
-            {0.05789,12.50,6.070,0,0.4090,5.8780,21.40,6.4980,4,345.0,18.90,396.21,8.10,22.00},
-            {0.13554,12.50,6.070,0,0.4090,5.5940,36.80,6.4980,4,345.0,18.90,396.90,13.09,17.40},
-            {0.12816,12.50,6.070,0,0.4090,5.8850,33.00,6.4980,4,345.0,18.90,396.90,8.79,20.90},
-            {0.08826,0.00,10.810,0,0.4130,6.4170,6.60,5.2873,4,305.0,19.20,383.73,6.72,24.20},
-            {0.15876,0.00,10.810,0,0.4130,5.9610,17.50,5.2873,4,305.0,19.20,376.94,9.88,21.70},
-            {0.09164,0.00,10.810,0,0.4130,6.0650,7.80,5.2873,4,305.0,19.20,390.91,5.52,22.80},
-            {0.19539,0.00,10.810,0,0.4130,6.2450,6.20,5.2873,4,305.0,19.20,377.17,7.54,23.40},
-            {0.07896,0.00,12.830,0,0.4370,6.2730,6.00,4.2515,5,398.0,18.70,394.92,6.78,24.10},
-            {0.09512,0.00,12.830,0,0.4370,6.2860,45.00,4.5026,5,398.0,18.70,383.23,8.94,21.40},
-            {0.10153,0.00,12.830,0,0.4370,6.2790,74.50,4.0522,5,398.0,18.70,373.66,11.97,20.00},
-            {0.08707,0.00,12.830,0,0.4370,6.1400,45.80,4.0905,5,398.0,18.70,386.96,10.27,20.80},
-            {0.05646,0.00,12.830,0,0.4370,6.2320,53.70,5.0141,5,398.0,18.70,386.40,12.34,21.20},
-            {0.08387,0.00,12.830,0,0.4370,5.8740,36.60,4.5026,5,398.0,18.70,396.06,9.10,20.30},
-            {0.04113,25.00,4.860,0,0.4260,6.7270,33.50,5.4007,4,281.0,19.00,396.90,5.29,28.00},
-            {0.04462,25.00,4.860,0,0.4260,6.6190,70.40,5.4007,4,281.0,19.00,395.63,7.22,23.90},
-            {0.03659,25.00,4.860,0,0.4260,6.3020,32.20,5.4007,4,281.0,19.00,396.90,6.72,24.80},
-            {0.03551,25.00,4.860,0,0.4260,6.1670,46.70,5.4007,4,281.0,19.00,390.64,7.51,22.90},
-            {0.05059,0.00,4.490,0,0.4490,6.3890,48.00,4.7794,3,247.0,18.50,396.90,9.62,23.90},
-            {0.05735,0.00,4.490,0,0.4490,6.6300,56.10,4.4377,3,247.0,18.50,392.30,6.53,26.60},
-            {0.05188,0.00,4.490,0,0.4490,6.0150,45.10,4.4272,3,247.0,18.50,395.99,12.86,22.50},
-            {0.07151,0.00,4.490,0,0.4490,6.1210,56.80,3.7476,3,247.0,18.50,395.15,8.44,22.20},
-            {0.05660,0.00,3.410,0,0.4890,7.0070,86.30,3.4217,2,270.0,17.80,396.90,5.50,23.60},
-            {0.05302,0.00,3.410,0,0.4890,7.0790,63.10,3.4145,2,270.0,17.80,396.06,5.70,28.70},
-            {0.04684,0.00,3.410,0,0.4890,6.4170,66.10,3.0923,2,270.0,17.80,392.18,8.81,22.60},
-            {0.03932,0.00,3.410,0,0.4890,6.4050,73.90,3.0921,2,270.0,17.80,393.55,8.20,22.00},
-            {0.04203,28.00,15.040,0,0.4640,6.4420,53.60,3.6659,4,270.0,18.20,395.01,8.16,22.90},
-            {0.02875,28.00,15.040,0,0.4640,6.2110,28.90,3.6659,4,270.0,18.20,396.33,6.21,25.00},
-            {0.04294,28.00,15.040,0,0.4640,6.2490,77.30,3.6150,4,270.0,18.20,396.90,10.59,20.60},
-            {0.12204,0.00,2.890,0,0.4450,6.6250,57.80,3.4952,2,276.0,18.00,357.98,6.65,28.40},
-            {0.11504,0.00,2.890,0,0.4450,6.1630,69.60,3.4952,2,276.0,18.00,391.83,11.34,21.40},
-            {0.12083,0.00,2.890,0,0.4450,8.0690,76.00,3.4952,2,276.0,18.00,396.90,4.21,38.70},
-            {0.08187,0.00,2.890,0,0.4450,7.8200,36.90,3.4952,2,276.0,18.00,393.53,3.57,43.80},
-            {0.06860,0.00,2.890,0,0.4450,7.4160,62.50,3.4952,2,276.0,18.00,396.90,6.19,33.20},
-            {0.14866,0.00,8.560,0,0.5200,6.7270,79.90,2.7778,5,384.0,20.90,394.76,9.42,27.50},
-            {0.11432,0.00,8.560,0,0.5200,6.7810,71.30,2.8561,5,384.0,20.90,395.58,7.67,26.50},
-            {0.22876,0.00,8.560,0,0.5200,6.4050,85.40,2.7147,5,384.0,20.90,70.80,10.63,18.60},
-            {0.21161,0.00,8.560,0,0.5200,6.1370,87.40,2.7147,5,384.0,20.90,394.47,13.44,19.30},
-            {0.13960,0.00,8.560,0,0.5200,6.1670,90.00,2.4210,5,384.0,20.90,392.69,12.33,20.10},
-            {0.13262,0.00,8.560,0,0.5200,5.8510,96.70,2.1069,5,384.0,20.90,394.05,16.47,19.50},
-            {0.17120,0.00,8.560,0,0.5200,5.8360,91.90,2.2110,5,384.0,20.90,395.67,18.66,19.50},
-            {0.13117,0.00,8.560,0,0.5200,6.1270,85.20,2.1224,5,384.0,20.90,387.69,14.09,20.40},
-            {0.12802,0.00,8.560,0,0.5200,6.4740,97.10,2.4329,5,384.0,20.90,395.24,12.27,19.80},
-            {0.26363,0.00,8.560,0,0.5200,6.2290,91.20,2.5451,5,384.0,20.90,391.23,15.55,19.40},
-            {0.10793,0.00,8.560,0,0.5200,6.1950,54.40,2.7778,5,384.0,20.90,393.49,13.00,21.70},
-            {0.10084,0.00,10.010,0,0.5470,6.7150,81.60,2.6775,6,432.0,17.80,395.59,10.16,22.80},
-            {0.12329,0.00,10.010,0,0.5470,5.9130,92.90,2.3534,6,432.0,17.80,394.95,16.21,18.80},
-            {0.22212,0.00,10.010,0,0.5470,6.0920,95.40,2.5480,6,432.0,17.80,396.90,17.09,18.70},
-            {0.14231,0.00,10.010,0,0.5470,6.2540,84.20,2.2565,6,432.0,17.80,388.74,10.45,18.50},
-            {0.17134,0.00,10.010,0,0.5470,5.9280,88.20,2.4631,6,432.0,17.80,344.91,15.76,18.30},
-            {0.13158,0.00,10.010,0,0.5470,6.1760,72.50,2.7301,6,432.0,17.80,393.30,12.04,21.20},
-            {0.15098,0.00,10.010,0,0.5470,6.0210,82.60,2.7474,6,432.0,17.80,394.51,10.30,19.20},
-            {0.13058,0.00,10.010,0,0.5470,5.8720,73.10,2.4775,6,432.0,17.80,338.63,15.37,20.40},
-            {0.14476,0.00,10.010,0,0.5470,5.7310,65.20,2.7592,6,432.0,17.80,391.50,13.61,19.30},
-            {0.06899,0.00,25.650,0,0.5810,5.8700,69.70,2.2577,2,188.0,19.10,389.15,14.37,22.00},
-            {0.07165,0.00,25.650,0,0.5810,6.0040,84.10,2.1974,2,188.0,19.10,377.67,14.27,20.30},
-            {0.09299,0.00,25.650,0,0.5810,5.9610,92.90,2.0869,2,188.0,19.10,378.09,17.93,20.50},
-            {0.15038,0.00,25.650,0,0.5810,5.8560,97.00,1.9444,2,188.0,19.10,370.31,25.41,17.30},
-            {0.09849,0.00,25.650,0,0.5810,5.8790,95.80,2.0063,2,188.0,19.10,379.38,17.58,18.80},
-            {0.16902,0.00,25.650,0,0.5810,5.9860,88.40,1.9929,2,188.0,19.10,385.02,14.81,21.40},
-            {0.38735,0.00,25.650,0,0.5810,5.6130,95.60,1.7572,2,188.0,19.10,359.29,27.26,15.70},
-            {0.25915,0.00,21.890,0,0.6240,5.6930,96.00,1.7883,4,437.0,21.20,392.11,17.19,16.20},
-            {0.32543,0.00,21.890,0,0.6240,6.4310,98.80,1.8125,4,437.0,21.20,396.90,15.39,18.00},
-            {0.88125,0.00,21.890,0,0.6240,5.6370,94.70,1.9799,4,437.0,21.20,396.90,18.34,14.30},
-            {0.34006,0.00,21.890,0,0.6240,6.4580,98.90,2.1185,4,437.0,21.20,395.04,12.60,19.20},
-            {1.19294,0.00,21.890,0,0.6240,6.3260,97.70,2.2710,4,437.0,21.20,396.90,12.26,19.60},
-            {0.59005,0.00,21.890,0,0.6240,6.3720,97.90,2.3274,4,437.0,21.20,385.76,11.12,23.00},
-            {0.32982,0.00,21.890,0,0.6240,5.8220,95.40,2.4699,4,437.0,21.20,388.69,15.03,18.40},
-            {0.97617,0.00,21.890,0,0.6240,5.7570,98.40,2.3460,4,437.0,21.20,262.76,17.31,15.60},
-            {0.55778,0.00,21.890,0,0.6240,6.3350,98.20,2.1107,4,437.0,21.20,394.67,16.96,18.10},
-            {0.32264,0.00,21.890,0,0.6240,5.9420,93.50,1.9669,4,437.0,21.20,378.25,16.90,17.40},
-            {0.35233,0.00,21.890,0,0.6240,6.4540,98.40,1.8498,4,437.0,21.20,394.08,14.59,17.10},
-            {0.24980,0.00,21.890,0,0.6240,5.8570,98.20,1.6686,4,437.0,21.20,392.04,21.32,13.30},
-            {0.54452,0.00,21.890,0,0.6240,6.1510,97.90,1.6687,4,437.0,21.20,396.90,18.46,17.80},
-            {0.29090,0.00,21.890,0,0.6240,6.1740,93.60,1.6119,4,437.0,21.20,388.08,24.16,14.00},
-            {1.62864,0.00,21.890,0,0.6240,5.0190,100.00,1.4394,4,437.0,21.20,396.90,34.41,14.40},
-            {3.32105,0.00,19.580,1,0.8710,5.4030,100.00,1.3216,5,403.0,14.70,396.90,26.82,13.40},
-            {4.09740,0.00,19.580,0,0.8710,5.4680,100.00,1.4118,5,403.0,14.70,396.90,26.42,15.60},
-            {2.77974,0.00,19.580,0,0.8710,4.9030,97.80,1.3459,5,403.0,14.70,396.90,29.29,11.80},
-            {2.37934,0.00,19.580,0,0.8710,6.1300,100.00,1.4191,5,403.0,14.70,172.91,27.80,13.80},
-            {2.15505,0.00,19.580,0,0.8710,5.6280,100.00,1.5166,5,403.0,14.70,169.27,16.65,15.60},
-            {2.36862,0.00,19.580,0,0.8710,4.9260,95.70,1.4608,5,403.0,14.70,391.71,29.53,14.60},
-            {2.33099,0.00,19.580,0,0.8710,5.1860,93.80,1.5296,5,403.0,14.70,356.99,28.32,17.80},
-            {2.73397,0.00,19.580,0,0.8710,5.5970,94.90,1.5257,5,403.0,14.70,351.85,21.45,15.40},
-            {1.65660,0.00,19.580,0,0.8710,6.1220,97.30,1.6180,5,403.0,14.70,372.80,14.10,21.50},
-            {1.49632,0.00,19.580,0,0.8710,5.4040,100.00,1.5916,5,403.0,14.70,341.60,13.28,19.60},
-            {1.12658,0.00,19.580,1,0.8710,5.0120,88.00,1.6102,5,403.0,14.70,343.28,12.12,15.30},
-            {2.14918,0.00,19.580,0,0.8710,5.7090,98.50,1.6232,5,403.0,14.70,261.95,15.79,19.40},
-            {1.41385,0.00,19.580,1,0.8710,6.1290,96.00,1.7494,5,403.0,14.70,321.02,15.12,17.00},
-            {3.53501,0.00,19.580,1,0.8710,6.1520,82.60,1.7455,5,403.0,14.70,88.01,15.02,15.60},
-            {2.44668,0.00,19.580,0,0.8710,5.2720,94.00,1.7364,5,403.0,14.70,88.63,16.14,13.10},
-            {1.22358,0.00,19.580,0,0.6050,6.9430,97.40,1.8773,5,403.0,14.70,363.43,4.59,41.30},
-            {1.34284,0.00,19.580,0,0.6050,6.0660,100.00,1.7573,5,403.0,14.70,353.89,6.43,24.30},
-            {1.42502,0.00,19.580,0,0.8710,6.5100,100.00,1.7659,5,403.0,14.70,364.31,7.39,23.30},
-            {1.27346,0.00,19.580,1,0.6050,6.2500,92.60,1.7984,5,403.0,14.70,338.92,5.50,27.00},
-            {1.46336,0.00,19.580,0,0.6050,7.4890,90.80,1.9709,5,403.0,14.70,374.43,1.73,50.00},
-            {1.83377,0.00,19.580,1,0.6050,7.8020,98.20,2.0407,5,403.0,14.70,389.61,1.92,50.00},
-            {1.51902,0.00,19.580,1,0.6050,8.3750,93.90,2.1620,5,403.0,14.70,388.45,3.32,50.00},
-            {2.24236,0.00,19.580,0,0.6050,5.8540,91.80,2.4220,5,403.0,14.70,395.11,11.64,22.70},
-            {2.92400,0.00,19.580,0,0.6050,6.1010,93.00,2.2834,5,403.0,14.70,240.16,9.81,25.00},
-            {2.01019,0.00,19.580,0,0.6050,7.9290,96.20,2.0459,5,403.0,14.70,369.30,3.70,50.00},
-            {1.80028,0.00,19.580,0,0.6050,5.8770,79.20,2.4259,5,403.0,14.70,227.61,12.14,23.80},
-            {2.30040,0.00,19.580,0,0.6050,6.3190,96.10,2.1000,5,403.0,14.70,297.09,11.10,23.80},
-            {2.44953,0.00,19.580,0,0.6050,6.4020,95.20,2.2625,5,403.0,14.70,330.04,11.32,22.30},
-            {1.20742,0.00,19.580,0,0.6050,5.8750,94.60,2.4259,5,403.0,14.70,292.29,14.43,17.40},
-            {2.31390,0.00,19.580,0,0.6050,5.8800,97.30,2.3887,5,403.0,14.70,348.13,12.03,19.10},
-            {0.13914,0.00,4.050,0,0.5100,5.5720,88.50,2.5961,5,296.0,16.60,396.90,14.69,23.10},
-            {0.09178,0.00,4.050,0,0.5100,6.4160,84.10,2.6463,5,296.0,16.60,395.50,9.04,23.60},
-            {0.08447,0.00,4.050,0,0.5100,5.8590,68.70,2.7019,5,296.0,16.60,393.23,9.64,22.60},
-            {0.06664,0.00,4.050,0,0.5100,6.5460,33.10,3.1323,5,296.0,16.60,390.96,5.33,29.40},
-            {0.07022,0.00,4.050,0,0.5100,6.0200,47.20,3.5549,5,296.0,16.60,393.23,10.11,23.20},
-            {0.05425,0.00,4.050,0,0.5100,6.3150,73.40,3.3175,5,296.0,16.60,395.60,6.29,24.60},
-            {0.06642,0.00,4.050,0,0.5100,6.8600,74.40,2.9153,5,296.0,16.60,391.27,6.92,29.90},
-            {0.05780,0.00,2.460,0,0.4880,6.9800,58.40,2.8290,3,193.0,17.80,396.90,5.04,37.20},
-            {0.06588,0.00,2.460,0,0.4880,7.7650,83.30,2.7410,3,193.0,17.80,395.56,7.56,39.80},
-            {0.06888,0.00,2.460,0,0.4880,6.1440,62.20,2.5979,3,193.0,17.80,396.90,9.45,36.20},
-            {0.09103,0.00,2.460,0,0.4880,7.1550,92.20,2.7006,3,193.0,17.80,394.12,4.82,37.90},
-            {0.10008,0.00,2.460,0,0.4880,6.5630,95.60,2.8470,3,193.0,17.80,396.90,5.68,32.50},
-            {0.08308,0.00,2.460,0,0.4880,5.6040,89.80,2.9879,3,193.0,17.80,391.00,13.98,26.40},
-            {0.06047,0.00,2.460,0,0.4880,6.1530,68.80,3.2797,3,193.0,17.80,387.11,13.15,29.60},
-            {0.05602,0.00,2.460,0,0.4880,7.8310,53.60,3.1992,3,193.0,17.80,392.63,4.45,50.00},
-            {0.07875,45.00,3.440,0,0.4370,6.7820,41.10,3.7886,5,398.0,15.20,393.87,6.68,32.00},
-            {0.12579,45.00,3.440,0,0.4370,6.5560,29.10,4.5667,5,398.0,15.20,382.84,4.56,29.80},
-            {0.08370,45.00,3.440,0,0.4370,7.1850,38.90,4.5667,5,398.0,15.20,396.90,5.39,34.90},
-            {0.09068,45.00,3.440,0,0.4370,6.9510,21.50,6.4798,5,398.0,15.20,377.68,5.10,37.00},
-            {0.06911,45.00,3.440,0,0.4370,6.7390,30.80,6.4798,5,398.0,15.20,389.71,4.69,30.50},
-            {0.08664,45.00,3.440,0,0.4370,7.1780,26.30,6.4798,5,398.0,15.20,390.49,2.87,36.40},
-            {0.02187,60.00,2.930,0,0.4010,6.8000,9.90,6.2196,1,265.0,15.60,393.37,5.03,31.10},
-            {0.01439,60.00,2.930,0,0.4010,6.6040,18.80,6.2196,1,265.0,15.60,376.70,4.38,29.10},
-            {0.01381,80.00,0.460,0,0.4220,7.8750,32.00,5.6484,4,255.0,14.40,394.23,2.97,50.00},
-            {0.04011,80.00,1.520,0,0.4040,7.2870,34.10,7.3090,2,329.0,12.60,396.90,4.08,33.30},
-            {0.04666,80.00,1.520,0,0.4040,7.1070,36.60,7.3090,2,329.0,12.60,354.31,8.61,30.30},
-            {0.03768,80.00,1.520,0,0.4040,7.2740,38.30,7.3090,2,329.0,12.60,392.20,6.62,34.60},
-            {0.03150,95.00,1.470,0,0.4030,6.9750,15.30,7.6534,3,402.0,17.00,396.90,4.56,34.90},
-            {0.01778,95.00,1.470,0,0.4030,7.1350,13.90,7.6534,3,402.0,17.00,384.30,4.45,32.90},
-            {0.03445,82.50,2.030,0,0.4150,6.1620,38.40,6.2700,2,348.0,14.70,393.77,7.43,24.10},
-            {0.02177,82.50,2.030,0,0.4150,7.6100,15.70,6.2700,2,348.0,14.70,395.38,3.11,42.30},
-            {0.03510,95.00,2.680,0,0.4161,7.8530,33.20,5.1180,4,224.0,14.70,392.78,3.81,48.50},
-            {0.02009,95.00,2.680,0,0.4161,8.0340,31.90,5.1180,4,224.0,14.70,390.55,2.88,50.00},
-            {0.13642,0.00,10.590,0,0.4890,5.8910,22.30,3.9454,4,277.0,18.60,396.90,10.87,22.60},
-            {0.22969,0.00,10.590,0,0.4890,6.3260,52.50,4.3549,4,277.0,18.60,394.87,10.97,24.40},
-            {0.25199,0.00,10.590,0,0.4890,5.7830,72.70,4.3549,4,277.0,18.60,389.43,18.06,22.50},
-            {0.13587,0.00,10.590,1,0.4890,6.0640,59.10,4.2392,4,277.0,18.60,381.32,14.66,24.40},
-            {0.43571,0.00,10.590,1,0.4890,5.3440,100.00,3.8750,4,277.0,18.60,396.90,23.09,20.00},
-            {0.17446,0.00,10.590,1,0.4890,5.9600,92.10,3.8771,4,277.0,18.60,393.25,17.27,21.70},
-            {0.37578,0.00,10.590,1,0.4890,5.4040,88.60,3.6650,4,277.0,18.60,395.24,23.98,19.30},
-            {0.21719,0.00,10.590,1,0.4890,5.8070,53.80,3.6526,4,277.0,18.60,390.94,16.03,22.40},
-            {0.14052,0.00,10.590,0,0.4890,6.3750,32.30,3.9454,4,277.0,18.60,385.81,9.38,28.10},
-            {0.28955,0.00,10.590,0,0.4890,5.4120,9.80,3.5875,4,277.0,18.60,348.93,29.55,23.70},
-            {0.19802,0.00,10.590,0,0.4890,6.1820,42.40,3.9454,4,277.0,18.60,393.63,9.47,25.00},
-            {0.04560,0.00,13.890,1,0.5500,5.8880,56.00,3.1121,5,276.0,16.40,392.80,13.51,23.30},
-            {0.07013,0.00,13.890,0,0.5500,6.6420,85.10,3.4211,5,276.0,16.40,392.78,9.69,28.70},
-            {0.11069,0.00,13.890,1,0.5500,5.9510,93.80,2.8893,5,276.0,16.40,396.90,17.92,21.50},
-            {0.11425,0.00,13.890,1,0.5500,6.3730,92.40,3.3633,5,276.0,16.40,393.74,10.50,23.00},
-            {0.35809,0.00,6.200,1,0.5070,6.9510,88.50,2.8617,8,307.0,17.40,391.70,9.71,26.70},
-            {0.40771,0.00,6.200,1,0.5070,6.1640,91.30,3.0480,8,307.0,17.40,395.24,21.46,21.70},
-            {0.62356,0.00,6.200,1,0.5070,6.8790,77.70,3.2721,8,307.0,17.40,390.39,9.93,27.50},
-            {0.61470,0.00,6.200,0,0.5070,6.6180,80.80,3.2721,8,307.0,17.40,396.90,7.60,30.10},
-            {0.31533,0.00,6.200,0,0.5040,8.2660,78.30,2.8944,8,307.0,17.40,385.05,4.14,44.80},
-            {0.52693,0.00,6.200,0,0.5040,8.7250,83.00,2.8944,8,307.0,17.40,382.00,4.63,50.00},
-            {0.38214,0.00,6.200,0,0.5040,8.0400,86.50,3.2157,8,307.0,17.40,387.38,3.13,37.60},
-            {0.41238,0.00,6.200,0,0.5040,7.1630,79.90,3.2157,8,307.0,17.40,372.08,6.36,31.60},
-            {0.29819,0.00,6.200,0,0.5040,7.6860,17.00,3.3751,8,307.0,17.40,377.51,3.92,46.70},
-            {0.44178,0.00,6.200,0,0.5040,6.5520,21.40,3.3751,8,307.0,17.40,380.34,3.76,31.50},
-            {0.53700,0.00,6.200,0,0.5040,5.9810,68.10,3.6715,8,307.0,17.40,378.35,11.65,24.30},
-            {0.46296,0.00,6.200,0,0.5040,7.4120,76.90,3.6715,8,307.0,17.40,376.14,5.25,31.70},
-            {0.57529,0.00,6.200,0,0.5070,8.3370,73.30,3.8384,8,307.0,17.40,385.91,2.47,41.70},
-            {0.33147,0.00,6.200,0,0.5070,8.2470,70.40,3.6519,8,307.0,17.40,378.95,3.95,48.30},
-            {0.44791,0.00,6.200,1,0.5070,6.7260,66.50,3.6519,8,307.0,17.40,360.20,8.05,29.00},
-            {0.33045,0.00,6.200,0,0.5070,6.0860,61.50,3.6519,8,307.0,17.40,376.75,10.88,24.00},
-            {0.52058,0.00,6.200,1,0.5070,6.6310,76.50,4.1480,8,307.0,17.40,388.45,9.54,25.10},
-            {0.51183,0.00,6.200,0,0.5070,7.3580,71.60,4.1480,8,307.0,17.40,390.07,4.73,31.50},
-            {0.08244,30.00,4.930,0,0.4280,6.4810,18.50,6.1899,6,300.0,16.60,379.41,6.36,23.70},
-            {0.09252,30.00,4.930,0,0.4280,6.6060,42.20,6.1899,6,300.0,16.60,383.78,7.37,23.30},
-            {0.11329,30.00,4.930,0,0.4280,6.8970,54.30,6.3361,6,300.0,16.60,391.25,11.38,22.00},
-            {0.10612,30.00,4.930,0,0.4280,6.0950,65.10,6.3361,6,300.0,16.60,394.62,12.40,20.10},
-            {0.10290,30.00,4.930,0,0.4280,6.3580,52.90,7.0355,6,300.0,16.60,372.75,11.22,22.20},
-            {0.12757,30.00,4.930,0,0.4280,6.3930,7.80,7.0355,6,300.0,16.60,374.71,5.19,23.70},
-            {0.20608,22.00,5.860,0,0.4310,5.5930,76.50,7.9549,7,330.0,19.10,372.49,12.50,17.60},
-            {0.19133,22.00,5.860,0,0.4310,5.6050,70.20,7.9549,7,330.0,19.10,389.13,18.46,18.50},
-            {0.33983,22.00,5.860,0,0.4310,6.1080,34.90,8.0555,7,330.0,19.10,390.18,9.16,24.30},
-            {0.19657,22.00,5.860,0,0.4310,6.2260,79.20,8.0555,7,330.0,19.10,376.14,10.15,20.50},
-            {0.16439,22.00,5.860,0,0.4310,6.4330,49.10,7.8265,7,330.0,19.10,374.71,9.52,24.50},
-            {0.19073,22.00,5.860,0,0.4310,6.7180,17.50,7.8265,7,330.0,19.10,393.74,6.56,26.20},
-            {0.14030,22.00,5.860,0,0.4310,6.4870,13.00,7.3967,7,330.0,19.10,396.28,5.90,24.40},
-            {0.21409,22.00,5.860,0,0.4310,6.4380,8.90,7.3967,7,330.0,19.10,377.07,3.59,24.80},
-            {0.08221,22.00,5.860,0,0.4310,6.9570,6.80,8.9067,7,330.0,19.10,386.09,3.53,29.60},
-            {0.36894,22.00,5.860,0,0.4310,8.2590,8.40,8.9067,7,330.0,19.10,396.90,3.54,42.80},
-            {0.04819,80.00,3.640,0,0.3920,6.1080,32.00,9.2203,1,315.0,16.40,392.89,6.57,21.90},
-            {0.03548,80.00,3.640,0,0.3920,5.8760,19.10,9.2203,1,315.0,16.40,395.18,9.25,20.90},
-            {0.01538,90.00,3.750,0,0.3940,7.4540,34.20,6.3361,3,244.0,15.90,386.34,3.11,44.00},
-            {0.61154,20.00,3.970,0,0.6470,8.7040,86.90,1.8010,5,264.0,13.00,389.70,5.12,50.00},
-            {0.66351,20.00,3.970,0,0.6470,7.3330,100.00,1.8946,5,264.0,13.00,383.29,7.79,36.00},
-            {0.65665,20.00,3.970,0,0.6470,6.8420,100.00,2.0107,5,264.0,13.00,391.93,6.90,30.10},
-            {0.54011,20.00,3.970,0,0.6470,7.2030,81.80,2.1121,5,264.0,13.00,392.80,9.59,33.80},
-            {0.53412,20.00,3.970,0,0.6470,7.5200,89.40,2.1398,5,264.0,13.00,388.37,7.26,43.10},
-            {0.52014,20.00,3.970,0,0.6470,8.3980,91.50,2.2885,5,264.0,13.00,386.86,5.91,48.80},
-            {0.82526,20.00,3.970,0,0.6470,7.3270,94.50,2.0788,5,264.0,13.00,393.42,11.25,31.00},
-            {0.55007,20.00,3.970,0,0.6470,7.2060,91.60,1.9301,5,264.0,13.00,387.89,8.10,36.50},
-            {0.76162,20.00,3.970,0,0.6470,5.5600,62.80,1.9865,5,264.0,13.00,392.40,10.45,22.80},
-            {0.78570,20.00,3.970,0,0.6470,7.0140,84.60,2.1329,5,264.0,13.00,384.07,14.79,30.70},
-            {0.57834,20.00,3.970,0,0.5750,8.2970,67.00,2.4216,5,264.0,13.00,384.54,7.44,50.00},
-            {0.54050,20.00,3.970,0,0.5750,7.4700,52.60,2.8720,5,264.0,13.00,390.30,3.16,43.50},
-            {0.09065,20.00,6.960,1,0.4640,5.9200,61.50,3.9175,3,223.0,18.60,391.34,13.65,20.70},
-            {0.29916,20.00,6.960,0,0.4640,5.8560,42.10,4.4290,3,223.0,18.60,388.65,13.00,21.10},
-            {0.16211,20.00,6.960,0,0.4640,6.2400,16.30,4.4290,3,223.0,18.60,396.90,6.59,25.20},
-            {0.11460,20.00,6.960,0,0.4640,6.5380,58.70,3.9175,3,223.0,18.60,394.96,7.73,24.40},
-            {0.22188,20.00,6.960,1,0.4640,7.6910,51.80,4.3665,3,223.0,18.60,390.77,6.58,35.20},
-            {0.05644,40.00,6.410,1,0.4470,6.7580,32.90,4.0776,4,254.0,17.60,396.90,3.53,32.40},
-            {0.09604,40.00,6.410,0,0.4470,6.8540,42.80,4.2673,4,254.0,17.60,396.90,2.98,32.00},
-            {0.10469,40.00,6.410,1,0.4470,7.2670,49.00,4.7872,4,254.0,17.60,389.25,6.05,33.20},
-            {0.06127,40.00,6.410,1,0.4470,6.8260,27.60,4.8628,4,254.0,17.60,393.45,4.16,33.10},
-            {0.07978,40.00,6.410,0,0.4470,6.4820,32.10,4.1403,4,254.0,17.60,396.90,7.19,29.10},
-            {0.21038,20.00,3.330,0,0.4429,6.8120,32.20,4.1007,5,216.0,14.90,396.90,4.85,35.10},
-            {0.03578,20.00,3.330,0,0.4429,7.8200,64.50,4.6947,5,216.0,14.90,387.31,3.76,45.40},
-            {0.03705,20.00,3.330,0,0.4429,6.9680,37.20,5.2447,5,216.0,14.90,392.23,4.59,35.40},
-            {0.06129,20.00,3.330,1,0.4429,7.6450,49.70,5.2119,5,216.0,14.90,377.07,3.01,46.00},
-            {0.01501,90.00,1.210,1,0.4010,7.9230,24.80,5.8850,1,198.0,13.60,395.52,3.16,50.00},
-            {0.00906,90.00,2.970,0,0.4000,7.0880,20.80,7.3073,1,285.0,15.30,394.72,7.85,32.20},
-            {0.01096,55.00,2.250,0,0.3890,6.4530,31.90,7.3073,1,300.0,15.30,394.72,8.23,22.00},
-            {0.01965,80.00,1.760,0,0.3850,6.2300,31.50,9.0892,1,241.0,18.20,341.60,12.93,20.10},
-            {0.03871,52.50,5.320,0,0.4050,6.2090,31.30,7.3172,6,293.0,16.60,396.90,7.14,23.20},
-            {0.04590,52.50,5.320,0,0.4050,6.3150,45.60,7.3172,6,293.0,16.60,396.90,7.60,22.30},
-            {0.04297,52.50,5.320,0,0.4050,6.5650,22.90,7.3172,6,293.0,16.60,371.72,9.51,24.80},
-            {0.03502,80.00,4.950,0,0.4110,6.8610,27.90,5.1167,4,245.0,19.20,396.90,3.33,28.50},
-            {0.07886,80.00,4.950,0,0.4110,7.1480,27.70,5.1167,4,245.0,19.20,396.90,3.56,37.30},
-            {0.03615,80.00,4.950,0,0.4110,6.6300,23.40,5.1167,4,245.0,19.20,396.90,4.70,27.90},
-            {0.08265,0.00,13.920,0,0.4370,6.1270,18.40,5.5027,4,289.0,16.00,396.90,8.58,23.90},
-            {0.08199,0.00,13.920,0,0.4370,6.0090,42.30,5.5027,4,289.0,16.00,396.90,10.40,21.70},
-            {0.12932,0.00,13.920,0,0.4370,6.6780,31.10,5.9604,4,289.0,16.00,396.90,6.27,28.60},
-            {0.05372,0.00,13.920,0,0.4370,6.5490,51.00,5.9604,4,289.0,16.00,392.85,7.39,27.10},
-            {0.14103,0.00,13.920,0,0.4370,5.7900,58.00,6.3200,4,289.0,16.00,396.90,15.84,20.30},
-            {0.06466,70.00,2.240,0,0.4000,6.3450,20.10,7.8278,5,358.0,14.80,368.24,4.97,22.50},
-            {0.05561,70.00,2.240,0,0.4000,7.0410,10.00,7.8278,5,358.0,14.80,371.58,4.74,29.00},
-            {0.04417,70.00,2.240,0,0.4000,6.8710,47.40,7.8278,5,358.0,14.80,390.86,6.07,24.80},
-            {0.03537,34.00,6.090,0,0.4330,6.5900,40.40,5.4917,7,329.0,16.10,395.75,9.50,22.00},
-            {0.09266,34.00,6.090,0,0.4330,6.4950,18.40,5.4917,7,329.0,16.10,383.61,8.67,26.40},
-            {0.10000,34.00,6.090,0,0.4330,6.9820,17.70,5.4917,7,329.0,16.10,390.43,4.86,33.10},
-            {0.05515,33.00,2.180,0,0.4720,7.2360,41.10,4.0220,7,222.0,18.40,393.68,6.93,36.10},
-            {0.05479,33.00,2.180,0,0.4720,6.6160,58.10,3.3700,7,222.0,18.40,393.36,8.93,28.40},
-            {0.07503,33.00,2.180,0,0.4720,7.4200,71.90,3.0992,7,222.0,18.40,396.90,6.47,33.40},
-            {0.04932,33.00,2.180,0,0.4720,6.8490,70.30,3.1827,7,222.0,18.40,396.90,7.53,28.20},
-            {0.49298,0.00,9.900,0,0.5440,6.6350,82.50,3.3175,4,304.0,18.40,396.90,4.54,22.80},
-            {0.34940,0.00,9.900,0,0.5440,5.9720,76.70,3.1025,4,304.0,18.40,396.24,9.97,20.30},
-            {2.63548,0.00,9.900,0,0.5440,4.9730,37.80,2.5194,4,304.0,18.40,350.45,12.64,16.10},
-            {0.79041,0.00,9.900,0,0.5440,6.1220,52.80,2.6403,4,304.0,18.40,396.90,5.98,22.10},
-            {0.26169,0.00,9.900,0,0.5440,6.0230,90.40,2.8340,4,304.0,18.40,396.30,11.72,19.40},
-            {0.26938,0.00,9.900,0,0.5440,6.2660,82.80,3.2628,4,304.0,18.40,393.39,7.90,21.60},
-            {0.36920,0.00,9.900,0,0.5440,6.5670,87.30,3.6023,4,304.0,18.40,395.69,9.28,23.80},
-            {0.25356,0.00,9.900,0,0.5440,5.7050,77.70,3.9450,4,304.0,18.40,396.42,11.50,16.20},
-            {0.31827,0.00,9.900,0,0.5440,5.9140,83.20,3.9986,4,304.0,18.40,390.70,18.33,17.80},
-            {0.24522,0.00,9.900,0,0.5440,5.7820,71.70,4.0317,4,304.0,18.40,396.90,15.94,19.80},
-            {0.40202,0.00,9.900,0,0.5440,6.3820,67.20,3.5325,4,304.0,18.40,395.21,10.36,23.10},
-            {0.47547,0.00,9.900,0,0.5440,6.1130,58.80,4.0019,4,304.0,18.40,396.23,12.73,21.00},
-            {0.16760,0.00,7.380,0,0.4930,6.4260,52.30,4.5404,5,287.0,19.60,396.90,7.20,23.80},
-            {0.18159,0.00,7.380,0,0.4930,6.3760,54.30,4.5404,5,287.0,19.60,396.90,6.87,23.10},
-            {0.35114,0.00,7.380,0,0.4930,6.0410,49.90,4.7211,5,287.0,19.60,396.90,7.70,20.40},
-            {0.28392,0.00,7.380,0,0.4930,5.7080,74.30,4.7211,5,287.0,19.60,391.13,11.74,18.50},
-            {0.34109,0.00,7.380,0,0.4930,6.4150,40.10,4.7211,5,287.0,19.60,396.90,6.12,25.00},
-            {0.19186,0.00,7.380,0,0.4930,6.4310,14.70,5.4159,5,287.0,19.60,393.68,5.08,24.60},
-            {0.30347,0.00,7.380,0,0.4930,6.3120,28.90,5.4159,5,287.0,19.60,396.90,6.15,23.00},
-            {0.24103,0.00,7.380,0,0.4930,6.0830,43.70,5.4159,5,287.0,19.60,396.90,12.79,22.20},
-            {0.06617,0.00,3.240,0,0.4600,5.8680,25.80,5.2146,4,430.0,16.90,382.44,9.97,19.30},
-            {0.06724,0.00,3.240,0,0.4600,6.3330,17.20,5.2146,4,430.0,16.90,375.21,7.34,22.60},
-            {0.04544,0.00,3.240,0,0.4600,6.1440,32.20,5.8736,4,430.0,16.90,368.57,9.09,19.80},
-            {0.05023,35.00,6.060,0,0.4379,5.7060,28.40,6.6407,1,304.0,16.90,394.02,12.43,17.10},
-            {0.03466,35.00,6.060,0,0.4379,6.0310,23.30,6.6407,1,304.0,16.90,362.25,7.83,19.40},
-            {0.05083,0.00,5.190,0,0.5150,6.3160,38.10,6.4584,5,224.0,20.20,389.71,5.68,22.20},
-            {0.03738,0.00,5.190,0,0.5150,6.3100,38.50,6.4584,5,224.0,20.20,389.40,6.75,20.70},
-            {0.03961,0.00,5.190,0,0.5150,6.0370,34.50,5.9853,5,224.0,20.20,396.90,8.01,21.10},
-            {0.03427,0.00,5.190,0,0.5150,5.8690,46.30,5.2311,5,224.0,20.20,396.90,9.80,19.50},
-            {0.03041,0.00,5.190,0,0.5150,5.8950,59.60,5.6150,5,224.0,20.20,394.81,10.56,18.50},
-            {0.03306,0.00,5.190,0,0.5150,6.0590,37.30,4.8122,5,224.0,20.20,396.14,8.51,20.60},
-            {0.05497,0.00,5.190,0,0.5150,5.9850,45.40,4.8122,5,224.0,20.20,396.90,9.74,19.00},
-            {0.06151,0.00,5.190,0,0.5150,5.9680,58.50,4.8122,5,224.0,20.20,396.90,9.29,18.70},
-            {0.01301,35.00,1.520,0,0.4420,7.2410,49.30,7.0379,1,284.0,15.50,394.74,5.49,32.70},
-            {0.02498,0.00,1.890,0,0.5180,6.5400,59.70,6.2669,1,422.0,15.90,389.96,8.65,16.50},
-            {0.02543,55.00,3.780,0,0.4840,6.6960,56.40,5.7321,5,370.0,17.60,396.90,7.18,23.90},
-            {0.03049,55.00,3.780,0,0.4840,6.8740,28.10,6.4654,5,370.0,17.60,387.97,4.61,31.20},
-            {0.03113,0.00,4.390,0,0.4420,6.0140,48.50,8.0136,3,352.0,18.80,385.64,10.53,17.50},
-            {0.06162,0.00,4.390,0,0.4420,5.8980,52.30,8.0136,3,352.0,18.80,364.61,12.67,17.20},
-            {0.01870,85.00,4.150,0,0.4290,6.5160,27.70,8.5353,4,351.0,17.90,392.43,6.36,23.10},
-            {0.01501,80.00,2.010,0,0.4350,6.6350,29.70,8.3440,4,280.0,17.00,390.94,5.99,24.50},
-            {0.02899,40.00,1.250,0,0.4290,6.9390,34.50,8.7921,1,335.0,19.70,389.85,5.89,26.60},
-            {0.06211,40.00,1.250,0,0.4290,6.4900,44.40,8.7921,1,335.0,19.70,396.90,5.98,22.90},
-            {0.07950,60.00,1.690,0,0.4110,6.5790,35.90,10.7103,4,411.0,18.30,370.78,5.49,24.10},
-            {0.07244,60.00,1.690,0,0.4110,5.8840,18.50,10.7103,4,411.0,18.30,392.33,7.79,18.60},
-            {0.01709,90.00,2.020,0,0.4100,6.7280,36.10,12.1265,5,187.0,17.00,384.46,4.50,30.10},
-            {0.04301,80.00,1.910,0,0.4130,5.6630,21.90,10.5857,4,334.0,22.00,382.80,8.05,18.20},
-            {0.10659,80.00,1.910,0,0.4130,5.9360,19.50,10.5857,4,334.0,22.00,376.04,5.57,20.60},
-            {8.98296,0.00,18.100,1,0.7700,6.2120,97.40,2.1222,24,666.0,20.20,377.73,17.60,17.80},
-            {3.84970,0.00,18.100,1,0.7700,6.3950,91.00,2.5052,24,666.0,20.20,391.34,13.27,21.70},
-            {5.20177,0.00,18.100,1,0.7700,6.1270,83.40,2.7227,24,666.0,20.20,395.43,11.48,22.70},
-            {4.26131,0.00,18.100,0,0.7700,6.1120,81.30,2.5091,24,666.0,20.20,390.74,12.67,22.60},
-            {4.54192,0.00,18.100,0,0.7700,6.3980,88.00,2.5182,24,666.0,20.20,374.56,7.79,25.00},
-            {3.83684,0.00,18.100,0,0.7700,6.2510,91.10,2.2955,24,666.0,20.20,350.65,14.19,19.90},
-            {3.67822,0.00,18.100,0,0.7700,5.3620,96.20,2.1036,24,666.0,20.20,380.79,10.19,20.80},
-            {4.22239,0.00,18.100,1,0.7700,5.8030,89.00,1.9047,24,666.0,20.20,353.04,14.64,16.80},
-            {3.47428,0.00,18.100,1,0.7180,8.7800,82.90,1.9047,24,666.0,20.20,354.55,5.29,21.90},
-            {4.55587,0.00,18.100,0,0.7180,3.5610,87.90,1.6132,24,666.0,20.20,354.70,7.12,27.50},
-            {3.69695,0.00,18.100,0,0.7180,4.9630,91.40,1.7523,24,666.0,20.20,316.03,14.00,21.90},
-            {13.52220,0.00,18.100,0,0.6310,3.8630,100.00,1.5106,24,666.0,20.20,131.42,13.33,23.10},
-            {4.89822,0.00,18.100,0,0.6310,4.9700,100.00,1.3325,24,666.0,20.20,375.52,3.26,50.00},
-            {5.66998,0.00,18.100,1,0.6310,6.6830,96.80,1.3567,24,666.0,20.20,375.33,3.73,50.00},
-            {6.53876,0.00,18.100,1,0.6310,7.0160,97.50,1.2024,24,666.0,20.20,392.05,2.96,50.00},
-            {9.23230,0.00,18.100,0,0.6310,6.2160,100.00,1.1691,24,666.0,20.20,366.15,9.53,50.00},
-            {8.26725,0.00,18.100,1,0.6680,5.8750,89.60,1.1296,24,666.0,20.20,347.88,8.88,50.00},
-            {11.10810,0.00,18.100,0,0.6680,4.9060,100.00,1.1742,24,666.0,20.20,396.90,34.77,13.80},
-            {18.49820,0.00,18.100,0,0.6680,4.1380,100.00,1.1370,24,666.0,20.20,396.90,37.97,13.80},
-            {19.60910,0.00,18.100,0,0.6710,7.3130,97.90,1.3163,24,666.0,20.20,396.90,13.44,15.00},
-            {15.28800,0.00,18.100,0,0.6710,6.6490,93.30,1.3449,24,666.0,20.20,363.02,23.24,13.90},
-            {9.82349,0.00,18.100,0,0.6710,6.7940,98.80,1.3580,24,666.0,20.20,396.90,21.24,13.30},
-            {23.64820,0.00,18.100,0,0.6710,6.3800,96.20,1.3861,24,666.0,20.20,396.90,23.69,13.10},
-            {17.86670,0.00,18.100,0,0.6710,6.2230,100.00,1.3861,24,666.0,20.20,393.74,21.78,10.20},
-            {88.97620,0.00,18.100,0,0.6710,6.9680,91.90,1.4165,24,666.0,20.20,396.90,17.21,10.40},
-            {15.87440,0.00,18.100,0,0.6710,6.5450,99.10,1.5192,24,666.0,20.20,396.90,21.08,10.90},
-            {9.18702,0.00,18.100,0,0.7000,5.5360,100.00,1.5804,24,666.0,20.20,396.90,23.60,11.30},
-            {7.99248,0.00,18.100,0,0.7000,5.5200,100.00,1.5331,24,666.0,20.20,396.90,24.56,12.30},
-            {20.08490,0.00,18.100,0,0.7000,4.3680,91.20,1.4395,24,666.0,20.20,285.83,30.63,8.80},
-            {16.81180,0.00,18.100,0,0.7000,5.2770,98.10,1.4261,24,666.0,20.20,396.90,30.81,7.20},
-            {24.39380,0.00,18.100,0,0.7000,4.6520,100.00,1.4672,24,666.0,20.20,396.90,28.28,10.50},
-            {22.59710,0.00,18.100,0,0.7000,5.0000,89.50,1.5184,24,666.0,20.20,396.90,31.99,7.40},
-            {14.33370,0.00,18.100,0,0.7000,4.8800,100.00,1.5895,24,666.0,20.20,372.92,30.62,10.20},
-            {8.15174,0.00,18.100,0,0.7000,5.3900,98.90,1.7281,24,666.0,20.20,396.90,20.85,11.50},
-            {6.96215,0.00,18.100,0,0.7000,5.7130,97.00,1.9265,24,666.0,20.20,394.43,17.11,15.10},
-            {5.29305,0.00,18.100,0,0.7000,6.0510,82.50,2.1678,24,666.0,20.20,378.38,18.76,23.20},
-            {11.57790,0.00,18.100,0,0.7000,5.0360,97.00,1.7700,24,666.0,20.20,396.90,25.68,9.70},
-            {8.64476,0.00,18.100,0,0.6930,6.1930,92.60,1.7912,24,666.0,20.20,396.90,15.17,13.80},
-            {13.35980,0.00,18.100,0,0.6930,5.8870,94.70,1.7821,24,666.0,20.20,396.90,16.35,12.70},
-            {8.71675,0.00,18.100,0,0.6930,6.4710,98.80,1.7257,24,666.0,20.20,391.98,17.12,13.10},
-            {5.87205,0.00,18.100,0,0.6930,6.4050,96.00,1.6768,24,666.0,20.20,396.90,19.37,12.50},
-            {7.67202,0.00,18.100,0,0.6930,5.7470,98.90,1.6334,24,666.0,20.20,393.10,19.92,8.50},
-            {38.35180,0.00,18.100,0,0.6930,5.4530,100.00,1.4896,24,666.0,20.20,396.90,30.59,5.00},
-            {9.91655,0.00,18.100,0,0.6930,5.8520,77.80,1.5004,24,666.0,20.20,338.16,29.97,6.30},
-            {25.04610,0.00,18.100,0,0.6930,5.9870,100.00,1.5888,24,666.0,20.20,396.90,26.77,5.60},
-            {14.23620,0.00,18.100,0,0.6930,6.3430,100.00,1.5741,24,666.0,20.20,396.90,20.32,7.20},
-            {9.59571,0.00,18.100,0,0.6930,6.4040,100.00,1.6390,24,666.0,20.20,376.11,20.31,12.10},
-            {24.80170,0.00,18.100,0,0.6930,5.3490,96.00,1.7028,24,666.0,20.20,396.90,19.77,8.30},
-            {41.52920,0.00,18.100,0,0.6930,5.5310,85.40,1.6074,24,666.0,20.20,329.46,27.38,8.50},
-            {67.92080,0.00,18.100,0,0.6930,5.6830,100.00,1.4254,24,666.0,20.20,384.97,22.98,5.00},
-            {20.71620,0.00,18.100,0,0.6590,4.1380,100.00,1.1781,24,666.0,20.20,370.22,23.34,11.90},
-            {11.95110,0.00,18.100,0,0.6590,5.6080,100.00,1.2852,24,666.0,20.20,332.09,12.13,27.90},
-            {7.40389,0.00,18.100,0,0.5970,5.6170,97.90,1.4547,24,666.0,20.20,314.64,26.40,17.20},
-            {14.43830,0.00,18.100,0,0.5970,6.8520,100.00,1.4655,24,666.0,20.20,179.36,19.78,27.50},
-            {51.13580,0.00,18.100,0,0.5970,5.7570,100.00,1.4130,24,666.0,20.20,2.60,10.11,15.00},
-            {14.05070,0.00,18.100,0,0.5970,6.6570,100.00,1.5275,24,666.0,20.20,35.05,21.22,17.20},
-            {18.81100,0.00,18.100,0,0.5970,4.6280,100.00,1.5539,24,666.0,20.20,28.79,34.37,17.90},
-            {28.65580,0.00,18.100,0,0.5970,5.1550,100.00,1.5894,24,666.0,20.20,210.97,20.08,16.30},
-            {45.74610,0.00,18.100,0,0.6930,4.5190,100.00,1.6582,24,666.0,20.20,88.27,36.98,7.00},
-            {18.08460,0.00,18.100,0,0.6790,6.4340,100.00,1.8347,24,666.0,20.20,27.25,29.05,7.20},
-            {10.83420,0.00,18.100,0,0.6790,6.7820,90.80,1.8195,24,666.0,20.20,21.57,25.79,7.50},
-            {25.94060,0.00,18.100,0,0.6790,5.3040,89.10,1.6475,24,666.0,20.20,127.36,26.64,10.40},
-            {73.53410,0.00,18.100,0,0.6790,5.9570,100.00,1.8026,24,666.0,20.20,16.45,20.62,8.80},
-            {11.81230,0.00,18.100,0,0.7180,6.8240,76.50,1.7940,24,666.0,20.20,48.45,22.74,8.40},
-            {11.08740,0.00,18.100,0,0.7180,6.4110,100.00,1.8589,24,666.0,20.20,318.75,15.02,16.70},
-            {7.02259,0.00,18.100,0,0.7180,6.0060,95.30,1.8746,24,666.0,20.20,319.98,15.70,14.20},
-            {12.04820,0.00,18.100,0,0.6140,5.6480,87.60,1.9512,24,666.0,20.20,291.55,14.10,20.80},
-            {7.05042,0.00,18.100,0,0.6140,6.1030,85.10,2.0218,24,666.0,20.20,2.52,23.29,13.40},
-            {8.79212,0.00,18.100,0,0.5840,5.5650,70.60,2.0635,24,666.0,20.20,3.65,17.16,11.70},
-            {15.86030,0.00,18.100,0,0.6790,5.8960,95.40,1.9096,24,666.0,20.20,7.68,24.39,8.30},
-            {12.24720,0.00,18.100,0,0.5840,5.8370,59.70,1.9976,24,666.0,20.20,24.65,15.69,10.20},
-            {37.66190,0.00,18.100,0,0.6790,6.2020,78.70,1.8629,24,666.0,20.20,18.82,14.52,10.90},
-            {7.36711,0.00,18.100,0,0.6790,6.1930,78.10,1.9356,24,666.0,20.20,96.73,21.52,11.00},
-            {9.33889,0.00,18.100,0,0.6790,6.3800,95.60,1.9682,24,666.0,20.20,60.72,24.08,9.50},
-            {8.49213,0.00,18.100,0,0.5840,6.3480,86.10,2.0527,24,666.0,20.20,83.45,17.64,14.50},
-            {10.06230,0.00,18.100,0,0.5840,6.8330,94.30,2.0882,24,666.0,20.20,81.33,19.69,14.10},
-            {6.44405,0.00,18.100,0,0.5840,6.4250,74.80,2.2004,24,666.0,20.20,97.95,12.03,16.10},
-            {5.58107,0.00,18.100,0,0.7130,6.4360,87.90,2.3158,24,666.0,20.20,100.19,16.22,14.30},
-            {13.91340,0.00,18.100,0,0.7130,6.2080,95.00,2.2222,24,666.0,20.20,100.63,15.17,11.70},
-            {11.16040,0.00,18.100,0,0.7400,6.6290,94.60,2.1247,24,666.0,20.20,109.85,23.27,13.40},
-            {14.42080,0.00,18.100,0,0.7400,6.4610,93.30,2.0026,24,666.0,20.20,27.49,18.05,9.60},
-            {15.17720,0.00,18.100,0,0.7400,6.1520,100.00,1.9142,24,666.0,20.20,9.32,26.45,8.70},
-            {13.67810,0.00,18.100,0,0.7400,5.9350,87.90,1.8206,24,666.0,20.20,68.95,34.02,8.40},
-            {9.39063,0.00,18.100,0,0.7400,5.6270,93.90,1.8172,24,666.0,20.20,396.90,22.88,12.80},
-            {22.05110,0.00,18.100,0,0.7400,5.8180,92.40,1.8662,24,666.0,20.20,391.45,22.11,10.50},
-            {9.72418,0.00,18.100,0,0.7400,6.4060,97.20,2.0651,24,666.0,20.20,385.96,19.52,17.10},
-            {5.66637,0.00,18.100,0,0.7400,6.2190,100.00,2.0048,24,666.0,20.20,395.69,16.59,18.40},
-            {9.96654,0.00,18.100,0,0.7400,6.4850,100.00,1.9784,24,666.0,20.20,386.73,18.85,15.40},
-            {12.80230,0.00,18.100,0,0.7400,5.8540,96.60,1.8956,24,666.0,20.20,240.52,23.79,10.80},
-            {10.67180,0.00,18.100,0,0.7400,6.4590,94.80,1.9879,24,666.0,20.20,43.06,23.98,11.80},
-            {6.28807,0.00,18.100,0,0.7400,6.3410,96.40,2.0720,24,666.0,20.20,318.01,17.79,14.90},
-            {9.92485,0.00,18.100,0,0.7400,6.2510,96.60,2.1980,24,666.0,20.20,388.52,16.44,12.60},
-            {9.32909,0.00,18.100,0,0.7130,6.1850,98.70,2.2616,24,666.0,20.20,396.90,18.13,14.10},
-            {7.52601,0.00,18.100,0,0.7130,6.4170,98.30,2.1850,24,666.0,20.20,304.21,19.31,13.00},
-            {6.71772,0.00,18.100,0,0.7130,6.7490,92.60,2.3236,24,666.0,20.20,0.32,17.44,13.40},
-            {5.44114,0.00,18.100,0,0.7130,6.6550,98.20,2.3552,24,666.0,20.20,355.29,17.73,15.20},
-            {5.09017,0.00,18.100,0,0.7130,6.2970,91.80,2.3682,24,666.0,20.20,385.09,17.27,16.10},
-            {8.24809,0.00,18.100,0,0.7130,7.3930,99.30,2.4527,24,666.0,20.20,375.87,16.74,17.80},
-            {9.51363,0.00,18.100,0,0.7130,6.7280,94.10,2.4961,24,666.0,20.20,6.68,18.71,14.90},
-            {4.75237,0.00,18.100,0,0.7130,6.5250,86.50,2.4358,24,666.0,20.20,50.92,18.13,14.10},
-            {4.66883,0.00,18.100,0,0.7130,5.9760,87.90,2.5806,24,666.0,20.20,10.48,19.01,12.70},
-            {8.20058,0.00,18.100,0,0.7130,5.9360,80.30,2.7792,24,666.0,20.20,3.50,16.94,13.50},
-            {7.75223,0.00,18.100,0,0.7130,6.3010,83.70,2.7831,24,666.0,20.20,272.21,16.23,14.90},
-            {6.80117,0.00,18.100,0,0.7130,6.0810,84.40,2.7175,24,666.0,20.20,396.90,14.70,20.00},
-            {4.81213,0.00,18.100,0,0.7130,6.7010,90.00,2.5975,24,666.0,20.20,255.23,16.42,16.40},
-            {3.69311,0.00,18.100,0,0.7130,6.3760,88.40,2.5671,24,666.0,20.20,391.43,14.65,17.70},
-            {6.65492,0.00,18.100,0,0.7130,6.3170,83.00,2.7344,24,666.0,20.20,396.90,13.99,19.50},
-            {5.82115,0.00,18.100,0,0.7130,6.5130,89.90,2.8016,24,666.0,20.20,393.82,10.29,20.20},
-            {7.83932,0.00,18.100,0,0.6550,6.2090,65.40,2.9634,24,666.0,20.20,396.90,13.22,21.40},
-            {3.16360,0.00,18.100,0,0.6550,5.7590,48.20,3.0665,24,666.0,20.20,334.40,14.13,19.90},
-            {3.77498,0.00,18.100,0,0.6550,5.9520,84.70,2.8715,24,666.0,20.20,22.01,17.15,19.00},
-            {4.42228,0.00,18.100,0,0.5840,6.0030,94.50,2.5403,24,666.0,20.20,331.29,21.32,19.10},
-            {15.57570,0.00,18.100,0,0.5800,5.9260,71.00,2.9084,24,666.0,20.20,368.74,18.13,19.10},
-            {13.07510,0.00,18.100,0,0.5800,5.7130,56.70,2.8237,24,666.0,20.20,396.90,14.76,20.10},
-            {4.34879,0.00,18.100,0,0.5800,6.1670,84.00,3.0334,24,666.0,20.20,396.90,16.29,19.90},
-            {4.03841,0.00,18.100,0,0.5320,6.2290,90.70,3.0993,24,666.0,20.20,395.33,12.87,19.60},
-            {3.56868,0.00,18.100,0,0.5800,6.4370,75.00,2.8965,24,666.0,20.20,393.37,14.36,23.20},
-            {4.64689,0.00,18.100,0,0.6140,6.9800,67.60,2.5329,24,666.0,20.20,374.68,11.66,29.80},
-            {8.05579,0.00,18.100,0,0.5840,5.4270,95.40,2.4298,24,666.0,20.20,352.58,18.14,13.80},
-            {6.39312,0.00,18.100,0,0.5840,6.1620,97.40,2.2060,24,666.0,20.20,302.76,24.10,13.30},
-            {4.87141,0.00,18.100,0,0.6140,6.4840,93.60,2.3053,24,666.0,20.20,396.21,18.68,16.70},
-            {15.02340,0.00,18.100,0,0.6140,5.3040,97.30,2.1007,24,666.0,20.20,349.48,24.91,12.00},
-            {10.23300,0.00,18.100,0,0.6140,6.1850,96.70,2.1705,24,666.0,20.20,379.70,18.03,14.60},
-            {14.33370,0.00,18.100,0,0.6140,6.2290,88.00,1.9512,24,666.0,20.20,383.32,13.11,21.40},
-            {5.82401,0.00,18.100,0,0.5320,6.2420,64.70,3.4242,24,666.0,20.20,396.90,10.74,23.00},
-            {5.70818,0.00,18.100,0,0.5320,6.7500,74.90,3.3317,24,666.0,20.20,393.07,7.74,23.70},
-            {5.73116,0.00,18.100,0,0.5320,7.0610,77.00,3.4106,24,666.0,20.20,395.28,7.01,25.00},
-            {2.81838,0.00,18.100,0,0.5320,5.7620,40.30,4.0983,24,666.0,20.20,392.92,10.42,21.80},
-            {2.37857,0.00,18.100,0,0.5830,5.8710,41.90,3.7240,24,666.0,20.20,370.73,13.34,20.60},
-            {3.67367,0.00,18.100,0,0.5830,6.3120,51.90,3.9917,24,666.0,20.20,388.62,10.58,21.20},
-            {5.69175,0.00,18.100,0,0.5830,6.1140,79.80,3.5459,24,666.0,20.20,392.68,14.98,19.10},
-            {4.83567,0.00,18.100,0,0.5830,5.9050,53.20,3.1523,24,666.0,20.20,388.22,11.45,20.60},
-            {0.15086,0.00,27.740,0,0.6090,5.4540,92.70,1.8209,4,711.0,20.10,395.09,18.06,15.20},
-            {0.18337,0.00,27.740,0,0.6090,5.4140,98.30,1.7554,4,711.0,20.10,344.05,23.97,7.00},
-            {0.20746,0.00,27.740,0,0.6090,5.0930,98.00,1.8226,4,711.0,20.10,318.43,29.68,8.10},
-            {0.10574,0.00,27.740,0,0.6090,5.9830,98.80,1.8681,4,711.0,20.10,390.11,18.07,13.60},
-            {0.11132,0.00,27.740,0,0.6090,5.9830,83.50,2.1099,4,711.0,20.10,396.90,13.35,20.10},
-            {0.17331,0.00,9.690,0,0.5850,5.7070,54.00,2.3817,6,391.0,19.20,396.90,12.01,21.80},
-            {0.27957,0.00,9.690,0,0.5850,5.9260,42.60,2.3817,6,391.0,19.20,396.90,13.59,24.50},
-            {0.17899,0.00,9.690,0,0.5850,5.6700,28.80,2.7986,6,391.0,19.20,393.29,17.60,23.10},
-            {0.28960,0.00,9.690,0,0.5850,5.3900,72.90,2.7986,6,391.0,19.20,396.90,21.14,19.70},
-            {0.26838,0.00,9.690,0,0.5850,5.7940,70.60,2.8927,6,391.0,19.20,396.90,14.10,18.30},
-            {0.23912,0.00,9.690,0,0.5850,6.0190,65.30,2.4091,6,391.0,19.20,396.90,12.92,21.20},
-            {0.17783,0.00,9.690,0,0.5850,5.5690,73.50,2.3999,6,391.0,19.20,395.77,15.10,17.50},
-            {0.22438,0.00,9.690,0,0.5850,6.0270,79.70,2.4982,6,391.0,19.20,396.90,14.33,16.80},
-            {0.06263,0.00,11.930,0,0.5730,6.5930,69.10,2.4786,1,273.0,21.00,391.99,9.67,22.40},
-            {0.04527,0.00,11.930,0,0.5730,6.1200,76.70,2.2875,1,273.0,21.00,396.90,9.08,20.60},
-            {0.06076,0.00,11.930,0,0.5730,6.9760,91.00,2.1675,1,273.0,21.00,396.90,5.64,23.90},
-            {0.10959,0.00,11.930,0,0.5730,6.7940,89.30,2.3889,1,273.0,21.00,393.45,6.48,22.00},
-            {0.04741,0.00,11.930,0,0.5730,6.0300,80.80,2.5050,1,273.0,21.00,396.90,7.88,11.90}
+        {0.02731,0.00,7.070,0,0.4690,6.4210,78.90,4.9671,2,242.0,17.80,396.90,9.14,21.60},
+        {0.02729,0.00,7.070,0,0.4690,7.1850,61.10,4.9671,2,242.0,17.80,392.83,4.03,34.70},
+        {0.03237,0.00,2.180,0,0.4580,6.9980,45.80,6.0622,3,222.0,18.70,394.63,2.94,33.40},
+        {0.06905,0.00,2.180,0,0.4580,7.1470,54.20,6.0622,3,222.0,18.70,396.90,5.33,36.20},
+        {0.02985,0.00,2.180,0,0.4580,6.4300,58.70,6.0622,3,222.0,18.70,394.12,5.21,28.70},
+        {0.08829,12.50,7.870,0,0.5240,6.0120,66.60,5.5605,5,311.0,15.20,395.60,12.43,22.90},
+        {0.14455,12.50,7.870,0,0.5240,6.1720,96.10,5.9505,5,311.0,15.20,396.90,19.15,27.10},
+        {0.21124,12.50,7.870,0,0.5240,5.6310,100.00,6.0821,5,311.0,15.20,386.63,29.93,16.50},
+        {0.17004,12.50,7.870,0,0.5240,6.0040,85.90,6.5921,5,311.0,15.20,386.71,17.10,18.90},
+        {0.22489,12.50,7.870,0,0.5240,6.3770,94.30,6.3467,5,311.0,15.20,392.52,20.45,15.00},
+        {0.11747,12.50,7.870,0,0.5240,6.0090,82.90,6.2267,5,311.0,15.20,396.90,13.27,18.90},
+        {0.09378,12.50,7.870,0,0.5240,5.8890,39.00,5.4509,5,311.0,15.20,390.50,15.71,21.70},
+        {0.62976,0.00,8.140,0,0.5380,5.9490,61.80,4.7075,4,307.0,21.00,396.90,8.26,20.40},
+        {0.63796,0.00,8.140,0,0.5380,6.0960,84.50,4.4619,4,307.0,21.00,380.02,10.26,18.20},
+        {0.62739,0.00,8.140,0,0.5380,5.8340,56.50,4.4986,4,307.0,21.00,395.62,8.47,19.90},
+        {1.05393,0.00,8.140,0,0.5380,5.9350,29.30,4.4986,4,307.0,21.00,386.85,6.58,23.10},
+        {0.78420,0.00,8.140,0,0.5380,5.9900,81.70,4.2579,4,307.0,21.00,386.75,14.67,17.50},
+        {0.80271,0.00,8.140,0,0.5380,5.4560,36.60,3.7965,4,307.0,21.00,288.99,11.69,20.20},
+        {0.72580,0.00,8.140,0,0.5380,5.7270,69.50,3.7965,4,307.0,21.00,390.95,11.28,18.20},
+        {1.25179,0.00,8.140,0,0.5380,5.5700,98.10,3.7979,4,307.0,21.00,376.57,21.02,13.60},
+        {0.85204,0.00,8.140,0,0.5380,5.9650,89.20,4.0123,4,307.0,21.00,392.53,13.83,19.60},
+        {1.23247,0.00,8.140,0,0.5380,6.1420,91.70,3.9769,4,307.0,21.00,396.90,18.72,15.20},
+        {0.98843,0.00,8.140,0,0.5380,5.8130,100.00,4.0952,4,307.0,21.00,394.54,19.88,14.50},
+        {0.75026,0.00,8.140,0,0.5380,5.9240,94.10,4.3996,4,307.0,21.00,394.33,16.30,15.60},
+        {0.84054,0.00,8.140,0,0.5380,5.5990,85.70,4.4546,4,307.0,21.00,303.42,16.51,13.90},
+        {0.67191,0.00,8.140,0,0.5380,5.8130,90.30,4.6820,4,307.0,21.00,376.88,14.81,16.60},
+        {0.95577,0.00,8.140,0,0.5380,6.0470,88.80,4.4534,4,307.0,21.00,306.38,17.28,14.80},
+        {0.77299,0.00,8.140,0,0.5380,6.4950,94.40,4.4547,4,307.0,21.00,387.94,12.80,18.40},
+        {1.00245,0.00,8.140,0,0.5380,6.6740,87.30,4.2390,4,307.0,21.00,380.23,11.98,21.00},
+        {1.13081,0.00,8.140,0,0.5380,5.7130,94.10,4.2330,4,307.0,21.00,360.17,22.60,12.70},
+        {1.35472,0.00,8.140,0,0.5380,6.0720,100.00,4.1750,4,307.0,21.00,376.73,13.04,14.50},
+        {1.38799,0.00,8.140,0,0.5380,5.9500,82.00,3.9900,4,307.0,21.00,232.60,27.71,13.20},
+        {1.15172,0.00,8.140,0,0.5380,5.7010,95.00,3.7872,4,307.0,21.00,358.77,18.35,13.10},
+        {1.61282,0.00,8.140,0,0.5380,6.0960,96.90,3.7598,4,307.0,21.00,248.31,20.34,13.50},
+        {0.06417,0.00,5.960,0,0.4990,5.9330,68.20,3.3603,5,279.0,19.20,396.90,9.68,18.90},
+        {0.09744,0.00,5.960,0,0.4990,5.8410,61.40,3.3779,5,279.0,19.20,377.56,11.41,20.00},
+        {0.08014,0.00,5.960,0,0.4990,5.8500,41.50,3.9342,5,279.0,19.20,396.90,8.77,21.00},
+        {0.17505,0.00,5.960,0,0.4990,5.9660,30.20,3.8473,5,279.0,19.20,393.43,10.13,24.70},
+        {0.02763,75.00,2.950,0,0.4280,6.5950,21.80,5.4011,3,252.0,18.30,395.63,4.32,30.80},
+        {0.03359,75.00,2.950,0,0.4280,7.0240,15.80,5.4011,3,252.0,18.30,395.62,1.98,34.90},
+        {0.12744,0.00,6.910,0,0.4480,6.7700,2.90,5.7209,3,233.0,17.90,385.41,4.84,26.60},
+        {0.14150,0.00,6.910,0,0.4480,6.1690,6.60,5.7209,3,233.0,17.90,383.37,5.81,25.30},
+        {0.15936,0.00,6.910,0,0.4480,6.2110,6.50,5.7209,3,233.0,17.90,394.46,7.44,24.70},
+        {0.12269,0.00,6.910,0,0.4480,6.0690,40.00,5.7209,3,233.0,17.90,389.39,9.55,21.20},
+        {0.17142,0.00,6.910,0,0.4480,5.6820,33.80,5.1004,3,233.0,17.90,396.90,10.21,19.30},
+        {0.18836,0.00,6.910,0,0.4480,5.7860,33.30,5.1004,3,233.0,17.90,396.90,14.15,20.00},
+        {0.22927,0.00,6.910,0,0.4480,6.0300,85.50,5.6894,3,233.0,17.90,392.74,18.80,16.60},
+        {0.25387,0.00,6.910,0,0.4480,5.3990,95.30,5.8700,3,233.0,17.90,396.90,30.81,14.40},
+        {0.21977,0.00,6.910,0,0.4480,5.6020,62.00,6.0877,3,233.0,17.90,396.90,16.20,19.40},
+        {0.08873,21.00,5.640,0,0.4390,5.9630,45.70,6.8147,4,243.0,16.80,395.56,13.45,19.70},
+        {0.04337,21.00,5.640,0,0.4390,6.1150,63.00,6.8147,4,243.0,16.80,393.97,9.43,20.50},
+        {0.05360,21.00,5.640,0,0.4390,6.5110,21.10,6.8147,4,243.0,16.80,396.90,5.28,25.00},
+        {0.04981,21.00,5.640,0,0.4390,5.9980,21.40,6.8147,4,243.0,16.80,396.90,8.43,23.40},
+        {0.01360,75.00,4.000,0,0.4100,5.8880,47.60,7.3197,3,469.0,21.10,396.90,14.80,18.90},
+        {0.01311,90.00,1.220,0,0.4030,7.2490,21.90,8.6966,5,226.0,17.90,395.93,4.81,35.40},
+        {0.02055,85.00,0.740,0,0.4100,6.3830,35.70,9.1876,2,313.0,17.30,396.90,5.77,24.70},
+        {0.01432,100.00,1.320,0,0.4110,6.8160,40.50,8.3248,5,256.0,15.10,392.90,3.95,31.60},
+        {0.15445,25.00,5.130,0,0.4530,6.1450,29.20,7.8148,8,284.0,19.70,390.68,6.86,23.30},
+        {0.10328,25.00,5.130,0,0.4530,5.9270,47.20,6.9320,8,284.0,19.70,396.90,9.22,19.60},
+        {0.14932,25.00,5.130,0,0.4530,5.7410,66.20,7.2254,8,284.0,19.70,395.11,13.15,18.70},
+        {0.17171,25.00,5.130,0,0.4530,5.9660,93.40,6.8185,8,284.0,19.70,378.08,14.44,16.00},
+        {0.11027,25.00,5.130,0,0.4530,6.4560,67.80,7.2255,8,284.0,19.70,396.90,6.73,22.20},
+        {0.12650,25.00,5.130,0,0.4530,6.7620,43.40,7.9809,8,284.0,19.70,395.58,9.50,25.00},
+        {0.01951,17.50,1.380,0,0.4161,7.1040,59.50,9.2229,3,216.0,18.60,393.24,8.05,33.00},
+        {0.03584,80.00,3.370,0,0.3980,6.2900,17.80,6.6115,4,337.0,16.10,396.90,4.67,23.50},
+        {0.04379,80.00,3.370,0,0.3980,5.7870,31.10,6.6115,4,337.0,16.10,396.90,10.24,19.40},
+        {0.05789,12.50,6.070,0,0.4090,5.8780,21.40,6.4980,4,345.0,18.90,396.21,8.10,22.00},
+        {0.13554,12.50,6.070,0,0.4090,5.5940,36.80,6.4980,4,345.0,18.90,396.90,13.09,17.40},
+        {0.12816,12.50,6.070,0,0.4090,5.8850,33.00,6.4980,4,345.0,18.90,396.90,8.79,20.90},
+        {0.08826,0.00,10.810,0,0.4130,6.4170,6.60,5.2873,4,305.0,19.20,383.73,6.72,24.20},
+        {0.15876,0.00,10.810,0,0.4130,5.9610,17.50,5.2873,4,305.0,19.20,376.94,9.88,21.70},
+        {0.09164,0.00,10.810,0,0.4130,6.0650,7.80,5.2873,4,305.0,19.20,390.91,5.52,22.80},
+        {0.19539,0.00,10.810,0,0.4130,6.2450,6.20,5.2873,4,305.0,19.20,377.17,7.54,23.40},
+        {0.07896,0.00,12.830,0,0.4370,6.2730,6.00,4.2515,5,398.0,18.70,394.92,6.78,24.10},
+        {0.09512,0.00,12.830,0,0.4370,6.2860,45.00,4.5026,5,398.0,18.70,383.23,8.94,21.40},
+        {0.10153,0.00,12.830,0,0.4370,6.2790,74.50,4.0522,5,398.0,18.70,373.66,11.97,20.00},
+        {0.08707,0.00,12.830,0,0.4370,6.1400,45.80,4.0905,5,398.0,18.70,386.96,10.27,20.80},
+        {0.05646,0.00,12.830,0,0.4370,6.2320,53.70,5.0141,5,398.0,18.70,386.40,12.34,21.20},
+        {0.08387,0.00,12.830,0,0.4370,5.8740,36.60,4.5026,5,398.0,18.70,396.06,9.10,20.30},
+        {0.04113,25.00,4.860,0,0.4260,6.7270,33.50,5.4007,4,281.0,19.00,396.90,5.29,28.00},
+        {0.04462,25.00,4.860,0,0.4260,6.6190,70.40,5.4007,4,281.0,19.00,395.63,7.22,23.90},
+        {0.03659,25.00,4.860,0,0.4260,6.3020,32.20,5.4007,4,281.0,19.00,396.90,6.72,24.80},
+        {0.03551,25.00,4.860,0,0.4260,6.1670,46.70,5.4007,4,281.0,19.00,390.64,7.51,22.90},
+        {0.05059,0.00,4.490,0,0.4490,6.3890,48.00,4.7794,3,247.0,18.50,396.90,9.62,23.90},
+        {0.05735,0.00,4.490,0,0.4490,6.6300,56.10,4.4377,3,247.0,18.50,392.30,6.53,26.60},
+        {0.05188,0.00,4.490,0,0.4490,6.0150,45.10,4.4272,3,247.0,18.50,395.99,12.86,22.50},
+        {0.07151,0.00,4.490,0,0.4490,6.1210,56.80,3.7476,3,247.0,18.50,395.15,8.44,22.20},
+        {0.05660,0.00,3.410,0,0.4890,7.0070,86.30,3.4217,2,270.0,17.80,396.90,5.50,23.60},
+        {0.05302,0.00,3.410,0,0.4890,7.0790,63.10,3.4145,2,270.0,17.80,396.06,5.70,28.70},
+        {0.04684,0.00,3.410,0,0.4890,6.4170,66.10,3.0923,2,270.0,17.80,392.18,8.81,22.60},
+        {0.03932,0.00,3.410,0,0.4890,6.4050,73.90,3.0921,2,270.0,17.80,393.55,8.20,22.00},
+        {0.04203,28.00,15.040,0,0.4640,6.4420,53.60,3.6659,4,270.0,18.20,395.01,8.16,22.90},
+        {0.02875,28.00,15.040,0,0.4640,6.2110,28.90,3.6659,4,270.0,18.20,396.33,6.21,25.00},
+        {0.04294,28.00,15.040,0,0.4640,6.2490,77.30,3.6150,4,270.0,18.20,396.90,10.59,20.60},
+        {0.12204,0.00,2.890,0,0.4450,6.6250,57.80,3.4952,2,276.0,18.00,357.98,6.65,28.40},
+        {0.11504,0.00,2.890,0,0.4450,6.1630,69.60,3.4952,2,276.0,18.00,391.83,11.34,21.40},
+        {0.12083,0.00,2.890,0,0.4450,8.0690,76.00,3.4952,2,276.0,18.00,396.90,4.21,38.70},
+        {0.08187,0.00,2.890,0,0.4450,7.8200,36.90,3.4952,2,276.0,18.00,393.53,3.57,43.80},
+        {0.06860,0.00,2.890,0,0.4450,7.4160,62.50,3.4952,2,276.0,18.00,396.90,6.19,33.20},
+        {0.14866,0.00,8.560,0,0.5200,6.7270,79.90,2.7778,5,384.0,20.90,394.76,9.42,27.50},
+        {0.11432,0.00,8.560,0,0.5200,6.7810,71.30,2.8561,5,384.0,20.90,395.58,7.67,26.50},
+        {0.22876,0.00,8.560,0,0.5200,6.4050,85.40,2.7147,5,384.0,20.90,70.80,10.63,18.60},
+        {0.21161,0.00,8.560,0,0.5200,6.1370,87.40,2.7147,5,384.0,20.90,394.47,13.44,19.30},
+        {0.13960,0.00,8.560,0,0.5200,6.1670,90.00,2.4210,5,384.0,20.90,392.69,12.33,20.10},
+        {0.13262,0.00,8.560,0,0.5200,5.8510,96.70,2.1069,5,384.0,20.90,394.05,16.47,19.50},
+        {0.17120,0.00,8.560,0,0.5200,5.8360,91.90,2.2110,5,384.0,20.90,395.67,18.66,19.50},
+        {0.13117,0.00,8.560,0,0.5200,6.1270,85.20,2.1224,5,384.0,20.90,387.69,14.09,20.40},
+        {0.12802,0.00,8.560,0,0.5200,6.4740,97.10,2.4329,5,384.0,20.90,395.24,12.27,19.80},
+        {0.26363,0.00,8.560,0,0.5200,6.2290,91.20,2.5451,5,384.0,20.90,391.23,15.55,19.40},
+        {0.10793,0.00,8.560,0,0.5200,6.1950,54.40,2.7778,5,384.0,20.90,393.49,13.00,21.70},
+        {0.10084,0.00,10.010,0,0.5470,6.7150,81.60,2.6775,6,432.0,17.80,395.59,10.16,22.80},
+        {0.12329,0.00,10.010,0,0.5470,5.9130,92.90,2.3534,6,432.0,17.80,394.95,16.21,18.80},
+        {0.22212,0.00,10.010,0,0.5470,6.0920,95.40,2.5480,6,432.0,17.80,396.90,17.09,18.70},
+        {0.14231,0.00,10.010,0,0.5470,6.2540,84.20,2.2565,6,432.0,17.80,388.74,10.45,18.50},
+        {0.17134,0.00,10.010,0,0.5470,5.9280,88.20,2.4631,6,432.0,17.80,344.91,15.76,18.30},
+        {0.13158,0.00,10.010,0,0.5470,6.1760,72.50,2.7301,6,432.0,17.80,393.30,12.04,21.20},
+        {0.15098,0.00,10.010,0,0.5470,6.0210,82.60,2.7474,6,432.0,17.80,394.51,10.30,19.20},
+        {0.13058,0.00,10.010,0,0.5470,5.8720,73.10,2.4775,6,432.0,17.80,338.63,15.37,20.40},
+        {0.14476,0.00,10.010,0,0.5470,5.7310,65.20,2.7592,6,432.0,17.80,391.50,13.61,19.30},
+        {0.06899,0.00,25.650,0,0.5810,5.8700,69.70,2.2577,2,188.0,19.10,389.15,14.37,22.00},
+        {0.07165,0.00,25.650,0,0.5810,6.0040,84.10,2.1974,2,188.0,19.10,377.67,14.27,20.30},
+        {0.09299,0.00,25.650,0,0.5810,5.9610,92.90,2.0869,2,188.0,19.10,378.09,17.93,20.50},
+        {0.15038,0.00,25.650,0,0.5810,5.8560,97.00,1.9444,2,188.0,19.10,370.31,25.41,17.30},
+        {0.09849,0.00,25.650,0,0.5810,5.8790,95.80,2.0063,2,188.0,19.10,379.38,17.58,18.80},
+        {0.16902,0.00,25.650,0,0.5810,5.9860,88.40,1.9929,2,188.0,19.10,385.02,14.81,21.40},
+        {0.38735,0.00,25.650,0,0.5810,5.6130,95.60,1.7572,2,188.0,19.10,359.29,27.26,15.70},
+        {0.25915,0.00,21.890,0,0.6240,5.6930,96.00,1.7883,4,437.0,21.20,392.11,17.19,16.20},
+        {0.32543,0.00,21.890,0,0.6240,6.4310,98.80,1.8125,4,437.0,21.20,396.90,15.39,18.00},
+        {0.88125,0.00,21.890,0,0.6240,5.6370,94.70,1.9799,4,437.0,21.20,396.90,18.34,14.30},
+        {0.34006,0.00,21.890,0,0.6240,6.4580,98.90,2.1185,4,437.0,21.20,395.04,12.60,19.20},
+        {1.19294,0.00,21.890,0,0.6240,6.3260,97.70,2.2710,4,437.0,21.20,396.90,12.26,19.60},
+        {0.59005,0.00,21.890,0,0.6240,6.3720,97.90,2.3274,4,437.0,21.20,385.76,11.12,23.00},
+        {0.32982,0.00,21.890,0,0.6240,5.8220,95.40,2.4699,4,437.0,21.20,388.69,15.03,18.40},
+        {0.97617,0.00,21.890,0,0.6240,5.7570,98.40,2.3460,4,437.0,21.20,262.76,17.31,15.60},
+        {0.55778,0.00,21.890,0,0.6240,6.3350,98.20,2.1107,4,437.0,21.20,394.67,16.96,18.10},
+        {0.32264,0.00,21.890,0,0.6240,5.9420,93.50,1.9669,4,437.0,21.20,378.25,16.90,17.40},
+        {0.35233,0.00,21.890,0,0.6240,6.4540,98.40,1.8498,4,437.0,21.20,394.08,14.59,17.10},
+        {0.24980,0.00,21.890,0,0.6240,5.8570,98.20,1.6686,4,437.0,21.20,392.04,21.32,13.30},
+        {0.54452,0.00,21.890,0,0.6240,6.1510,97.90,1.6687,4,437.0,21.20,396.90,18.46,17.80},
+        {0.29090,0.00,21.890,0,0.6240,6.1740,93.60,1.6119,4,437.0,21.20,388.08,24.16,14.00},
+        {1.62864,0.00,21.890,0,0.6240,5.0190,100.00,1.4394,4,437.0,21.20,396.90,34.41,14.40},
+        {3.32105,0.00,19.580,1,0.8710,5.4030,100.00,1.3216,5,403.0,14.70,396.90,26.82,13.40},
+        {4.09740,0.00,19.580,0,0.8710,5.4680,100.00,1.4118,5,403.0,14.70,396.90,26.42,15.60},
+        {2.77974,0.00,19.580,0,0.8710,4.9030,97.80,1.3459,5,403.0,14.70,396.90,29.29,11.80},
+        {2.37934,0.00,19.580,0,0.8710,6.1300,100.00,1.4191,5,403.0,14.70,172.91,27.80,13.80},
+        {2.15505,0.00,19.580,0,0.8710,5.6280,100.00,1.5166,5,403.0,14.70,169.27,16.65,15.60},
+        {2.36862,0.00,19.580,0,0.8710,4.9260,95.70,1.4608,5,403.0,14.70,391.71,29.53,14.60},
+        {2.33099,0.00,19.580,0,0.8710,5.1860,93.80,1.5296,5,403.0,14.70,356.99,28.32,17.80},
+        {2.73397,0.00,19.580,0,0.8710,5.5970,94.90,1.5257,5,403.0,14.70,351.85,21.45,15.40},
+        {1.65660,0.00,19.580,0,0.8710,6.1220,97.30,1.6180,5,403.0,14.70,372.80,14.10,21.50},
+        {1.49632,0.00,19.580,0,0.8710,5.4040,100.00,1.5916,5,403.0,14.70,341.60,13.28,19.60},
+        {1.12658,0.00,19.580,1,0.8710,5.0120,88.00,1.6102,5,403.0,14.70,343.28,12.12,15.30},
+        {2.14918,0.00,19.580,0,0.8710,5.7090,98.50,1.6232,5,403.0,14.70,261.95,15.79,19.40},
+        {1.41385,0.00,19.580,1,0.8710,6.1290,96.00,1.7494,5,403.0,14.70,321.02,15.12,17.00},
+        {3.53501,0.00,19.580,1,0.8710,6.1520,82.60,1.7455,5,403.0,14.70,88.01,15.02,15.60},
+        {2.44668,0.00,19.580,0,0.8710,5.2720,94.00,1.7364,5,403.0,14.70,88.63,16.14,13.10},
+        {1.22358,0.00,19.580,0,0.6050,6.9430,97.40,1.8773,5,403.0,14.70,363.43,4.59,41.30},
+        {1.34284,0.00,19.580,0,0.6050,6.0660,100.00,1.7573,5,403.0,14.70,353.89,6.43,24.30},
+        {1.42502,0.00,19.580,0,0.8710,6.5100,100.00,1.7659,5,403.0,14.70,364.31,7.39,23.30},
+        {1.27346,0.00,19.580,1,0.6050,6.2500,92.60,1.7984,5,403.0,14.70,338.92,5.50,27.00},
+        {1.46336,0.00,19.580,0,0.6050,7.4890,90.80,1.9709,5,403.0,14.70,374.43,1.73,50.00},
+        {1.83377,0.00,19.580,1,0.6050,7.8020,98.20,2.0407,5,403.0,14.70,389.61,1.92,50.00},
+        {1.51902,0.00,19.580,1,0.6050,8.3750,93.90,2.1620,5,403.0,14.70,388.45,3.32,50.00},
+        {2.24236,0.00,19.580,0,0.6050,5.8540,91.80,2.4220,5,403.0,14.70,395.11,11.64,22.70},
+        {2.92400,0.00,19.580,0,0.6050,6.1010,93.00,2.2834,5,403.0,14.70,240.16,9.81,25.00},
+        {2.01019,0.00,19.580,0,0.6050,7.9290,96.20,2.0459,5,403.0,14.70,369.30,3.70,50.00},
+        {1.80028,0.00,19.580,0,0.6050,5.8770,79.20,2.4259,5,403.0,14.70,227.61,12.14,23.80},
+        {2.30040,0.00,19.580,0,0.6050,6.3190,96.10,2.1000,5,403.0,14.70,297.09,11.10,23.80},
+        {2.44953,0.00,19.580,0,0.6050,6.4020,95.20,2.2625,5,403.0,14.70,330.04,11.32,22.30},
+        {1.20742,0.00,19.580,0,0.6050,5.8750,94.60,2.4259,5,403.0,14.70,292.29,14.43,17.40},
+        {2.31390,0.00,19.580,0,0.6050,5.8800,97.30,2.3887,5,403.0,14.70,348.13,12.03,19.10},
+        {0.13914,0.00,4.050,0,0.5100,5.5720,88.50,2.5961,5,296.0,16.60,396.90,14.69,23.10},
+        {0.09178,0.00,4.050,0,0.5100,6.4160,84.10,2.6463,5,296.0,16.60,395.50,9.04,23.60},
+        {0.08447,0.00,4.050,0,0.5100,5.8590,68.70,2.7019,5,296.0,16.60,393.23,9.64,22.60},
+        {0.06664,0.00,4.050,0,0.5100,6.5460,33.10,3.1323,5,296.0,16.60,390.96,5.33,29.40},
+        {0.07022,0.00,4.050,0,0.5100,6.0200,47.20,3.5549,5,296.0,16.60,393.23,10.11,23.20},
+        {0.05425,0.00,4.050,0,0.5100,6.3150,73.40,3.3175,5,296.0,16.60,395.60,6.29,24.60},
+        {0.06642,0.00,4.050,0,0.5100,6.8600,74.40,2.9153,5,296.0,16.60,391.27,6.92,29.90},
+        {0.05780,0.00,2.460,0,0.4880,6.9800,58.40,2.8290,3,193.0,17.80,396.90,5.04,37.20},
+        {0.06588,0.00,2.460,0,0.4880,7.7650,83.30,2.7410,3,193.0,17.80,395.56,7.56,39.80},
+        {0.06888,0.00,2.460,0,0.4880,6.1440,62.20,2.5979,3,193.0,17.80,396.90,9.45,36.20},
+        {0.09103,0.00,2.460,0,0.4880,7.1550,92.20,2.7006,3,193.0,17.80,394.12,4.82,37.90},
+        {0.10008,0.00,2.460,0,0.4880,6.5630,95.60,2.8470,3,193.0,17.80,396.90,5.68,32.50},
+        {0.08308,0.00,2.460,0,0.4880,5.6040,89.80,2.9879,3,193.0,17.80,391.00,13.98,26.40},
+        {0.06047,0.00,2.460,0,0.4880,6.1530,68.80,3.2797,3,193.0,17.80,387.11,13.15,29.60},
+        {0.05602,0.00,2.460,0,0.4880,7.8310,53.60,3.1992,3,193.0,17.80,392.63,4.45,50.00},
+        {0.07875,45.00,3.440,0,0.4370,6.7820,41.10,3.7886,5,398.0,15.20,393.87,6.68,32.00},
+        {0.12579,45.00,3.440,0,0.4370,6.5560,29.10,4.5667,5,398.0,15.20,382.84,4.56,29.80},
+        {0.08370,45.00,3.440,0,0.4370,7.1850,38.90,4.5667,5,398.0,15.20,396.90,5.39,34.90},
+        {0.09068,45.00,3.440,0,0.4370,6.9510,21.50,6.4798,5,398.0,15.20,377.68,5.10,37.00},
+        {0.06911,45.00,3.440,0,0.4370,6.7390,30.80,6.4798,5,398.0,15.20,389.71,4.69,30.50},
+        {0.08664,45.00,3.440,0,0.4370,7.1780,26.30,6.4798,5,398.0,15.20,390.49,2.87,36.40},
+        {0.02187,60.00,2.930,0,0.4010,6.8000,9.90,6.2196,1,265.0,15.60,393.37,5.03,31.10},
+        {0.01439,60.00,2.930,0,0.4010,6.6040,18.80,6.2196,1,265.0,15.60,376.70,4.38,29.10},
+        {0.01381,80.00,0.460,0,0.4220,7.8750,32.00,5.6484,4,255.0,14.40,394.23,2.97,50.00},
+        {0.04011,80.00,1.520,0,0.4040,7.2870,34.10,7.3090,2,329.0,12.60,396.90,4.08,33.30},
+        {0.04666,80.00,1.520,0,0.4040,7.1070,36.60,7.3090,2,329.0,12.60,354.31,8.61,30.30},
+        {0.03768,80.00,1.520,0,0.4040,7.2740,38.30,7.3090,2,329.0,12.60,392.20,6.62,34.60},
+        {0.03150,95.00,1.470,0,0.4030,6.9750,15.30,7.6534,3,402.0,17.00,396.90,4.56,34.90},
+        {0.01778,95.00,1.470,0,0.4030,7.1350,13.90,7.6534,3,402.0,17.00,384.30,4.45,32.90},
+        {0.03445,82.50,2.030,0,0.4150,6.1620,38.40,6.2700,2,348.0,14.70,393.77,7.43,24.10},
+        {0.02177,82.50,2.030,0,0.4150,7.6100,15.70,6.2700,2,348.0,14.70,395.38,3.11,42.30},
+        {0.03510,95.00,2.680,0,0.4161,7.8530,33.20,5.1180,4,224.0,14.70,392.78,3.81,48.50},
+        {0.02009,95.00,2.680,0,0.4161,8.0340,31.90,5.1180,4,224.0,14.70,390.55,2.88,50.00},
+        {0.13642,0.00,10.590,0,0.4890,5.8910,22.30,3.9454,4,277.0,18.60,396.90,10.87,22.60},
+        {0.22969,0.00,10.590,0,0.4890,6.3260,52.50,4.3549,4,277.0,18.60,394.87,10.97,24.40},
+        {0.25199,0.00,10.590,0,0.4890,5.7830,72.70,4.3549,4,277.0,18.60,389.43,18.06,22.50},
+        {0.13587,0.00,10.590,1,0.4890,6.0640,59.10,4.2392,4,277.0,18.60,381.32,14.66,24.40},
+        {0.43571,0.00,10.590,1,0.4890,5.3440,100.00,3.8750,4,277.0,18.60,396.90,23.09,20.00},
+        {0.17446,0.00,10.590,1,0.4890,5.9600,92.10,3.8771,4,277.0,18.60,393.25,17.27,21.70},
+        {0.37578,0.00,10.590,1,0.4890,5.4040,88.60,3.6650,4,277.0,18.60,395.24,23.98,19.30},
+        {0.21719,0.00,10.590,1,0.4890,5.8070,53.80,3.6526,4,277.0,18.60,390.94,16.03,22.40},
+        {0.14052,0.00,10.590,0,0.4890,6.3750,32.30,3.9454,4,277.0,18.60,385.81,9.38,28.10},
+        {0.28955,0.00,10.590,0,0.4890,5.4120,9.80,3.5875,4,277.0,18.60,348.93,29.55,23.70},
+        {0.19802,0.00,10.590,0,0.4890,6.1820,42.40,3.9454,4,277.0,18.60,393.63,9.47,25.00},
+        {0.04560,0.00,13.890,1,0.5500,5.8880,56.00,3.1121,5,276.0,16.40,392.80,13.51,23.30},
+        {0.07013,0.00,13.890,0,0.5500,6.6420,85.10,3.4211,5,276.0,16.40,392.78,9.69,28.70},
+        {0.11069,0.00,13.890,1,0.5500,5.9510,93.80,2.8893,5,276.0,16.40,396.90,17.92,21.50},
+        {0.11425,0.00,13.890,1,0.5500,6.3730,92.40,3.3633,5,276.0,16.40,393.74,10.50,23.00},
+        {0.35809,0.00,6.200,1,0.5070,6.9510,88.50,2.8617,8,307.0,17.40,391.70,9.71,26.70},
+        {0.40771,0.00,6.200,1,0.5070,6.1640,91.30,3.0480,8,307.0,17.40,395.24,21.46,21.70},
+        {0.62356,0.00,6.200,1,0.5070,6.8790,77.70,3.2721,8,307.0,17.40,390.39,9.93,27.50},
+        {0.61470,0.00,6.200,0,0.5070,6.6180,80.80,3.2721,8,307.0,17.40,396.90,7.60,30.10},
+        {0.31533,0.00,6.200,0,0.5040,8.2660,78.30,2.8944,8,307.0,17.40,385.05,4.14,44.80},
+        {0.52693,0.00,6.200,0,0.5040,8.7250,83.00,2.8944,8,307.0,17.40,382.00,4.63,50.00},
+        {0.38214,0.00,6.200,0,0.5040,8.0400,86.50,3.2157,8,307.0,17.40,387.38,3.13,37.60},
+        {0.41238,0.00,6.200,0,0.5040,7.1630,79.90,3.2157,8,307.0,17.40,372.08,6.36,31.60},
+        {0.29819,0.00,6.200,0,0.5040,7.6860,17.00,3.3751,8,307.0,17.40,377.51,3.92,46.70},
+        {0.44178,0.00,6.200,0,0.5040,6.5520,21.40,3.3751,8,307.0,17.40,380.34,3.76,31.50},
+        {0.53700,0.00,6.200,0,0.5040,5.9810,68.10,3.6715,8,307.0,17.40,378.35,11.65,24.30},
+        {0.46296,0.00,6.200,0,0.5040,7.4120,76.90,3.6715,8,307.0,17.40,376.14,5.25,31.70},
+        {0.57529,0.00,6.200,0,0.5070,8.3370,73.30,3.8384,8,307.0,17.40,385.91,2.47,41.70},
+        {0.33147,0.00,6.200,0,0.5070,8.2470,70.40,3.6519,8,307.0,17.40,378.95,3.95,48.30},
+        {0.44791,0.00,6.200,1,0.5070,6.7260,66.50,3.6519,8,307.0,17.40,360.20,8.05,29.00},
+        {0.33045,0.00,6.200,0,0.5070,6.0860,61.50,3.6519,8,307.0,17.40,376.75,10.88,24.00},
+        {0.52058,0.00,6.200,1,0.5070,6.6310,76.50,4.1480,8,307.0,17.40,388.45,9.54,25.10},
+        {0.51183,0.00,6.200,0,0.5070,7.3580,71.60,4.1480,8,307.0,17.40,390.07,4.73,31.50},
+        {0.08244,30.00,4.930,0,0.4280,6.4810,18.50,6.1899,6,300.0,16.60,379.41,6.36,23.70},
+        {0.09252,30.00,4.930,0,0.4280,6.6060,42.20,6.1899,6,300.0,16.60,383.78,7.37,23.30},
+        {0.11329,30.00,4.930,0,0.4280,6.8970,54.30,6.3361,6,300.0,16.60,391.25,11.38,22.00},
+        {0.10612,30.00,4.930,0,0.4280,6.0950,65.10,6.3361,6,300.0,16.60,394.62,12.40,20.10},
+        {0.10290,30.00,4.930,0,0.4280,6.3580,52.90,7.0355,6,300.0,16.60,372.75,11.22,22.20},
+        {0.12757,30.00,4.930,0,0.4280,6.3930,7.80,7.0355,6,300.0,16.60,374.71,5.19,23.70},
+        {0.20608,22.00,5.860,0,0.4310,5.5930,76.50,7.9549,7,330.0,19.10,372.49,12.50,17.60},
+        {0.19133,22.00,5.860,0,0.4310,5.6050,70.20,7.9549,7,330.0,19.10,389.13,18.46,18.50},
+        {0.33983,22.00,5.860,0,0.4310,6.1080,34.90,8.0555,7,330.0,19.10,390.18,9.16,24.30},
+        {0.19657,22.00,5.860,0,0.4310,6.2260,79.20,8.0555,7,330.0,19.10,376.14,10.15,20.50},
+        {0.16439,22.00,5.860,0,0.4310,6.4330,49.10,7.8265,7,330.0,19.10,374.71,9.52,24.50},
+        {0.19073,22.00,5.860,0,0.4310,6.7180,17.50,7.8265,7,330.0,19.10,393.74,6.56,26.20},
+        {0.14030,22.00,5.860,0,0.4310,6.4870,13.00,7.3967,7,330.0,19.10,396.28,5.90,24.40},
+        {0.21409,22.00,5.860,0,0.4310,6.4380,8.90,7.3967,7,330.0,19.10,377.07,3.59,24.80},
+        {0.08221,22.00,5.860,0,0.4310,6.9570,6.80,8.9067,7,330.0,19.10,386.09,3.53,29.60},
+        {0.36894,22.00,5.860,0,0.4310,8.2590,8.40,8.9067,7,330.0,19.10,396.90,3.54,42.80},
+        {0.04819,80.00,3.640,0,0.3920,6.1080,32.00,9.2203,1,315.0,16.40,392.89,6.57,21.90},
+        {0.03548,80.00,3.640,0,0.3920,5.8760,19.10,9.2203,1,315.0,16.40,395.18,9.25,20.90},
+        {0.01538,90.00,3.750,0,0.3940,7.4540,34.20,6.3361,3,244.0,15.90,386.34,3.11,44.00},
+        {0.61154,20.00,3.970,0,0.6470,8.7040,86.90,1.8010,5,264.0,13.00,389.70,5.12,50.00},
+        {0.66351,20.00,3.970,0,0.6470,7.3330,100.00,1.8946,5,264.0,13.00,383.29,7.79,36.00},
+        {0.65665,20.00,3.970,0,0.6470,6.8420,100.00,2.0107,5,264.0,13.00,391.93,6.90,30.10},
+        {0.54011,20.00,3.970,0,0.6470,7.2030,81.80,2.1121,5,264.0,13.00,392.80,9.59,33.80},
+        {0.53412,20.00,3.970,0,0.6470,7.5200,89.40,2.1398,5,264.0,13.00,388.37,7.26,43.10},
+        {0.52014,20.00,3.970,0,0.6470,8.3980,91.50,2.2885,5,264.0,13.00,386.86,5.91,48.80},
+        {0.82526,20.00,3.970,0,0.6470,7.3270,94.50,2.0788,5,264.0,13.00,393.42,11.25,31.00},
+        {0.55007,20.00,3.970,0,0.6470,7.2060,91.60,1.9301,5,264.0,13.00,387.89,8.10,36.50},
+        {0.76162,20.00,3.970,0,0.6470,5.5600,62.80,1.9865,5,264.0,13.00,392.40,10.45,22.80},
+        {0.78570,20.00,3.970,0,0.6470,7.0140,84.60,2.1329,5,264.0,13.00,384.07,14.79,30.70},
+        {0.57834,20.00,3.970,0,0.5750,8.2970,67.00,2.4216,5,264.0,13.00,384.54,7.44,50.00},
+        {0.54050,20.00,3.970,0,0.5750,7.4700,52.60,2.8720,5,264.0,13.00,390.30,3.16,43.50},
+        {0.09065,20.00,6.960,1,0.4640,5.9200,61.50,3.9175,3,223.0,18.60,391.34,13.65,20.70},
+        {0.29916,20.00,6.960,0,0.4640,5.8560,42.10,4.4290,3,223.0,18.60,388.65,13.00,21.10},
+        {0.16211,20.00,6.960,0,0.4640,6.2400,16.30,4.4290,3,223.0,18.60,396.90,6.59,25.20},
+        {0.11460,20.00,6.960,0,0.4640,6.5380,58.70,3.9175,3,223.0,18.60,394.96,7.73,24.40},
+        {0.22188,20.00,6.960,1,0.4640,7.6910,51.80,4.3665,3,223.0,18.60,390.77,6.58,35.20},
+        {0.05644,40.00,6.410,1,0.4470,6.7580,32.90,4.0776,4,254.0,17.60,396.90,3.53,32.40},
+        {0.09604,40.00,6.410,0,0.4470,6.8540,42.80,4.2673,4,254.0,17.60,396.90,2.98,32.00},
+        {0.10469,40.00,6.410,1,0.4470,7.2670,49.00,4.7872,4,254.0,17.60,389.25,6.05,33.20},
+        {0.06127,40.00,6.410,1,0.4470,6.8260,27.60,4.8628,4,254.0,17.60,393.45,4.16,33.10},
+        {0.07978,40.00,6.410,0,0.4470,6.4820,32.10,4.1403,4,254.0,17.60,396.90,7.19,29.10},
+        {0.21038,20.00,3.330,0,0.4429,6.8120,32.20,4.1007,5,216.0,14.90,396.90,4.85,35.10},
+        {0.03578,20.00,3.330,0,0.4429,7.8200,64.50,4.6947,5,216.0,14.90,387.31,3.76,45.40},
+        {0.03705,20.00,3.330,0,0.4429,6.9680,37.20,5.2447,5,216.0,14.90,392.23,4.59,35.40},
+        {0.06129,20.00,3.330,1,0.4429,7.6450,49.70,5.2119,5,216.0,14.90,377.07,3.01,46.00},
+        {0.01501,90.00,1.210,1,0.4010,7.9230,24.80,5.8850,1,198.0,13.60,395.52,3.16,50.00},
+        {0.00906,90.00,2.970,0,0.4000,7.0880,20.80,7.3073,1,285.0,15.30,394.72,7.85,32.20},
+        {0.01096,55.00,2.250,0,0.3890,6.4530,31.90,7.3073,1,300.0,15.30,394.72,8.23,22.00},
+        {0.01965,80.00,1.760,0,0.3850,6.2300,31.50,9.0892,1,241.0,18.20,341.60,12.93,20.10},
+        {0.03871,52.50,5.320,0,0.4050,6.2090,31.30,7.3172,6,293.0,16.60,396.90,7.14,23.20},
+        {0.04590,52.50,5.320,0,0.4050,6.3150,45.60,7.3172,6,293.0,16.60,396.90,7.60,22.30},
+        {0.04297,52.50,5.320,0,0.4050,6.5650,22.90,7.3172,6,293.0,16.60,371.72,9.51,24.80},
+        {0.03502,80.00,4.950,0,0.4110,6.8610,27.90,5.1167,4,245.0,19.20,396.90,3.33,28.50},
+        {0.07886,80.00,4.950,0,0.4110,7.1480,27.70,5.1167,4,245.0,19.20,396.90,3.56,37.30},
+        {0.03615,80.00,4.950,0,0.4110,6.6300,23.40,5.1167,4,245.0,19.20,396.90,4.70,27.90},
+        {0.08265,0.00,13.920,0,0.4370,6.1270,18.40,5.5027,4,289.0,16.00,396.90,8.58,23.90},
+        {0.08199,0.00,13.920,0,0.4370,6.0090,42.30,5.5027,4,289.0,16.00,396.90,10.40,21.70},
+        {0.12932,0.00,13.920,0,0.4370,6.6780,31.10,5.9604,4,289.0,16.00,396.90,6.27,28.60},
+        {0.05372,0.00,13.920,0,0.4370,6.5490,51.00,5.9604,4,289.0,16.00,392.85,7.39,27.10},
+        {0.14103,0.00,13.920,0,0.4370,5.7900,58.00,6.3200,4,289.0,16.00,396.90,15.84,20.30},
+        {0.06466,70.00,2.240,0,0.4000,6.3450,20.10,7.8278,5,358.0,14.80,368.24,4.97,22.50},
+        {0.05561,70.00,2.240,0,0.4000,7.0410,10.00,7.8278,5,358.0,14.80,371.58,4.74,29.00},
+        {0.04417,70.00,2.240,0,0.4000,6.8710,47.40,7.8278,5,358.0,14.80,390.86,6.07,24.80},
+        {0.03537,34.00,6.090,0,0.4330,6.5900,40.40,5.4917,7,329.0,16.10,395.75,9.50,22.00},
+        {0.09266,34.00,6.090,0,0.4330,6.4950,18.40,5.4917,7,329.0,16.10,383.61,8.67,26.40},
+        {0.10000,34.00,6.090,0,0.4330,6.9820,17.70,5.4917,7,329.0,16.10,390.43,4.86,33.10},
+        {0.05515,33.00,2.180,0,0.4720,7.2360,41.10,4.0220,7,222.0,18.40,393.68,6.93,36.10},
+        {0.05479,33.00,2.180,0,0.4720,6.6160,58.10,3.3700,7,222.0,18.40,393.36,8.93,28.40},
+        {0.07503,33.00,2.180,0,0.4720,7.4200,71.90,3.0992,7,222.0,18.40,396.90,6.47,33.40},
+        {0.04932,33.00,2.180,0,0.4720,6.8490,70.30,3.1827,7,222.0,18.40,396.90,7.53,28.20},
+        {0.49298,0.00,9.900,0,0.5440,6.6350,82.50,3.3175,4,304.0,18.40,396.90,4.54,22.80},
+        {0.34940,0.00,9.900,0,0.5440,5.9720,76.70,3.1025,4,304.0,18.40,396.24,9.97,20.30},
+        {2.63548,0.00,9.900,0,0.5440,4.9730,37.80,2.5194,4,304.0,18.40,350.45,12.64,16.10},
+        {0.79041,0.00,9.900,0,0.5440,6.1220,52.80,2.6403,4,304.0,18.40,396.90,5.98,22.10},
+        {0.26169,0.00,9.900,0,0.5440,6.0230,90.40,2.8340,4,304.0,18.40,396.30,11.72,19.40},
+        {0.26938,0.00,9.900,0,0.5440,6.2660,82.80,3.2628,4,304.0,18.40,393.39,7.90,21.60},
+        {0.36920,0.00,9.900,0,0.5440,6.5670,87.30,3.6023,4,304.0,18.40,395.69,9.28,23.80},
+        {0.25356,0.00,9.900,0,0.5440,5.7050,77.70,3.9450,4,304.0,18.40,396.42,11.50,16.20},
+        {0.31827,0.00,9.900,0,0.5440,5.9140,83.20,3.9986,4,304.0,18.40,390.70,18.33,17.80},
+        {0.24522,0.00,9.900,0,0.5440,5.7820,71.70,4.0317,4,304.0,18.40,396.90,15.94,19.80},
+        {0.40202,0.00,9.900,0,0.5440,6.3820,67.20,3.5325,4,304.0,18.40,395.21,10.36,23.10},
+        {0.47547,0.00,9.900,0,0.5440,6.1130,58.80,4.0019,4,304.0,18.40,396.23,12.73,21.00},
+        {0.16760,0.00,7.380,0,0.4930,6.4260,52.30,4.5404,5,287.0,19.60,396.90,7.20,23.80},
+        {0.18159,0.00,7.380,0,0.4930,6.3760,54.30,4.5404,5,287.0,19.60,396.90,6.87,23.10},
+        {0.35114,0.00,7.380,0,0.4930,6.0410,49.90,4.7211,5,287.0,19.60,396.90,7.70,20.40},
+        {0.28392,0.00,7.380,0,0.4930,5.7080,74.30,4.7211,5,287.0,19.60,391.13,11.74,18.50},
+        {0.34109,0.00,7.380,0,0.4930,6.4150,40.10,4.7211,5,287.0,19.60,396.90,6.12,25.00},
+        {0.19186,0.00,7.380,0,0.4930,6.4310,14.70,5.4159,5,287.0,19.60,393.68,5.08,24.60},
+        {0.30347,0.00,7.380,0,0.4930,6.3120,28.90,5.4159,5,287.0,19.60,396.90,6.15,23.00},
+        {0.24103,0.00,7.380,0,0.4930,6.0830,43.70,5.4159,5,287.0,19.60,396.90,12.79,22.20},
+        {0.06617,0.00,3.240,0,0.4600,5.8680,25.80,5.2146,4,430.0,16.90,382.44,9.97,19.30},
+        {0.06724,0.00,3.240,0,0.4600,6.3330,17.20,5.2146,4,430.0,16.90,375.21,7.34,22.60},
+        {0.04544,0.00,3.240,0,0.4600,6.1440,32.20,5.8736,4,430.0,16.90,368.57,9.09,19.80},
+        {0.05023,35.00,6.060,0,0.4379,5.7060,28.40,6.6407,1,304.0,16.90,394.02,12.43,17.10},
+        {0.03466,35.00,6.060,0,0.4379,6.0310,23.30,6.6407,1,304.0,16.90,362.25,7.83,19.40},
+        {0.05083,0.00,5.190,0,0.5150,6.3160,38.10,6.4584,5,224.0,20.20,389.71,5.68,22.20},
+        {0.03738,0.00,5.190,0,0.5150,6.3100,38.50,6.4584,5,224.0,20.20,389.40,6.75,20.70},
+        {0.03961,0.00,5.190,0,0.5150,6.0370,34.50,5.9853,5,224.0,20.20,396.90,8.01,21.10},
+        {0.03427,0.00,5.190,0,0.5150,5.8690,46.30,5.2311,5,224.0,20.20,396.90,9.80,19.50},
+        {0.03041,0.00,5.190,0,0.5150,5.8950,59.60,5.6150,5,224.0,20.20,394.81,10.56,18.50},
+        {0.03306,0.00,5.190,0,0.5150,6.0590,37.30,4.8122,5,224.0,20.20,396.14,8.51,20.60},
+        {0.05497,0.00,5.190,0,0.5150,5.9850,45.40,4.8122,5,224.0,20.20,396.90,9.74,19.00},
+        {0.06151,0.00,5.190,0,0.5150,5.9680,58.50,4.8122,5,224.0,20.20,396.90,9.29,18.70},
+        {0.01301,35.00,1.520,0,0.4420,7.2410,49.30,7.0379,1,284.0,15.50,394.74,5.49,32.70},
+        {0.02498,0.00,1.890,0,0.5180,6.5400,59.70,6.2669,1,422.0,15.90,389.96,8.65,16.50},
+        {0.02543,55.00,3.780,0,0.4840,6.6960,56.40,5.7321,5,370.0,17.60,396.90,7.18,23.90},
+        {0.03049,55.00,3.780,0,0.4840,6.8740,28.10,6.4654,5,370.0,17.60,387.97,4.61,31.20},
+        {0.03113,0.00,4.390,0,0.4420,6.0140,48.50,8.0136,3,352.0,18.80,385.64,10.53,17.50},
+        {0.06162,0.00,4.390,0,0.4420,5.8980,52.30,8.0136,3,352.0,18.80,364.61,12.67,17.20},
+        {0.01870,85.00,4.150,0,0.4290,6.5160,27.70,8.5353,4,351.0,17.90,392.43,6.36,23.10},
+        {0.01501,80.00,2.010,0,0.4350,6.6350,29.70,8.3440,4,280.0,17.00,390.94,5.99,24.50},
+        {0.02899,40.00,1.250,0,0.4290,6.9390,34.50,8.7921,1,335.0,19.70,389.85,5.89,26.60},
+        {0.06211,40.00,1.250,0,0.4290,6.4900,44.40,8.7921,1,335.0,19.70,396.90,5.98,22.90},
+        {0.07950,60.00,1.690,0,0.4110,6.5790,35.90,10.7103,4,411.0,18.30,370.78,5.49,24.10},
+        {0.07244,60.00,1.690,0,0.4110,5.8840,18.50,10.7103,4,411.0,18.30,392.33,7.79,18.60},
+        {0.01709,90.00,2.020,0,0.4100,6.7280,36.10,12.1265,5,187.0,17.00,384.46,4.50,30.10},
+        {0.04301,80.00,1.910,0,0.4130,5.6630,21.90,10.5857,4,334.0,22.00,382.80,8.05,18.20},
+        {0.10659,80.00,1.910,0,0.4130,5.9360,19.50,10.5857,4,334.0,22.00,376.04,5.57,20.60},
+        {8.98296,0.00,18.100,1,0.7700,6.2120,97.40,2.1222,24,666.0,20.20,377.73,17.60,17.80},
+        {3.84970,0.00,18.100,1,0.7700,6.3950,91.00,2.5052,24,666.0,20.20,391.34,13.27,21.70},
+        {5.20177,0.00,18.100,1,0.7700,6.1270,83.40,2.7227,24,666.0,20.20,395.43,11.48,22.70},
+        {4.26131,0.00,18.100,0,0.7700,6.1120,81.30,2.5091,24,666.0,20.20,390.74,12.67,22.60},
+        {4.54192,0.00,18.100,0,0.7700,6.3980,88.00,2.5182,24,666.0,20.20,374.56,7.79,25.00},
+        {3.83684,0.00,18.100,0,0.7700,6.2510,91.10,2.2955,24,666.0,20.20,350.65,14.19,19.90},
+        {3.67822,0.00,18.100,0,0.7700,5.3620,96.20,2.1036,24,666.0,20.20,380.79,10.19,20.80},
+        {4.22239,0.00,18.100,1,0.7700,5.8030,89.00,1.9047,24,666.0,20.20,353.04,14.64,16.80},
+        {3.47428,0.00,18.100,1,0.7180,8.7800,82.90,1.9047,24,666.0,20.20,354.55,5.29,21.90},
+        {4.55587,0.00,18.100,0,0.7180,3.5610,87.90,1.6132,24,666.0,20.20,354.70,7.12,27.50},
+        {3.69695,0.00,18.100,0,0.7180,4.9630,91.40,1.7523,24,666.0,20.20,316.03,14.00,21.90},
+        {13.52220,0.00,18.100,0,0.6310,3.8630,100.00,1.5106,24,666.0,20.20,131.42,13.33,23.10},
+        {4.89822,0.00,18.100,0,0.6310,4.9700,100.00,1.3325,24,666.0,20.20,375.52,3.26,50.00},
+        {5.66998,0.00,18.100,1,0.6310,6.6830,96.80,1.3567,24,666.0,20.20,375.33,3.73,50.00},
+        {6.53876,0.00,18.100,1,0.6310,7.0160,97.50,1.2024,24,666.0,20.20,392.05,2.96,50.00},
+        {9.23230,0.00,18.100,0,0.6310,6.2160,100.00,1.1691,24,666.0,20.20,366.15,9.53,50.00},
+        {8.26725,0.00,18.100,1,0.6680,5.8750,89.60,1.1296,24,666.0,20.20,347.88,8.88,50.00},
+        {11.10810,0.00,18.100,0,0.6680,4.9060,100.00,1.1742,24,666.0,20.20,396.90,34.77,13.80},
+        {18.49820,0.00,18.100,0,0.6680,4.1380,100.00,1.1370,24,666.0,20.20,396.90,37.97,13.80},
+        {19.60910,0.00,18.100,0,0.6710,7.3130,97.90,1.3163,24,666.0,20.20,396.90,13.44,15.00},
+        {15.28800,0.00,18.100,0,0.6710,6.6490,93.30,1.3449,24,666.0,20.20,363.02,23.24,13.90},
+        {9.82349,0.00,18.100,0,0.6710,6.7940,98.80,1.3580,24,666.0,20.20,396.90,21.24,13.30},
+        {23.64820,0.00,18.100,0,0.6710,6.3800,96.20,1.3861,24,666.0,20.20,396.90,23.69,13.10},
+        {17.86670,0.00,18.100,0,0.6710,6.2230,100.00,1.3861,24,666.0,20.20,393.74,21.78,10.20},
+        {88.97620,0.00,18.100,0,0.6710,6.9680,91.90,1.4165,24,666.0,20.20,396.90,17.21,10.40},
+        {15.87440,0.00,18.100,0,0.6710,6.5450,99.10,1.5192,24,666.0,20.20,396.90,21.08,10.90},
+        {9.18702,0.00,18.100,0,0.7000,5.5360,100.00,1.5804,24,666.0,20.20,396.90,23.60,11.30},
+        {7.99248,0.00,18.100,0,0.7000,5.5200,100.00,1.5331,24,666.0,20.20,396.90,24.56,12.30},
+        {20.08490,0.00,18.100,0,0.7000,4.3680,91.20,1.4395,24,666.0,20.20,285.83,30.63,8.80},
+        {16.81180,0.00,18.100,0,0.7000,5.2770,98.10,1.4261,24,666.0,20.20,396.90,30.81,7.20},
+        {24.39380,0.00,18.100,0,0.7000,4.6520,100.00,1.4672,24,666.0,20.20,396.90,28.28,10.50},
+        {22.59710,0.00,18.100,0,0.7000,5.0000,89.50,1.5184,24,666.0,20.20,396.90,31.99,7.40},
+        {14.33370,0.00,18.100,0,0.7000,4.8800,100.00,1.5895,24,666.0,20.20,372.92,30.62,10.20},
+        {8.15174,0.00,18.100,0,0.7000,5.3900,98.90,1.7281,24,666.0,20.20,396.90,20.85,11.50},
+        {6.96215,0.00,18.100,0,0.7000,5.7130,97.00,1.9265,24,666.0,20.20,394.43,17.11,15.10},
+        {5.29305,0.00,18.100,0,0.7000,6.0510,82.50,2.1678,24,666.0,20.20,378.38,18.76,23.20},
+        {11.57790,0.00,18.100,0,0.7000,5.0360,97.00,1.7700,24,666.0,20.20,396.90,25.68,9.70},
+        {8.64476,0.00,18.100,0,0.6930,6.1930,92.60,1.7912,24,666.0,20.20,396.90,15.17,13.80},
+        {13.35980,0.00,18.100,0,0.6930,5.8870,94.70,1.7821,24,666.0,20.20,396.90,16.35,12.70},
+        {8.71675,0.00,18.100,0,0.6930,6.4710,98.80,1.7257,24,666.0,20.20,391.98,17.12,13.10},
+        {5.87205,0.00,18.100,0,0.6930,6.4050,96.00,1.6768,24,666.0,20.20,396.90,19.37,12.50},
+        {7.67202,0.00,18.100,0,0.6930,5.7470,98.90,1.6334,24,666.0,20.20,393.10,19.92,8.50},
+        {38.35180,0.00,18.100,0,0.6930,5.4530,100.00,1.4896,24,666.0,20.20,396.90,30.59,5.00},
+        {9.91655,0.00,18.100,0,0.6930,5.8520,77.80,1.5004,24,666.0,20.20,338.16,29.97,6.30},
+        {25.04610,0.00,18.100,0,0.6930,5.9870,100.00,1.5888,24,666.0,20.20,396.90,26.77,5.60},
+        {14.23620,0.00,18.100,0,0.6930,6.3430,100.00,1.5741,24,666.0,20.20,396.90,20.32,7.20},
+        {9.59571,0.00,18.100,0,0.6930,6.4040,100.00,1.6390,24,666.0,20.20,376.11,20.31,12.10},
+        {24.80170,0.00,18.100,0,0.6930,5.3490,96.00,1.7028,24,666.0,20.20,396.90,19.77,8.30},
+        {41.52920,0.00,18.100,0,0.6930,5.5310,85.40,1.6074,24,666.0,20.20,329.46,27.38,8.50},
+        {67.92080,0.00,18.100,0,0.6930,5.6830,100.00,1.4254,24,666.0,20.20,384.97,22.98,5.00},
+        {20.71620,0.00,18.100,0,0.6590,4.1380,100.00,1.1781,24,666.0,20.20,370.22,23.34,11.90},
+        {11.95110,0.00,18.100,0,0.6590,5.6080,100.00,1.2852,24,666.0,20.20,332.09,12.13,27.90},
+        {7.40389,0.00,18.100,0,0.5970,5.6170,97.90,1.4547,24,666.0,20.20,314.64,26.40,17.20},
+        {14.43830,0.00,18.100,0,0.5970,6.8520,100.00,1.4655,24,666.0,20.20,179.36,19.78,27.50},
+        {51.13580,0.00,18.100,0,0.5970,5.7570,100.00,1.4130,24,666.0,20.20,2.60,10.11,15.00},
+        {14.05070,0.00,18.100,0,0.5970,6.6570,100.00,1.5275,24,666.0,20.20,35.05,21.22,17.20},
+        {18.81100,0.00,18.100,0,0.5970,4.6280,100.00,1.5539,24,666.0,20.20,28.79,34.37,17.90},
+        {28.65580,0.00,18.100,0,0.5970,5.1550,100.00,1.5894,24,666.0,20.20,210.97,20.08,16.30},
+        {45.74610,0.00,18.100,0,0.6930,4.5190,100.00,1.6582,24,666.0,20.20,88.27,36.98,7.00},
+        {18.08460,0.00,18.100,0,0.6790,6.4340,100.00,1.8347,24,666.0,20.20,27.25,29.05,7.20},
+        {10.83420,0.00,18.100,0,0.6790,6.7820,90.80,1.8195,24,666.0,20.20,21.57,25.79,7.50},
+        {25.94060,0.00,18.100,0,0.6790,5.3040,89.10,1.6475,24,666.0,20.20,127.36,26.64,10.40},
+        {73.53410,0.00,18.100,0,0.6790,5.9570,100.00,1.8026,24,666.0,20.20,16.45,20.62,8.80},
+        {11.81230,0.00,18.100,0,0.7180,6.8240,76.50,1.7940,24,666.0,20.20,48.45,22.74,8.40},
+        {11.08740,0.00,18.100,0,0.7180,6.4110,100.00,1.8589,24,666.0,20.20,318.75,15.02,16.70},
+        {7.02259,0.00,18.100,0,0.7180,6.0060,95.30,1.8746,24,666.0,20.20,319.98,15.70,14.20},
+        {12.04820,0.00,18.100,0,0.6140,5.6480,87.60,1.9512,24,666.0,20.20,291.55,14.10,20.80},
+        {7.05042,0.00,18.100,0,0.6140,6.1030,85.10,2.0218,24,666.0,20.20,2.52,23.29,13.40},
+        {8.79212,0.00,18.100,0,0.5840,5.5650,70.60,2.0635,24,666.0,20.20,3.65,17.16,11.70},
+        {15.86030,0.00,18.100,0,0.6790,5.8960,95.40,1.9096,24,666.0,20.20,7.68,24.39,8.30},
+        {12.24720,0.00,18.100,0,0.5840,5.8370,59.70,1.9976,24,666.0,20.20,24.65,15.69,10.20},
+        {37.66190,0.00,18.100,0,0.6790,6.2020,78.70,1.8629,24,666.0,20.20,18.82,14.52,10.90},
+        {7.36711,0.00,18.100,0,0.6790,6.1930,78.10,1.9356,24,666.0,20.20,96.73,21.52,11.00},
+        {9.33889,0.00,18.100,0,0.6790,6.3800,95.60,1.9682,24,666.0,20.20,60.72,24.08,9.50},
+        {8.49213,0.00,18.100,0,0.5840,6.3480,86.10,2.0527,24,666.0,20.20,83.45,17.64,14.50},
+        {10.06230,0.00,18.100,0,0.5840,6.8330,94.30,2.0882,24,666.0,20.20,81.33,19.69,14.10},
+        {6.44405,0.00,18.100,0,0.5840,6.4250,74.80,2.2004,24,666.0,20.20,97.95,12.03,16.10},
+        {5.58107,0.00,18.100,0,0.7130,6.4360,87.90,2.3158,24,666.0,20.20,100.19,16.22,14.30},
+        {13.91340,0.00,18.100,0,0.7130,6.2080,95.00,2.2222,24,666.0,20.20,100.63,15.17,11.70},
+        {11.16040,0.00,18.100,0,0.7400,6.6290,94.60,2.1247,24,666.0,20.20,109.85,23.27,13.40},
+        {14.42080,0.00,18.100,0,0.7400,6.4610,93.30,2.0026,24,666.0,20.20,27.49,18.05,9.60},
+        {15.17720,0.00,18.100,0,0.7400,6.1520,100.00,1.9142,24,666.0,20.20,9.32,26.45,8.70},
+        {13.67810,0.00,18.100,0,0.7400,5.9350,87.90,1.8206,24,666.0,20.20,68.95,34.02,8.40},
+        {9.39063,0.00,18.100,0,0.7400,5.6270,93.90,1.8172,24,666.0,20.20,396.90,22.88,12.80},
+        {22.05110,0.00,18.100,0,0.7400,5.8180,92.40,1.8662,24,666.0,20.20,391.45,22.11,10.50},
+        {9.72418,0.00,18.100,0,0.7400,6.4060,97.20,2.0651,24,666.0,20.20,385.96,19.52,17.10},
+        {5.66637,0.00,18.100,0,0.7400,6.2190,100.00,2.0048,24,666.0,20.20,395.69,16.59,18.40},
+        {9.96654,0.00,18.100,0,0.7400,6.4850,100.00,1.9784,24,666.0,20.20,386.73,18.85,15.40},
+        {12.80230,0.00,18.100,0,0.7400,5.8540,96.60,1.8956,24,666.0,20.20,240.52,23.79,10.80},
+        {10.67180,0.00,18.100,0,0.7400,6.4590,94.80,1.9879,24,666.0,20.20,43.06,23.98,11.80},
+        {6.28807,0.00,18.100,0,0.7400,6.3410,96.40,2.0720,24,666.0,20.20,318.01,17.79,14.90},
+        {9.92485,0.00,18.100,0,0.7400,6.2510,96.60,2.1980,24,666.0,20.20,388.52,16.44,12.60},
+        {9.32909,0.00,18.100,0,0.7130,6.1850,98.70,2.2616,24,666.0,20.20,396.90,18.13,14.10},
+        {7.52601,0.00,18.100,0,0.7130,6.4170,98.30,2.1850,24,666.0,20.20,304.21,19.31,13.00},
+        {6.71772,0.00,18.100,0,0.7130,6.7490,92.60,2.3236,24,666.0,20.20,0.32,17.44,13.40},
+        {5.44114,0.00,18.100,0,0.7130,6.6550,98.20,2.3552,24,666.0,20.20,355.29,17.73,15.20},
+        {5.09017,0.00,18.100,0,0.7130,6.2970,91.80,2.3682,24,666.0,20.20,385.09,17.27,16.10},
+        {8.24809,0.00,18.100,0,0.7130,7.3930,99.30,2.4527,24,666.0,20.20,375.87,16.74,17.80},
+        {9.51363,0.00,18.100,0,0.7130,6.7280,94.10,2.4961,24,666.0,20.20,6.68,18.71,14.90},
+        {4.75237,0.00,18.100,0,0.7130,6.5250,86.50,2.4358,24,666.0,20.20,50.92,18.13,14.10},
+        {4.66883,0.00,18.100,0,0.7130,5.9760,87.90,2.5806,24,666.0,20.20,10.48,19.01,12.70},
+        {8.20058,0.00,18.100,0,0.7130,5.9360,80.30,2.7792,24,666.0,20.20,3.50,16.94,13.50},
+        {7.75223,0.00,18.100,0,0.7130,6.3010,83.70,2.7831,24,666.0,20.20,272.21,16.23,14.90},
+        {6.80117,0.00,18.100,0,0.7130,6.0810,84.40,2.7175,24,666.0,20.20,396.90,14.70,20.00},
+        {4.81213,0.00,18.100,0,0.7130,6.7010,90.00,2.5975,24,666.0,20.20,255.23,16.42,16.40},
+        {3.69311,0.00,18.100,0,0.7130,6.3760,88.40,2.5671,24,666.0,20.20,391.43,14.65,17.70},
+        {6.65492,0.00,18.100,0,0.7130,6.3170,83.00,2.7344,24,666.0,20.20,396.90,13.99,19.50},
+        {5.82115,0.00,18.100,0,0.7130,6.5130,89.90,2.8016,24,666.0,20.20,393.82,10.29,20.20},
+        {7.83932,0.00,18.100,0,0.6550,6.2090,65.40,2.9634,24,666.0,20.20,396.90,13.22,21.40},
+        {3.16360,0.00,18.100,0,0.6550,5.7590,48.20,3.0665,24,666.0,20.20,334.40,14.13,19.90},
+        {3.77498,0.00,18.100,0,0.6550,5.9520,84.70,2.8715,24,666.0,20.20,22.01,17.15,19.00},
+        {4.42228,0.00,18.100,0,0.5840,6.0030,94.50,2.5403,24,666.0,20.20,331.29,21.32,19.10},
+        {15.57570,0.00,18.100,0,0.5800,5.9260,71.00,2.9084,24,666.0,20.20,368.74,18.13,19.10},
+        {13.07510,0.00,18.100,0,0.5800,5.7130,56.70,2.8237,24,666.0,20.20,396.90,14.76,20.10},
+        {4.34879,0.00,18.100,0,0.5800,6.1670,84.00,3.0334,24,666.0,20.20,396.90,16.29,19.90},
+        {4.03841,0.00,18.100,0,0.5320,6.2290,90.70,3.0993,24,666.0,20.20,395.33,12.87,19.60},
+        {3.56868,0.00,18.100,0,0.5800,6.4370,75.00,2.8965,24,666.0,20.20,393.37,14.36,23.20},
+        {4.64689,0.00,18.100,0,0.6140,6.9800,67.60,2.5329,24,666.0,20.20,374.68,11.66,29.80},
+        {8.05579,0.00,18.100,0,0.5840,5.4270,95.40,2.4298,24,666.0,20.20,352.58,18.14,13.80},
+        {6.39312,0.00,18.100,0,0.5840,6.1620,97.40,2.2060,24,666.0,20.20,302.76,24.10,13.30},
+        {4.87141,0.00,18.100,0,0.6140,6.4840,93.60,2.3053,24,666.0,20.20,396.21,18.68,16.70},
+        {15.02340,0.00,18.100,0,0.6140,5.3040,97.30,2.1007,24,666.0,20.20,349.48,24.91,12.00},
+        {10.23300,0.00,18.100,0,0.6140,6.1850,96.70,2.1705,24,666.0,20.20,379.70,18.03,14.60},
+        {14.33370,0.00,18.100,0,0.6140,6.2290,88.00,1.9512,24,666.0,20.20,383.32,13.11,21.40},
+        {5.82401,0.00,18.100,0,0.5320,6.2420,64.70,3.4242,24,666.0,20.20,396.90,10.74,23.00},
+        {5.70818,0.00,18.100,0,0.5320,6.7500,74.90,3.3317,24,666.0,20.20,393.07,7.74,23.70},
+        {5.73116,0.00,18.100,0,0.5320,7.0610,77.00,3.4106,24,666.0,20.20,395.28,7.01,25.00},
+        {2.81838,0.00,18.100,0,0.5320,5.7620,40.30,4.0983,24,666.0,20.20,392.92,10.42,21.80},
+        {2.37857,0.00,18.100,0,0.5830,5.8710,41.90,3.7240,24,666.0,20.20,370.73,13.34,20.60},
+        {3.67367,0.00,18.100,0,0.5830,6.3120,51.90,3.9917,24,666.0,20.20,388.62,10.58,21.20},
+        {5.69175,0.00,18.100,0,0.5830,6.1140,79.80,3.5459,24,666.0,20.20,392.68,14.98,19.10},
+        {4.83567,0.00,18.100,0,0.5830,5.9050,53.20,3.1523,24,666.0,20.20,388.22,11.45,20.60},
+        {0.15086,0.00,27.740,0,0.6090,5.4540,92.70,1.8209,4,711.0,20.10,395.09,18.06,15.20},
+        {0.18337,0.00,27.740,0,0.6090,5.4140,98.30,1.7554,4,711.0,20.10,344.05,23.97,7.00},
+        {0.20746,0.00,27.740,0,0.6090,5.0930,98.00,1.8226,4,711.0,20.10,318.43,29.68,8.10},
+        {0.10574,0.00,27.740,0,0.6090,5.9830,98.80,1.8681,4,711.0,20.10,390.11,18.07,13.60},
+        {0.11132,0.00,27.740,0,0.6090,5.9830,83.50,2.1099,4,711.0,20.10,396.90,13.35,20.10},
+        {0.17331,0.00,9.690,0,0.5850,5.7070,54.00,2.3817,6,391.0,19.20,396.90,12.01,21.80},
+        {0.27957,0.00,9.690,0,0.5850,5.9260,42.60,2.3817,6,391.0,19.20,396.90,13.59,24.50},
+        {0.17899,0.00,9.690,0,0.5850,5.6700,28.80,2.7986,6,391.0,19.20,393.29,17.60,23.10},
+        {0.28960,0.00,9.690,0,0.5850,5.3900,72.90,2.7986,6,391.0,19.20,396.90,21.14,19.70},
+        {0.26838,0.00,9.690,0,0.5850,5.7940,70.60,2.8927,6,391.0,19.20,396.90,14.10,18.30},
+        {0.23912,0.00,9.690,0,0.5850,6.0190,65.30,2.4091,6,391.0,19.20,396.90,12.92,21.20},
+        {0.17783,0.00,9.690,0,0.5850,5.5690,73.50,2.3999,6,391.0,19.20,395.77,15.10,17.50},
+        {0.22438,0.00,9.690,0,0.5850,6.0270,79.70,2.4982,6,391.0,19.20,396.90,14.33,16.80},
+        {0.06263,0.00,11.930,0,0.5730,6.5930,69.10,2.4786,1,273.0,21.00,391.99,9.67,22.40},
+        {0.04527,0.00,11.930,0,0.5730,6.1200,76.70,2.2875,1,273.0,21.00,396.90,9.08,20.60},
+        {0.06076,0.00,11.930,0,0.5730,6.9760,91.00,2.1675,1,273.0,21.00,396.90,5.64,23.90},
+        {0.10959,0.00,11.930,0,0.5730,6.7940,89.30,2.3889,1,273.0,21.00,393.45,6.48,22.00},
+        {0.04741,0.00,11.930,0,0.5730,6.0300,80.80,2.5050,1,273.0,21.00,396.90,7.88,11.90}
     };
 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java
index 460ca67..78ec9f5 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_1_Read_and_Learn.java
@@ -31,21 +31,30 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Usage of DecisionTreeClassificationTrainer to predict death in the disaster.
- *
- * Extract 3 features "pclass", "sibsp", "parch" to use in prediction.
+ * Usage of {@link DecisionTreeClassificationTrainer} to predict death in the disaster.
+ * <p>
+ * Extract 3 features "pclass", "sibsp", "parch" to use in prediction.</p>
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it trains the model based on the specified data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_1_Read_and_Learn {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 1 (read and learn) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_1_Read_and_Learn.class.getSimpleName(), () -> {
                 try {
-
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
-                    IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> VectorUtils.of((double) v[0], (double) v[5], (double) v[6]);
+                    IgniteBiFunction<Integer, Object[], Vector> featureExtractor
+                        = (k, v) -> VectorUtils.of((double) v[0], (double) v[5], (double) v[6]);
 
                     IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double) v[1];
 
@@ -58,6 +67,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -69,6 +80,7 @@
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
 
+                    System.out.println(">>> Tutorial step 1 (read and learn) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java
index 8127a51..f86e1b6 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_2_Imputing.java
@@ -32,18 +32,31 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Usage of imputer to fill missed data (Double.NaN) values in the chosen columns.
+ * Usage of {@link ImputerTrainer} to fill missed data ({@code Double.NaN}) values in the chosen columns.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and
+ * <a href="https://en.wikipedia.org/wiki/Imputation_(statistics)">impute</a> missing values.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_2_Imputing {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 2 (imputing) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_2_Imputing.class.getSimpleName(), () -> {
                 try {
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
-                    IgniteBiFunction<Integer, Object[], Vector> featureExtractor = (k, v) -> VectorUtils.of((double) v[0], (double) v[5], (double) v[6]);
+                    IgniteBiFunction<Integer, Object[], Vector> featureExtractor
+                        = (k, v) -> VectorUtils.of((double) v[0], (double) v[5], (double) v[6]);
 
                     IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double) v[1];
 
@@ -63,6 +76,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -73,6 +88,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 2 (imputing) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java
index e623083..03ff527 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial.java
@@ -23,8 +23,8 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
 import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
 import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
 import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator;
 import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
@@ -33,13 +33,25 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Let's add two categorial features "sex", "embarked" to predict more precisely.
- *
- * To encode categorial features the StringEncoderTrainer will be used.
+ * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link Step_1_Read_and_Learn}.
+ * <p>
+ * To encode categorial features the String kind type of {@link EncoderTrainer} will be used.</p>
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and encode string values (categories)
+ * to double values in specified range.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_3_Categorial {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 3 (categorial) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_3_Categorial.class.getSimpleName(), () -> {
@@ -54,8 +66,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(4)
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(4)
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -77,6 +89,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -87,6 +101,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 3 (categorial) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java
index d80f647..a4535ba 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_3_Categorial_with_One_Hot_Encoder.java
@@ -23,8 +23,8 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
 import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
 import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
 import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator;
 import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
@@ -33,13 +33,26 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Let's add two categorial features "sex", "embarked" to predict more precisely.
- *
- * To encode categorial features the StringEncoderTrainer will be used.
+ * Let's add two categorial features "sex", "embarked" to predict more precisely than in {@link Step_1_Read_and_Learn}..
+ * <p>
+ * To encode categorial features the {@link EncoderTrainer} of the
+ * <a href="https://en.wikipedia.org/wiki/One-hot">One-hot</a> type will be used.</p>
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and encode string values (categories)
+ * to double values in specified range.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_3_Categorial_with_One_Hot_Encoder {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 3 (categorial with One-hot encoder) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_3_Categorial_with_One_Hot_Encoder.class.getSimpleName(), () -> {
@@ -55,9 +68,9 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> oneHotEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.ONE_HOT_ENCODER)
-                        .encodeFeature(0)
-                        .encodeFeature(1)
-                        .encodeFeature(4)
+                        .withEncodedFeature(0)
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(4)
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -79,6 +92,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -89,6 +104,9 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 3 (categorial with One-hot encoder) example started.");
+
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java
index 2ea9860..789d7e8 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_4_Add_age_fare.java
@@ -33,11 +33,23 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Add yet two numerical features "age", "fare" to improve our model.
+ * Add yet two numerical features "age", "fare" to improve our model over {@link Step_3_Categorial}.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and encode string values (categories)
+ * to double values in specified range.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_4_Add_age_fare {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 4 (add age and fare) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_4_Add_age_fare.class.getSimpleName(), () -> {
@@ -45,7 +57,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -53,8 +65,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -76,6 +88,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -86,6 +100,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 4 (add age and fare) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java
index 01a4c3f..e3de585 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling.java
@@ -35,11 +35,24 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * MinMaxScalerTrainer and NormalizationTrainer are used in this example due to different values distribution in columns and rows.
+ * {@link MinMaxScalerTrainer} and {@link NormalizationTrainer} are used in this example due to different values
+ * distribution in columns and rows.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and perform other desired changes
+ * over the extracted data, including the scaling.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_5_Scaling {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 5 (scaling) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_5_Scaling.class.getSimpleName(), () -> {
@@ -47,7 +60,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -55,8 +68,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -68,21 +81,20 @@
                             strEncoderPreprocessor
                         );
 
-
                     IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
                         .fit(
-                        ignite,
-                        dataCache,
-                        imputingPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            imputingPreprocessor
+                        );
 
                     IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor = new NormalizationTrainer<Integer, Object[]>()
                         .withP(1)
                         .fit(
-                        ignite,
-                        dataCache,
-                        minMaxScalerPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            minMaxScalerPreprocessor
+                        );
 
                     DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0);
 
@@ -94,6 +106,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         mdl,
@@ -104,6 +118,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 5 (scaling) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling_with_Pipeline.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling_with_Pipeline.java
new file mode 100644
index 0000000..1d5900f
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_5_Scaling_with_Pipeline.java
@@ -0,0 +1,107 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.tutorial;
+
+import java.io.FileNotFoundException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.pipeline.Pipeline;
+import org.apache.ignite.ml.pipeline.PipelineMdl;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
+import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
+import org.apache.ignite.ml.preprocessing.minmaxscaling.MinMaxScalerTrainer;
+import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer;
+import org.apache.ignite.ml.selection.scoring.evaluator.Evaluator;
+import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
+import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer;
+import org.apache.ignite.thread.IgniteThread;
+
+/**
+ * {@link MinMaxScalerTrainer} and {@link NormalizationTrainer} are used in this example due to different values
+ * distribution in columns and rows.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and perform other desired changes
+ * over the extracted data, including the scaling.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
+ */
+public class Step_5_Scaling_with_Pipeline {
+    /** Run example. */
+    public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 5 (scaling) via Pipeline example started.");
+
+        try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
+            IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+                Step_5_Scaling_with_Pipeline.class.getSimpleName(), () -> {
+                try {
+                    IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
+
+                    // Defines first preprocessor that extracts features from an upstream data.
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
+                    IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
+                        = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
+
+                    IgniteBiFunction<Integer, Object[], Double> lbExtractor = (k, v) -> (double) v[1];
+
+                    PipelineMdl<Integer, Object[]> mdl = new Pipeline<Integer, Object[], Object[]>()
+                        .addFeatureExtractor(featureExtractor)
+                        .addLabelExtractor(lbExtractor)
+                        .addPreprocessor(new EncoderTrainer<Integer, Object[]>()
+                            .withEncoderType(EncoderType.STRING_ENCODER)
+                            .withEncodedFeature(1)
+                            .withEncodedFeature(6))
+                        .addPreprocessor(new ImputerTrainer<Integer, Object[]>())
+                        .addPreprocessor(new MinMaxScalerTrainer<Integer, Object[]>())
+                        .addPreprocessor(new NormalizationTrainer<Integer, Object[]>()
+                            .withP(1))
+                        .addTrainer(new DecisionTreeClassificationTrainer(5, 0))
+                        .fit(ignite, dataCache);
+
+                    System.out.println("\n>>> Trained model: " + mdl);
+
+                    double accuracy = Evaluator.evaluate(
+                        dataCache,
+                        mdl,
+                        mdl.getFeatureExtractor(),
+                        mdl.getLabelExtractor(),
+                        new Accuracy<>()
+                    );
+
+                    System.out.println("\n>>> Accuracy " + accuracy);
+                    System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 5 (scaling) via Pipeline example completed.");
+                }
+                catch (FileNotFoundException e) {
+                    e.printStackTrace();
+                }
+            });
+
+            igniteThread.start();
+            igniteThread.join();
+        }
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_6_KNN.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_6_KNN.java
index e07e9f8..e99494b 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_6_KNN.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_6_KNN.java
@@ -21,9 +21,9 @@
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
-import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
+import org.apache.ignite.ml.knn.NNClassificationModel;
 import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer;
@@ -36,11 +36,24 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Sometimes is better to change algorithm, let's say on kNN.
+ * Change classification algorithm that was used in {@link Step_5_Scaling} from decision tree to kNN
+ * ({@link KNNClassificationTrainer}) because sometimes this can be beneficial.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines preprocessors that extract features from an upstream data and perform other desired changes
+ * over the extracted data.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using kNN classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_6_KNN {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 6 (kNN) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_6_KNN.class.getSimpleName(), () -> {
@@ -48,7 +61,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -56,8 +69,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -69,31 +82,32 @@
                             strEncoderPreprocessor
                         );
 
-
                     IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
                         .fit(
-                        ignite,
-                        dataCache,
-                        imputingPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            imputingPreprocessor
+                        );
 
                     IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor = new NormalizationTrainer<Integer, Object[]>()
                         .withP(1)
                         .fit(
-                        ignite,
-                        dataCache,
-                        minMaxScalerPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            minMaxScalerPreprocessor
+                        );
 
                     KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
                     // Train decision tree model.
-                    KNNClassificationModel mdl = trainer.fit(
+                    NNClassificationModel mdl = trainer.fit(
                         ignite,
                         dataCache,
                         normalizationPreprocessor,
                         lbExtractor
-                    ).withK(1).withStrategy(KNNStrategy.WEIGHTED);
+                    ).withK(1).withStrategy(NNStrategy.WEIGHTED);
+
+                    System.out.println("\n>>> Trained model: " + mdl);
 
                     double accuracy = Evaluator.evaluate(
                         dataCache,
@@ -105,6 +119,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 6 (kNN) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java
index f62054e..2ce2b27 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_7_Split_train_test.java
@@ -37,13 +37,25 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * The highest accuracy in the previous example is the result of overfitting.
- *
- * For real model estimation is better to use test-train split via TrainTestDatasetSplitter.
+ * The highest accuracy in the previous example ({@link Step_6_KNN}) is the result of
+ * <a href="https://en.wikipedia.org/wiki/Overfitting">overfitting</a>.
+ * For real model estimation is better to use test-train split via {@link TrainTestDatasetSplitter}.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines how to split the data to train and test sets and configures preprocessors that extract
+ * features from an upstream data and perform other desired changes over the extracted data.</p>
+ * <p>
+ * Then, it trains the model based on the processed data using decision tree classification.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_7_Split_train_test {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 7 (split to train and test) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_7_Split_train_test.class.getSimpleName(), () -> {
@@ -51,7 +63,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -62,8 +74,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -75,21 +87,20 @@
                             strEncoderPreprocessor
                         );
 
-
                     IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
                         .fit(
-                        ignite,
-                        dataCache,
-                        imputingPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            imputingPreprocessor
+                        );
 
                     IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor = new NormalizationTrainer<Integer, Object[]>()
                         .withP(1)
                         .fit(
-                        ignite,
-                        dataCache,
-                        minMaxScalerPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            minMaxScalerPreprocessor
+                        );
 
                     DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(5, 0);
 
@@ -102,6 +113,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + mdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         split.getTestFilter(),
@@ -113,6 +126,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 7 (split to train and test) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java
index d7e6e27..83c2cca 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV.java
@@ -40,21 +40,34 @@
 
 /**
  * To choose the best hyperparameters the cross-validation will be used in this example.
- *
- * The purpose of cross-validation is model checking, not model building.
- *
- * We train k different models.
- *
- * They differ in that 1/(k-1)th of the training data is exchanged against other cases.
- *
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines how to split the data to train and test sets and configures preprocessors that extract
+ * features from an upstream data and perform other desired changes over the extracted data.</p>
+ * <p>
+ * Then, it tunes hyperparams with K-fold Cross-Validation on the split training set and trains the model based on
+ * the processed data using decision tree classification and the obtained hyperparams.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
+ * <p>
+ * The purpose of cross-validation is model checking, not model building.</p>
+ * <p>
+ * We train {@code k} different models.</p>
+ * <p>
+ * They differ in that {@code 1/(k-1)}th of the training data is exchanged against other cases.</p>
+ * <p>
  * These models are sometimes called surrogate models because the (average) performance measured for these models
- * is taken as a surrogate of the performance of the model trained on all cases.
- *
- * All scenarios are described there: https://sebastianraschka.com/faq/docs/evaluate-a-model.html
+ * is taken as a surrogate of the performance of the model trained on all cases.</p>
+ * <p>
+ * All scenarios are described there: https://sebastianraschka.com/faq/docs/evaluate-a-model.html</p>
  */
 public class Step_8_CV {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 8 (cross-validation) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_8_CV.class.getSimpleName(), () -> {
@@ -62,7 +75,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare".
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -73,8 +86,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -88,12 +101,12 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
                         .fit(
-                        ignite,
-                        dataCache,
-                        imputingPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            imputingPreprocessor
+                        );
 
-                    // Tune hyperparams with K-fold Cross-Validation on the splitted training set.
+                    // Tune hyperparams with K-fold Cross-Validation on the split training set.
                     int[] pSet = new int[]{1, 2};
                     int[] maxDeepSet = new int[]{1, 2, 3, 4, 5, 10, 20};
                     int bestP = 1;
@@ -102,7 +115,8 @@
 
                     for(int p: pSet){
                         for(int maxDeep: maxDeepSet){
-                            IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor = new NormalizationTrainer<Integer, Object[]>()
+                            IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor
+                                = new NormalizationTrainer<Integer, Object[]>()
                                 .withP(p)
                                 .fit(
                                     ignite,
@@ -110,7 +124,8 @@
                                     minMaxScalerPreprocessor
                                 );
 
-                            DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(maxDeep, 0);
+                            DecisionTreeClassificationTrainer trainer
+                                = new DecisionTreeClassificationTrainer(maxDeep, 0);
 
                             CrossValidation<DecisionTreeNode, Double, Integer, Object[]> scoreCalculator
                                 = new CrossValidation<>();
@@ -161,6 +176,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + bestMdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         split.getTestFilter(),
@@ -172,6 +189,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 8 (cross-validation) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java
index 9311cfb..73a0303 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_8_CV_with_Param_Grid.java
@@ -41,22 +41,35 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * To choose the best hyperparameters the cross-validation will be used in this example.
- *
- * The purpose of cross-validation is model checking, not model building.
- *
- * We train k different models.
- *
- * They differ in that 1/(k-1)th of the training data is exchanged against other cases.
- *
+ * To choose the best hyperparameters the cross-validation with {@link ParamGrid} will be used in this example.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines how to split the data to train and test sets and configures preprocessors that extract
+ * features from an upstream data and perform other desired changes over the extracted data.</p>
+ * <p>
+ * Then, it tunes hyperparams with K-fold Cross-Validation on the split training set and trains the model based on
+ * the processed data using decision tree classification and the obtained hyperparams.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
+ * <p>
+ * The purpose of cross-validation is model checking, not model building.</p>
+ * <p>
+ * We train {@code k} different models.</p>
+ * <p>
+ * They differ in that {@code 1/(k-1)}th of the training data is exchanged against other cases.</p>
+ * <p>
  * These models are sometimes called surrogate models because the (average) performance measured for these models
- * is taken as a surrogate of the performance of the model trained on all cases.
- *
- * All scenarios are described there: https://sebastianraschka.com/faq/docs/evaluate-a-model.html
+ * is taken as a surrogate of the performance of the model trained on all cases.</p>
+ * <p>
+ * All scenarios are described there: https://sebastianraschka.com/faq/docs/evaluate-a-model.html</p>
  */
 public class Step_8_CV_with_Param_Grid {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 8 (cross-validation with param grid) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_8_CV_with_Param_Grid.class.getSimpleName(), () -> {
@@ -64,7 +77,7 @@
                     IgniteCache<Integer, Object[]> dataCache = TitanicUtils.readPassengers(ignite);
 
                     // Defines first preprocessor that extracts features from an upstream data.
-                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+                    // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare" .
                     IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
                         = (k, v) -> new Object[]{v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
 
@@ -75,8 +88,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here.
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -103,7 +116,7 @@
                             minMaxScalerPreprocessor
                         );
 
-                    // Tune hyperparams with K-fold Cross-Validation on the splitted training set.
+                    // Tune hyperparams with K-fold Cross-Validation on the split training set.
 
                     DecisionTreeClassificationTrainer trainerCV = new DecisionTreeClassificationTrainer();
 
@@ -126,7 +139,8 @@
                         paramGrid
                     );
 
-                    System.out.println("Train with maxDeep: " + crossValidationRes.getBest("maxDeep") + " and minImpurityDecrease: " + crossValidationRes.getBest("minImpurityDecrease"));
+                    System.out.println("Train with maxDeep: " + crossValidationRes.getBest("maxDeep")
+                        + " and minImpurityDecrease: " + crossValidationRes.getBest("minImpurityDecrease"));
 
                     DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer()
                         .withMaxDeep(crossValidationRes.getBest("maxDeep"))
@@ -138,9 +152,8 @@
                     System.out.println("Best hyper params: " + crossValidationRes.getBestHyperParams());
                     System.out.println("Best average score: " + crossValidationRes.getBestAvgScore());
 
-                    crossValidationRes.getScoringBoard().forEach((hyperParams, score) -> {
-                        System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams);
-                    });
+                    crossValidationRes.getScoringBoard().forEach((hyperParams, score)
+                        -> System.out.println("Score " + Arrays.toString(score) + " for hyper params " + hyperParams));
 
                     // Train decision tree model.
                     DecisionTreeNode bestMdl = trainer.fit(
@@ -151,6 +164,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + bestMdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         split.getTestFilter(),
@@ -162,6 +177,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 8 (cross-validation with param grid) example started.");
                 } catch (FileNotFoundException e) {
                     e.printStackTrace();
                 }
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_9_Go_to_LogReg.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_9_Go_to_LogReg.java
index 9fcc9ba..088caf7 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_9_Go_to_LogReg.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/Step_9_Go_to_LogReg.java
@@ -42,13 +42,25 @@
 import org.apache.ignite.thread.IgniteThread;
 
 /**
- * Maybe the another algorithm can give us the higher accuracy?
- *
- * Let's win with the LogisticRegressionSGDTrainer!
+ * Change classification algorithm that was used in {@link Step_8_CV_with_Param_Grid} from decision tree to logistic
+ * regression ({@link LogisticRegressionSGDTrainer}) because sometimes this can give the higher accuracy.
+ * <p>
+ * Code in this example launches Ignite grid and fills the cache with test data (based on Titanic passengers data).</p>
+ * <p>
+ * After that it defines how to split the data to train and test sets and configures preprocessors that extract
+ * features from an upstream data and perform other desired changes over the extracted data.</p>
+ * <p>
+ * Then, it tunes hyperparams with K-fold Cross-Validation on the split training set and trains the model based on
+ * the processed data using logistic regression and the obtained hyperparams.</p>
+ * <p>
+ * Finally, this example uses {@link Evaluator} functionality to compute metrics from predictions.</p>
  */
 public class Step_9_Go_to_LogReg {
     /** Run example. */
     public static void main(String[] args) throws InterruptedException {
+        System.out.println();
+        System.out.println(">>> Tutorial step 9 (logistic regression) example started.");
+
         try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) {
             IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
                 Step_9_Go_to_LogReg.class.getSimpleName(), () -> {
@@ -67,8 +79,8 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
                         .withEncoderType(EncoderType.STRING_ENCODER)
-                        .encodeFeature(1)
-                        .encodeFeature(6) // <--- Changed index here
+                        .withEncodedFeature(1)
+                        .withEncodedFeature(6) // <--- Changed index here
                         .fit(ignite,
                             dataCache,
                             featureExtractor
@@ -82,19 +94,18 @@
 
                     IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
                         .fit(
-                        ignite,
-                        dataCache,
-                        imputingPreprocessor
-                    );
+                            ignite,
+                            dataCache,
+                            imputingPreprocessor
+                        );
 
-                    // Tune hyperparams with K-fold Cross-Validation on the splitted training set.
+                    // Tune hyperparams with K-fold Cross-Validation on the split training set.
                     int[] pSet = new int[]{1, 2};
                     int[] maxIterationsSet = new int[]{ 100, 1000};
                     int[] batchSizeSet = new int[]{100, 10};
                     int[] locIterationsSet = new int[]{10, 100};
                     double[] learningRateSet = new double[]{0.1, 0.2, 0.5};
 
-
                     int bestP = 1;
                     int bestMaxIterations = 100;
                     int bestBatchSize = 10;
@@ -107,8 +118,8 @@
                             for (int batchSize : batchSizeSet) {
                                 for (int locIterations : locIterationsSet) {
                                     for (double learningRate : learningRateSet) {
-
-                                        IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor = new NormalizationTrainer<Integer, Object[]>()
+                                        IgniteBiFunction<Integer, Object[], Vector> normalizationPreprocessor
+                                            = new NormalizationTrainer<Integer, Object[]>()
                                             .withP(p)
                                             .fit(
                                                 ignite,
@@ -116,14 +127,15 @@
                                                 minMaxScalerPreprocessor
                                             );
 
-                                        LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
+                                        LogisticRegressionSGDTrainer<?> trainer
+                                            = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
                                             new SimpleGDUpdateCalculator(learningRate),
                                             SimpleGDParameterUpdate::sumLocal,
                                             SimpleGDParameterUpdate::avg
                                         ), maxIterations, batchSize, locIterations, 123L);
 
-                                        CrossValidation<LogisticRegressionModel, Double, Integer, Object[]> scoreCalculator
-                                            = new CrossValidation<>();
+                                        CrossValidation<LogisticRegressionModel, Double, Integer, Object[]>
+                                            scoreCalculator = new CrossValidation<>();
 
                                         double[] scores = scoreCalculator.score(
                                             trainer,
@@ -193,6 +205,8 @@
                         lbExtractor
                     );
 
+                    System.out.println("\n>>> Trained model: " + bestMdl);
+
                     double accuracy = Evaluator.evaluate(
                         dataCache,
                         split.getTestFilter(),
@@ -204,6 +218,8 @@
 
                     System.out.println("\n>>> Accuracy " + accuracy);
                     System.out.println("\n>>> Test Error " + (1 - accuracy));
+
+                    System.out.println(">>> Tutorial step 9 (logistic regression) example completed.");
                 }
                 catch (FileNotFoundException e) {
                     e.printStackTrace();
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TitanicUtils.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TitanicUtils.java
index a339638..3a68ecb 100644
--- a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TitanicUtils.java
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TitanicUtils.java
@@ -38,7 +38,7 @@
      *
      * @param ignite The ignite.
      * @return The filled cache.
-     * @throws FileNotFoundException
+     * @throws FileNotFoundException If data file is not found.
      */
     public static IgniteCache<Integer, Object[]> readPassengers(Ignite ignite)
         throws FileNotFoundException {
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TutorialStepByStepExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TutorialStepByStepExample.java
new file mode 100644
index 0000000..67f4bf5
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/tutorial/TutorialStepByStepExample.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.tutorial;
+
+/**
+ * Run all the tutorial examples step by step with primary purpose to provide
+ * automatic execution from {@code IgniteExamplesMLTestSuite}.
+ */
+public class TutorialStepByStepExample {
+    /** Run examples with default settings. */
+    public static void main(String[] args) throws InterruptedException {
+        Step_1_Read_and_Learn.main(args);
+        Step_2_Imputing.main(args);
+        Step_3_Categorial.main(args);
+        Step_3_Categorial_with_One_Hot_Encoder.main(args);
+        Step_4_Add_age_fare.main(args);
+        Step_5_Scaling.main(args);
+        Step_6_KNN.main(args);
+        Step_7_Split_train_test.main(args);
+        Step_8_CV.main(args);
+        Step_8_CV_with_Param_Grid.main(args);
+        Step_9_Go_to_LogReg.main(args);
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/util/DatasetHelper.java b/examples/src/main/java/org/apache/ignite/examples/ml/util/DatasetHelper.java
new file mode 100644
index 0000000..96de9ad
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/util/DatasetHelper.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.util;
+
+import java.util.Arrays;
+import org.apache.ignite.ml.dataset.primitive.SimpleDataset;
+
+/**
+ * Common utility code used in some ML examples to report some statistic metrics of the dataset.
+ */
+public class DatasetHelper {
+    /** */
+    private final SimpleDataset dataset;
+
+    /** */
+    public DatasetHelper(SimpleDataset dataset) {
+        this.dataset = dataset;
+    }
+
+    /** */
+    public void describe() {
+        // Calculation of the mean value. This calculation will be performed in map-reduce manner.
+        double[] mean = dataset.mean();
+        System.out.println("Mean \n\t" + Arrays.toString(mean));
+
+        // Calculation of the standard deviation. This calculation will be performed in map-reduce manner.
+        double[] std = dataset.std();
+        System.out.println("Standard deviation \n\t" + Arrays.toString(std));
+
+        // Calculation of the covariance matrix.  This calculation will be performed in map-reduce manner.
+        double[][] cov = dataset.cov();
+        System.out.println("Covariance matrix ");
+        for (double[] row : cov)
+            System.out.println("\t" + Arrays.toString(row));
+
+        // Calculation of the correlation matrix. This calculation will be performed in map-reduce manner.
+        double[][] corr = dataset.corr();
+        System.out.println("Correlation matrix ");
+        for (double[] row : corr)
+            System.out.println("\t" + Arrays.toString(row));
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/util/MLExamplesCommonArgs.java b/examples/src/main/java/org/apache/ignite/examples/ml/util/MLExamplesCommonArgs.java
new file mode 100644
index 0000000..23cbe27
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/util/MLExamplesCommonArgs.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.util;
+
+/**
+ * Some common arguments for examples in ML module.
+ */
+public class MLExamplesCommonArgs {
+    /**
+     * Unattended argument.
+     */
+    public static String UNATTENDED = "unattended";
+
+    /** Empty args for ML examples. */
+    public static final String[] EMPTY_ARGS_ML = new String[] {"--" + UNATTENDED};
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/util/TestCache.java b/examples/src/main/java/org/apache/ignite/examples/ml/util/TestCache.java
new file mode 100644
index 0000000..454aa76
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/util/TestCache.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.examples.ml.util;
+
+import java.util.UUID;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+
+/**
+ * Common utility code used in some ML examples to set up test cache.
+ */
+public class TestCache {
+    /** */
+    private final Ignite ignite;
+
+    /** */
+    public TestCache(Ignite ignite) {
+        this.ignite = ignite;
+    }
+
+    /**
+     * Fills cache with data and returns it.
+     *
+     * @param data Data to fill the cache with.
+     * @return Filled Ignite Cache.
+     */
+    public IgniteCache<Integer, double[]> fillCacheWith(double[][] data) {
+        CacheConfiguration<Integer, double[]> cacheConfiguration = new CacheConfiguration<>();
+        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
+        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
+
+        IgniteCache<Integer, double[]> cache = ignite.createCache(cacheConfiguration);
+
+        for (int i = 0; i < data.length; i++)
+            cache.put(i, data[i]);
+
+        return cache;
+    }
+
+    /**
+     * Fills cache with data and returns it.
+     *
+     * @param data Data to fill the cache with.
+     * @return Filled Ignite Cache.
+     */
+    public IgniteCache<Integer, Vector> getVectors(double[][] data) {
+        CacheConfiguration<Integer, Vector> cacheConfiguration = new CacheConfiguration<>();
+        cacheConfiguration.setName("TEST_" + UUID.randomUUID());
+        cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
+
+        IgniteCache<Integer, Vector> cache = ignite.createCache(cacheConfiguration);
+
+        for (int i = 0; i < data.length; i++)
+            cache.put(i, VectorUtils.of(data[i]));
+
+        return cache;
+    }
+}
diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/util/package-info.java b/examples/src/main/java/org/apache/ignite/examples/ml/util/package-info.java
new file mode 100644
index 0000000..5f912e1
--- /dev/null
+++ b/examples/src/main/java/org/apache/ignite/examples/ml/util/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Utilities for machine learning examples.
+ */
+package org.apache.ignite.examples.ml.util;
\ No newline at end of file
diff --git a/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesMLTestSuite.java b/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesMLTestSuite.java
index df85f1a..6b41301 100644
--- a/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesMLTestSuite.java
+++ b/examples/src/test/java/org/apache/ignite/testsuites/IgniteExamplesMLTestSuite.java
@@ -30,7 +30,7 @@
 import javassist.CtNewMethod;
 import javassist.NotFoundException;
 import junit.framework.TestSuite;
-import org.apache.ignite.examples.ml.MLExamplesCommonArgs;
+import org.apache.ignite.examples.ml.util.MLExamplesCommonArgs;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridAbstractExamplesTest;
 
diff --git a/idea/ignite_inspections.xml b/idea/ignite_inspections.xml
new file mode 100644
index 0000000..7ce5af8
--- /dev/null
+++ b/idea/ignite_inspections.xml
@@ -0,0 +1,772 @@
+<profile version="1.0">
+  <option name="myName" value="ignite_inspections" />
+  <inspection_tool class="AbstractMethodCallInConstructor" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="AccessorLikeMethodIsEmptyParen" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AccessorLikeMethodIsUnit" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintAddJavascriptInterface" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintAllowAllHostnameVerifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintAlwaysShowAction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintAppCompatMethod" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintAuthLeak" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintBadHostnameVerifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintBatteryLife" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintCommitPrefEdits" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintCommitTransaction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintCustomViewStyleable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintCutPasteId" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintDefaultLocale" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintDrawAllocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintExportedContentProvider" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintExportedPreferenceActivity" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintExportedReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintExportedService" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintFloatMath" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintGetInstance" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintGifUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintGoogleAppIndexingUrlError" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintGoogleAppIndexingWarning" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintGrantAllUris" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintHandlerLeak" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconColors" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconDensities" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconDipSize" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconDuplicates" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconDuplicatesConfig" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconExtension" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconLauncherShape" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconLocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconMissingDensityFolder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconMixedNinePatch" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconNoDpi" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintIconXmlAndPng" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintInconsistentLayout" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintInflateParams" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintInlinedApi" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintInvalidUsesTagAttribute" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintJavascriptInterface" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintLocalSuppress" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintLogTagMismatch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintLongLogTag" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintMergeRootFrame" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintMissingIntentFilterForMediaSearch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintMissingMediaBrowserServiceIntentFilter" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintMissingOnPlayFromSearch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintMissingSuperCall" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintNewApi" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintOverdraw" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintOverride" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintOverrideAbstract" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintPackageManagerGetSignatures" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintParcelClassLoader" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintParcelCreator" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintPendingBindings" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintPluralsCandidate" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintPrivateResource" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRecycle" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRecyclerView" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRegistered" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRequiredSize" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRtlCompat" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRtlEnabled" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRtlHardcoded" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintRtlSymmetry" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSQLiteString" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSSLCertificateSocketFactoryCreateSocket" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSSLCertificateSocketFactoryGetInsecure" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSdCardPath" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSecureRandom" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintServiceCast" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSetJavaScriptEnabled" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSetTextI18n" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSetWorldReadable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSetWorldWritable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintShiftFlags" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintShortAlarm" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintShowToast" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSimpleDateFormat" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintStringFormatCount" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintStringFormatInvalid" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintStringFormatMatches" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSupportAnnotationUsage" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSuspiciousImport" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintSwitchIntDef" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintTrustAllX509TrustManager" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUniqueConstants" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnlocalizedSms" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnprotectedSMSBroadcastReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnsafeDynamicallyLoadedCode" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnsafeNativeCodeLocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnsafeProtectedBroadcastReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUnusedAttribute" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUseSparseArrays" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintUseValueOf" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintValidFragment" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintViewConstructor" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintViewHolder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintViewTag" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintWorldReadableFiles" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintWorldWriteableFiles" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintWrongCall" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidKLintWrongViewCast" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAaptCrash" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAccidentalOctal" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAdapterViewChildren" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAddJavascriptInterface" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAllowAllHostnameVerifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAllowBackup" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAlwaysShowAction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAppCompatMethod" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAppCompatResource" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAssert" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintAuthLeak" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintBadHostnameVerifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintBatteryLife" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintButtonCase" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintButtonOrder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintButtonStyle" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintByteOrderMark" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintCommitPrefEdits" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintCommitTransaction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintContentDescription" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintCustomViewStyleable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintCutPasteId" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDefaultLocale" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDeprecated" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDeviceAdmin" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDisableBaselineAlignment" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDrawAllocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDuplicateActivity" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDuplicateDefinition" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDuplicateIds" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDuplicateIncludedIds" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintDuplicateUsesFeature" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintEnforceUTF8" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExportedContentProvider" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExportedPreferenceActivity" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExportedReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExportedService" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExtraText" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintExtraTranslation" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintFloatMath" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintFullBackupContent" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGetInstance" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGifUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGoogleAppIndexingUrlError" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGoogleAppIndexingWarning" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleCompatible" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleDependency" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleDeprecated" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleDynamicVersion" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleGetter" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleIdeError" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradleOverrides" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradlePath" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGradlePluginVersion" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGrantAllUris" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintGridLayout" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintHandlerLeak" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintHardcodedDebugMode" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintHardcodedText" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintHardwareIds" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconColors" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconDensities" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconDipSize" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconDuplicates" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconDuplicatesConfig" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconExtension" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconLauncherShape" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconLocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconMissingDensityFolder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconMixedNinePatch" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconNoDpi" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIconXmlAndPng" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIllegalResourceRef" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintImpliedQuantity" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInOrMmUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintIncludeLayoutParam" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInconsistentArrays" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInconsistentLayout" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInefficientWeight" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInflateParams" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInlinedApi" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInnerclassSeparator" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInvalidId" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInvalidResourceFolder" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintInvalidUsesTagAttribute" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintJavascriptInterface" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLabelFor" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLibraryCustomView" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLocalSuppress" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLocaleFolder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLogTagMismatch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintLongLogTag" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintManifestOrder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintManifestResource" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMenuTitle" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMergeRootFrame" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMipmapIcons" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingApplicationIcon" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingConstraints" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingId" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingIntentFilterForMediaSearch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingLeanbackLauncher" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingLeanbackSupport" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingMediaBrowserServiceIntentFilter" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingOnPlayFromSearch" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingPrefix" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingQuantity" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingSuperCall" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingTranslation" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingTvBanner" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMissingVersion" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMockLocation" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintMultipleUsesSdk" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNestedScrolling" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNestedWeights" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNewApi" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNfcTechWhitespace" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNotInterpolated" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintNotSibling" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintObsoleteLayoutParam" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintOldTargetApi" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintOrientation" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintOverdraw" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintOverride" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintOverrideAbstract" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPackageManagerGetSignatures" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPackagedPrivateKey" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintParcelClassLoader" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintParcelCreator" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPendingBindings" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPermissionImpliesUnsupportedHardware" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPluralsCandidate" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPrivateResource" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintProguard" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintProguardSplit" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPropertyEscape" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintProtectedPermissions" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintPxUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRecycle" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRecyclerView" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintReferenceType" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRegistered" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRelativeOverlap" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRequiredSize" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintResAuto" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintResourceCycle" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintResourceName" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRtlCompat" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRtlEnabled" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRtlHardcoded" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintRtlSymmetry" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSQLiteString" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSSLCertificateSocketFactoryCreateSocket" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSSLCertificateSocketFactoryGetInsecure" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintScrollViewCount" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintScrollViewSize" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSdCardPath" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSecureRandom" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintServiceCast" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSetJavaScriptEnabled" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSetTextI18n" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSetWorldReadable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSetWorldWritable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintShiftFlags" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintShortAlarm" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintShowToast" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSignatureOrSystemPermissions" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSimpleDateFormat" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSmallSp" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSpUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStateListReachable" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStaticFieldLeak" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStringFormatCount" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStringFormatInvalid" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStringFormatMatches" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintStringShouldBeInt" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSupportAnnotationUsage" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSuspicious0dp" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSuspiciousImport" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintSwitchIntDef" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTextFields" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTextViewEdits" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTooDeepLayout" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTooManyViews" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTrustAllX509TrustManager" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTypographyDashes" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTypographyEllipsis" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTypographyFractions" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTypographyOther" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintTypos" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUniqueConstants" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUniquePermission" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnknownId" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnknownIdInLayout" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnlocalizedSms" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnprotectedSMSBroadcastReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnsafeDynamicallyLoadedCode" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnsafeNativeCodeLocation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnsafeProtectedBroadcastReceiver" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnsupportedTvHardware" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnusedAttribute" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnusedQuantity" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUnusedResources" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUseAlpha2" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUseCompoundDrawables" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUseSparseArrays" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUseValueOf" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUselessLeaf" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUselessParent" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUsesMinSdkAttributes" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintUsingHttp" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintValidFragment" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintValidRestrictions" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintVectorDrawableCompat" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintVectorRaster" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintViewConstructor" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintViewHolder" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintViewTag" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWearableBindListener" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWebViewLayout" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWorldReadableFiles" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWorldWriteableFiles" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWrongCall" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWrongCase" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWrongFolder" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWrongRegion" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AndroidLintWrongViewCast" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AntDuplicateTargetsInspection" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AntMissingPropertiesFileInspection" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="AntResolveInspection" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="ApparentRefinementOfResultType" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="AppliedTypeLambdaCanBeSimplified" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ArrayCreationWithoutNewKeyword" enabled="false" level="INFORMATION" enabled_by_default="false" />
+  <inspection_tool class="AssertAsName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="AssignmentToCatchBlockParameter" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="AssignmentToDateFieldFromParameter" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignorePrivateMethods" value="true" />
+  </inspection_tool>
+  <inspection_tool class="BadOddness" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="BigDecimalEquals" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="BooleanMethodIsAlwaysInverted" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="BreakStatementWithLabel" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="BusyWait" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CStyleArrayDeclaration" enabled="false" level="INFORMATION" enabled_by_default="false" />
+  <inspection_tool class="CallToStringConcatCanBeReplacedByOperator" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CaseClassParam" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ChainedPackage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ClassMayBeInterface" enabled="false" level="INFORMATION" enabled_by_default="false" />
+  <inspection_tool class="ClassNameDiffersFromFileName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ClassNameSameAsAncestorName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CloneCallsConstructors" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CloneInNonCloneableClass" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CloneableImplementsClone" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="m_ignoreCloneableDueToInheritance" value="true" />
+  </inspection_tool>
+  <inspection_tool class="CollectionContainsUrl" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CollectionsFieldAccessReplaceableByMethodCall" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ComparableImplementedButEqualsNotOverridden" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ComparatorNotSerializable" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ComparingDiffCollectionKinds" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ComparingUnrelatedTypes" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ComparisonOfShortAndChar" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ComparisonToNaN" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ConditionSignal" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ConstantConditions" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="SUGGEST_NULLABLE_ANNOTATIONS" value="true" />
+    <option name="DONT_REPORT_TRUE_ASSERT_STATEMENTS" value="true" />
+  </inspection_tool>
+  <inspection_tool class="ConstantValueVariableUse" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ContinueStatementWithLabel" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ControlFlowStatementWithoutBraces" enabled="false" level="INFORMATION" enabled_by_default="false" />
+  <inspection_tool class="Convert2Lambda" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ConvertExpressionToSAM" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ConvertNullInitializerToUnderscore" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ConvertibleToMethodValue" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="CorrespondsUnsorted" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="CovariantCompareTo" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="CovariantEquals" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="DangerousCatchAll" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DanglingJavadoc" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DefaultNotLastCaseInSwitch" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="DeprecatedIsStillUsed" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DeprecatedViewBound" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DialogTitleCapitalization" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DivideByZero" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="DollarSignInName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="DottyDeprecatedWith" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DropTakeToSlice" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="DuplicateBooleanBranch" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="DuplicateCondition" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="ignoreMethodCalls" value="false" />
+  </inspection_tool>
+  <inspection_tool class="DuplicateThrows" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="EmptyCheck" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="EmptyInitializer" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="EmptyParenMethodAccessedAsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="EmptyParenMethodOverridenAsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="EmptySynchronizedStatement" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="EnumAsName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="EnumSwitchStatementWhichMissesCases" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreSwitchStatementsWithDefault" value="true" />
+  </inspection_tool>
+  <inspection_tool class="EqualityToSameElements" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="EqualsCalledOnEnumConstant" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="EqualsHashCodeCalledOnUrl" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="EqualsWhichDoesntCheckParameterClass" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ErrorRethrown" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ExceptionNameDoesntEndWithException" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ExistsEquals" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ExtendsUtilityClass" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ExternalizableWithSerializationMethods" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="FieldAccessedSynchronizedAndUnsynchronized" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="countGettersAndSetters" value="false" />
+  </inspection_tool>
+  <inspection_tool class="FieldCanBeLocal" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FieldFromDelayedInit" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FieldMayBeStatic" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="FilterEmptyCheck" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FilterHeadOption" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FilterOtherContains" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FilterSize" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FinalizeNotProtected" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="FindAndMapToApply" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FindEmptyCheck" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FloatLiteralEndingWithDecimalPoint" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FloatingPointEquality" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="FoldTrueAnd" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ForCanBeForeach" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="REPORT_INDEXED_LOOP" value="true" />
+    <option name="ignoreUntypedCollections" value="false" />
+  </inspection_tool>
+  <inspection_tool class="ForwardReference" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="FunctionTupleSyntacticSugar" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="GetGetOrElse" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="GetOrElseNull" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="HashCodeUsesVar" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
+  <inspection_tool class="HeadOrLastOption" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="HoconIncludeResolution" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="HtmlPresentationalElement" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="IfElseToOption" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="IfMayBeConditional" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IfStatementWithIdenticalBranches" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IgnoreResultOfCall" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="m_reportAllNonLibraryCalls" value="false" />
+    <option name="callCheckString" value="java.io.InputStream,read,java.io.InputStream,skip,java.lang.StringBuffer,toString,java.lang.StringBuilder,toString,java.lang.String,.*,java.math.BigInteger,.*,java.math.BigDecimal,.*,java.net.InetAddress,.*,java.io.File,.*,java.lang.Object,equals|hashCode" />
+  </inspection_tool>
+  <inspection_tool class="InjectionNotApplicable" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="InnerClassMayBeStatic" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="InstanceofThis" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IntLiteralMayBeLongLiteral" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IntegerDivisionInFloatingPointContext" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IteratorHasNextCallsIteratorNext" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="IteratorNextDoesNotThrowNoSuchElementException" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="JavaAccessorMethodCalledAsEmptyParen" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaAccessorMethodOverridenAsEmptyParen" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxColorRgb" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxDefaultTag" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxEventHandler" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxRedundantPropertyValue" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxResourcePropertyValue" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxUnresolvedFxIdReference" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaFxUnusedImports" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaLangImport" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="JavaMutatorMethodAccessedAsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JavaMutatorMethodOverridenAsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JsonDuplicatePropertyKeys" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="JsonStandardCompliance" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="KindProjectorSimplifyTypeProjection" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="KindProjectorUseCorrectLambdaKeyword" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="LabeledStatement" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="LanguageFeature" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="LanguageMismatch" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="CHECK_NON_ANNOTATED_REFERENCES" value="true" />
+  </inspection_tool>
+  <inspection_tool class="LastIndexToLast" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="LengthOneStringInIndexOf" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ListIndexOfReplaceableByContains" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="LiteralAsArgToStringEquals" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="LoopVariableNotUpdated" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="LossyEncoding" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapFlatten" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapGetGet" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapGetOrElseBoolean" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapKeys" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapReplaceableByEnumMap" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MapToBooleanContains" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MapValues" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MatchToPartialFunction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MethodNameSameAsParentName" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MethodNamesDifferOnlyByCase" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MethodOverridesPackageLocalMethod" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MethodOverridesPrivateMethod" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MethodOverridesStaticMethod" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MismatchedCollectionQueryUpdate" enabled="false" level="WEAK WARNING" enabled_by_default="false">
+    <option name="queryNames">
+      <value />
+    </option>
+    <option name="updateNames">
+      <value />
+    </option>
+    <option name="ignoredClasses">
+      <value />
+    </option>
+  </inspection_tool>
+  <inspection_tool class="MissingDeprecatedAnnotation" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MissingOverrideAnnotation" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreObjectMethods" value="true" />
+    <option name="ignoreAnonymousClassMethods" value="false" />
+  </inspection_tool>
+  <inspection_tool class="MissortedModifiers" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="m_requireAnnotationsFirst" value="true" />
+  </inspection_tool>
+  <inspection_tool class="MisspelledCompareTo" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MisspelledEquals" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MisspelledHashcode" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MisspelledToString" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="MultipleArgListsInAnnotation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MultipleRepositoryUrls" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="MutatorLikeMethodIsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="NakedNotify" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NameBooleanParameters" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="NoReturnTypeForImplicitDef" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="NonExceptionNameEndsWithException" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonFinalStaticVariableUsedInClassInitialization" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonProtectedConstructorInAbstractClass" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="m_ignoreNonPublicClasses" value="false" />
+  </inspection_tool>
+  <inspection_tool class="NonSerializableObjectBoundToHttpSession" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonSerializableWithSerialVersionUIDField" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonSerializableWithSerializationMethods" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonSynchronizedMethodOverridesSynchronizedMethod" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NonThreadSafeLazyInitialization" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NotImplementedCode" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="NotifyCalledOnCondition" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NotifyNotInSynchronizedContext" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NotifyWithoutCorrespondingWait" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="NullableProblems" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="REPORT_NULLABLE_METHOD_OVERRIDES_NOTNULL" value="true" />
+    <option name="REPORT_NOT_ANNOTATED_METHOD_OVERRIDES_NOTNULL" value="false" />
+    <option name="REPORT_NOTNULL_PARAMETER_OVERRIDES_NULLABLE" value="true" />
+    <option name="REPORT_NOT_ANNOTATED_PARAMETER_OVERRIDES_NOTNULL" value="true" />
+    <option name="REPORT_NOT_ANNOTATED_GETTER" value="true" />
+    <option name="REPORT_NOT_ANNOTATED_SETTER_PARAMETER" value="true" />
+    <option name="REPORT_ANNOTATION_NOT_PROPAGATED_TO_OVERRIDERS" value="false" />
+    <option name="REPORT_NULLS_PASSED_TO_NON_ANNOTATED_METHOD" value="true" />
+  </inspection_tool>
+  <inspection_tool class="ObjectNotify" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ObsoleteCollection" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreRequiredObsoleteCollectionTypes" value="false" />
+  </inspection_tool>
+  <inspection_tool class="OptionEqualsSome" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="OverlyStrongTypeCast" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreInMatchingInstanceof" value="false" />
+  </inspection_tool>
+  <inspection_tool class="OverriddenMethodCallDuringObjectConstruction" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="PackageVisibleInnerClass" enabled="true" level="WARNING" enabled_by_default="false">
+    <scope name="Production" level="WARNING" enabled="true">
+      <option name="ignoreEnums" value="false" />
+      <option name="ignoreInterfaces" value="false" />
+    </scope>
+    <option name="ignoreEnums" value="false" />
+    <option name="ignoreInterfaces" value="false" />
+  </inspection_tool>
+  <inspection_tool class="ParameterNameDiffersFromOverriddenParameter" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="m_ignoreSingleCharacterNames" value="false" />
+    <option name="m_ignoreOverridesOfLibraryMethods" value="true" />
+  </inspection_tool>
+  <inspection_tool class="ParameterlessMemberOverridenAsEmptyParen" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="PointlessIndexOfComparison" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ProtectedMemberInFinalClass" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="PublicField" enabled="true" level="WARNING" enabled_by_default="false">
+    <scope name="Production" level="WARNING" enabled="true">
+      <option name="ignoreEnums" value="false" />
+      <option name="ignorableAnnotations">
+        <value />
+      </option>
+    </scope>
+    <option name="ignoreEnums" value="false" />
+    <option name="ignorableAnnotations">
+      <value />
+    </option>
+  </inspection_tool>
+  <inspection_tool class="PublicFieldAccessedInSynchronizedContext" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="PublicInnerClass" enabled="true" level="WARNING" enabled_by_default="false">
+    <scope name="Production" level="WARNING" enabled="true">
+      <option name="ignoreEnums" value="false" />
+      <option name="ignoreInterfaces" value="false" />
+    </scope>
+    <option name="ignoreEnums" value="false" />
+    <option name="ignoreInterfaces" value="false" />
+  </inspection_tool>
+  <inspection_tool class="RangeToIndices" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ReadObjectAndWriteObjectPrivate" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ReadObjectInitialization" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ReadResolveAndWriteReplaceProtected" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="RedundantBlock" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RedundantCollectionConversion" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RedundantDefaultArgument" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RedundantFieldInitialization" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="RedundantHeadOrLastOption" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RedundantImplements" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreSerializable" value="false" />
+    <option name="ignoreCloneable" value="false" />
+  </inspection_tool>
+  <inspection_tool class="RedundantImport" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="RedundantMethodOverride" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="RedundantNewCaseClass" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RedundantSuppression" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="RedundantThrowsDeclaration" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ReferenceMustBePrefixed" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RemoveRedundantReturn" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ReplaceAssignmentWithOperatorAssignment" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreLazyOperators" value="true" />
+    <option name="ignoreObscureOperators" value="false" />
+  </inspection_tool>
+  <inspection_tool class="ReplaceToWithUntil" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ReplaceWithFlatten" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="RequiredAttributes" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="myAdditionalRequiredHtmlAttributes" value="" />
+  </inspection_tool>
+  <inspection_tool class="ResultOfObjectAllocationIgnored" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ResultSetIndexZero" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ReturnOfDateField" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ReverseIterator" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ReverseMap" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ReverseTakeReverse" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SafeLock" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SafeVarargsDetector" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SameElementsToEquals" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SamePackageImport" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SameParameterValue" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDefaultFileTemplate" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDefaultFileTemplateUsage" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDeprecatedIdentifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDeprecation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocInlinedTag" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocMissingParameterDescription" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocUnbalancedHeader" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocUnclosedTagWithoutParser" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocUnknownParameter" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaDocUnknownTag" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaFileName" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaMalformedFormatString" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaPackageName" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaRedundantCast" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaRedundantConversion" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaStyle" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaUnnecessaryParentheses" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaUnnecessarySemicolon" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaUnreachableCode" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaUnusedSymbol" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaUselessExpression" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
+  <inspection_tool class="ScalaXmlUnmatchedTag" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="SerialPersistentFieldsWithWrongSignature" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SerialVersionUIDNotStaticFinal" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SignalWithoutCorrespondingAwait" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SimplifiableFoldOrReduce" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SimplifiableIfStatement" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SimplifyBoolean" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SimplifyBooleanMatch" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SingleImport" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SizeReplaceableByIsEmpty" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SizeToLength" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SleepWhileHoldingLock" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SortFilter" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="SpellCheckingInspection" enabled="false" level="WEAK WARNING" enabled_by_default="false">
+    <option name="processCode" value="true" />
+    <option name="processLiterals" value="true" />
+    <option name="processComments" value="true" />
+  </inspection_tool>
+  <inspection_tool class="StringBufferToStringInConcatenation" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="StringEqualsEmptyString" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SubtractionInCompareTo" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SynchronizeOnLock" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SynchronizedOnLiteralObject" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="SystemRunFinalizersOnExit" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="TextLabelInSwitchStatement" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThreadDeathRethrown" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThreadRun" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThreadStartInConstruction" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThreadStopSuspendResume" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThreadYield" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ThrowableInstanceNeverThrown" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ThrowableResultOfMethodCallIgnored" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ToSetAndBack" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="TransientFieldInNonSerializableClass" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="TransientFieldNotInitialized" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="TrivialStringConcatenation" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="TypeAnnotation" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="TypeCheckCanBeMatch" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="TypeParameterExtendsFinalClass" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="TypeParameterShadow" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UNCHECKED_WARNING" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="IGNORE_UNCHECKED_ASSIGNMENT" value="false" />
+    <option name="IGNORE_UNCHECKED_GENERICS_ARRAY_CREATION" value="true" />
+    <option name="IGNORE_UNCHECKED_CALL" value="false" />
+    <option name="IGNORE_UNCHECKED_CAST" value="true" />
+    <option name="IGNORE_UNCHECKED_OVERRIDING" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UNUSED_SYMBOL" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="LOCAL_VARIABLE" value="true" />
+    <option name="FIELD" value="true" />
+    <option name="METHOD" value="false" />
+    <option name="CLASS" value="false" />
+    <option name="PARAMETER" value="true" />
+    <option name="REPORT_PARAMETER_FOR_PUBLIC_METHODS" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UnaryPlus" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnconditionalWait" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnitInMap" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UnitMethodIsParameterless" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UnknownLanguage" enabled="false" level="ERROR" enabled_by_default="false" />
+  <inspection_tool class="UnnecessarilyQualifiedStaticUsage" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="m_ignoreStaticFieldAccesses" value="false" />
+    <option name="m_ignoreStaticMethodCalls" value="false" />
+    <option name="m_ignoreStaticAccessFromStaticContext" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UnnecessaryAnnotationParentheses" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryCallToStringValueOf" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryConstructor" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryFinalOnLocalVariable" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryFinalOnParameter" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="onlyWarnOnAbstractMethods" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UnnecessaryFullyQualifiedName" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="m_ignoreJavadoc" value="true" />
+    <option name="ignoreInModuleStatements" value="true" />
+  </inspection_tool>
+  <inspection_tool class="UnnecessaryInterfaceModifier" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UnnecessaryJavaDocLink" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="ignoreInlineLinkToSuper" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UnnecessaryPartialFunction" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UnnecessaryQualifierForThis" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessarySuperConstructor" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessarySuperQualifier" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryThis" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnnecessaryUnaryMinus" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnusedCatchParameter" enabled="true" level="WARNING" enabled_by_default="true">
+    <option name="m_ignoreCatchBlocksWithComments" value="false" />
+    <option name="m_ignoreTestCases" value="false" />
+  </inspection_tool>
+  <inspection_tool class="UnusedLibrary" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="UnusedReturnValue" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="UpperCaseFieldNameNotConstant" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="Use of postfix method call" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="VarCouldBeVal" enabled="false" level="WEAK WARNING" enabled_by_default="false" />
+  <inspection_tool class="VariablePatternShadow" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="WaitCalledOnCondition" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="WaitNotInLoop" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="WaitNotInSynchronizedContext" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="WaitWhileHoldingTwoLocks" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="WaitWithoutCorrespondingNotify" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="WeakerAccess" enabled="false" level="WARNING" enabled_by_default="false">
+    <option name="SUGGEST_PACKAGE_LOCAL_FOR_MEMBERS" value="true" />
+    <option name="SUGGEST_PACKAGE_LOCAL_FOR_TOP_CLASSES" value="true" />
+    <option name="SUGGEST_PRIVATE_FOR_INNERS" value="false" />
+  </inspection_tool>
+  <inspection_tool class="ZeroIndexToHead" enabled="false" level="WARNING" enabled_by_default="false" />
+  <inspection_tool class="ZeroLengthArrayInitialization" enabled="true" level="WARNING" enabled_by_default="true" />
+  <inspection_tool class="ZipWithIndex" enabled="false" level="WARNING" enabled_by_default="false" />
+</profile>
\ No newline at end of file
diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/sequence/JmhSequenceBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/sequence/JmhSequenceBenchmark.java
new file mode 100644
index 0000000..ca85071
--- /dev/null
+++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/sequence/JmhSequenceBenchmark.java
@@ -0,0 +1,215 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.benchmarks.jmh.sequence;
+
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteAtomicSequence;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.AtomicConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark;
+import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+
+import static org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner.createProperty;
+
+/**
+ * JMH benchmark for {@link IgniteAtomicSequence}.
+ */
+public class JmhSequenceBenchmark extends JmhAbstractBenchmark {
+
+    /** Property: nodes count. */
+    private static final String PROP_DATA_NODES = "ignite.jmh.sequence.dataNodes";
+
+    /** Property: client mode flag. */
+    private static final String PROP_CLIENT_MODE = "ignite.jmh.sequence.clientMode";
+
+    /** Property: reservation batch size. */
+    private static final String PROP_BATCH_SIZE = "ignite.jmh.sequence.batchSize";
+
+    @State(Scope.Benchmark)
+    public static class SequenceState {
+        /** IP finder shared across nodes. */
+        private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+        /** 1/10 of batchSize. */
+        private int randomBound;
+
+        /** */
+        private IgniteAtomicSequence seq;
+
+        /**
+         * Setup.
+         */
+        @Setup
+        public void setup() {
+            Ignite node = Ignition.start(configuration("NODE_0"));
+
+            int nodes = intProperty(PROP_DATA_NODES, 4);
+
+            for (int i = 1; i < nodes; i++)
+                Ignition.start(configuration("NODE_" + i));
+
+            boolean isClient = booleanProperty(PROP_CLIENT_MODE);
+
+            if (isClient) {
+                IgniteConfiguration clientCfg = configuration("client");
+
+                clientCfg.setClientMode(true);
+
+                node = Ignition.start(clientCfg);
+            }
+
+            AtomicConfiguration acfg = new AtomicConfiguration();
+
+            int batchSize = intProperty(PROP_BATCH_SIZE);
+
+            randomBound = batchSize < 10 ? 1 : batchSize / 10;
+
+            acfg.setAtomicSequenceReserveSize(batchSize);
+
+            seq = node.atomicSequence("seq", acfg, 0, true);
+        }
+
+        /**
+         * Create Ignite configuration.
+         *
+         * @param igniteInstanceName Ignite instance name.
+         * @return Configuration.
+         */
+        private IgniteConfiguration configuration(String igniteInstanceName) {
+            IgniteConfiguration cfg = new IgniteConfiguration();
+
+            cfg.setIgniteInstanceName(igniteInstanceName);
+
+            cfg.setLocalHost("127.0.0.1");
+
+            TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+            discoSpi.setIpFinder(IP_FINDER);
+
+            cfg.setDiscoverySpi(discoSpi);
+
+            return cfg;
+        }
+
+        /**
+         * Tear down routine.
+         */
+        @TearDown
+        public void tearDown() {
+            Ignition.stopAll(true);
+        }
+    }
+
+    /**
+     * Run benchmarks.
+     *
+     * @param args Arguments.
+     * @throws Exception If failed.
+     */
+    public static void main(String[] args) throws Exception {
+        run(false, 4, 10_000);
+        run(false, 4, 100_000);
+
+        run(true, 4, 10_000);
+        run(true, 4, 100_000);
+    }
+
+    /**
+     * @param client Client node.
+     * @param dataNodes Number of data nodes.
+     * @param batchSize Batch size.
+     * @throws Exception If failed.
+     */
+    private static void run(boolean client, int dataNodes, int batchSize) throws Exception {
+        String simpleClsName = JmhSequenceBenchmark.class.getSimpleName();
+
+        String output = simpleClsName +
+            "-" + (client ? "client" : "data") +
+            "-" + dataNodes +
+            "-" + batchSize;
+
+        JmhIdeBenchmarkRunner.create()
+            .forks(1)
+            .threads(5)
+            .warmupIterations(10)
+            .measurementIterations(20)
+            .output(output + ".jmh.log")
+            .benchmarks(JmhSequenceBenchmark.class.getSimpleName())
+            .jvmArguments(
+                "-Xms4g",
+                "-Xmx4g",
+                createProperty(PROP_BATCH_SIZE, batchSize),
+                createProperty(PROP_DATA_NODES, dataNodes),
+                createProperty(PROP_CLIENT_MODE, client)
+            )
+            .run();
+    }
+
+    /**
+     * Benchmark for {@link IgniteAtomicSequence#incrementAndGet} operation.
+     *
+     * @return Long new value.
+     */
+    @Benchmark
+    public long incrementAndGet(SequenceState state) {
+        return state.seq.incrementAndGet();
+    }
+
+    /**
+     * Benchmark for {@link IgniteAtomicSequence#getAndIncrement()} operation.
+     *
+     * @return Long previous value.
+     */
+    @Benchmark
+    public long getAndIncrement(SequenceState state) {
+        return state.seq.getAndIncrement();
+    }
+
+    /**
+     * Benchmark for {@link IgniteAtomicSequence#addAndGet(long)} operation.
+     *
+     * @return Long new value.
+     */
+    @Benchmark
+    public long addAndGet(SequenceState state) {
+        int key = ThreadLocalRandom.current().nextInt(state.randomBound) + 1;
+
+        return state.seq.getAndAdd(key);
+    }
+
+    /**
+     * Benchmark for {@link IgniteAtomicSequence#getAndAdd(long)} operation.
+     *
+     * @return Long previous value.
+     */
+    @Benchmark
+    public long getAndAdd(SequenceState state) {
+        int key = ThreadLocalRandom.current().nextInt(state.randomBound) + 1;
+
+        return state.seq.getAndAdd(key);
+    }
+}
diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java
index cef00ee..e80e13d 100644
--- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java
+++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java
@@ -176,7 +176,7 @@
         TestTree(ReuseList reuseList, int cacheId, PageMemory pageMem, long metaPageId)
             throws IgniteCheckedException {
             super("test", cacheId, pageMem, null, new AtomicLong(), metaPageId, reuseList,
-                new IOVersions<>(new LongInnerIO()), new IOVersions<>(new LongLeafIO()));
+                new IOVersions<>(new LongInnerIO()), new IOVersions<>(new LongLeafIO()), null);
 
             PageIO.registerTest(latestInnerIO(), latestLeafIO());
 
@@ -192,7 +192,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override protected Long getRow(BPlusIO<Long> io, long pageAddr, int idx, Object ignore)
+        @Override public Long getRow(BPlusIO<Long> io, long pageAddr, int idx, Object ignore)
             throws IgniteCheckedException {
             assert io.canGetRow() : io;
 
diff --git a/modules/camel/pom.xml b/modules/camel/pom.xml
index cc2872c..3942c9b 100644
--- a/modules/camel/pom.xml
+++ b/modules/camel/pom.xml
@@ -35,7 +35,6 @@
     <url>http://ignite.apache.org</url>
 
     <properties>
-        <guava.version>18.0</guava.version>
         <okhttp.version>2.5.0</okhttp.version>
     </properties>
 
diff --git a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java
index fa7f542..c45272e 100644
--- a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java
+++ b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java
@@ -21,7 +21,7 @@
 import junit.framework.TestSuite;
 
 /**
- * Camel streamer tests.
+ * Camel streamer tests. Included into 'Streamers' run configuration.
  */
 public class IgniteCamelStreamerTestSuite extends TestSuite {
     /**
diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml
index df20518..df92156 100644
--- a/modules/cassandra/store/pom.xml
+++ b/modules/cassandra/store/pom.xml
@@ -39,7 +39,6 @@
         <cassandra-driver.version>3.0.0</cassandra-driver.version>
         <cassandra-all.version>3.3</cassandra-all.version>
         <netty.version>4.1.27.Final</netty.version>
-        <guava.version>19.0</guava.version>
         <metrics-core.version>3.0.2</metrics-core.version>
     </properties>
 
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java
index 93b720f..46aaa6b 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java
@@ -173,7 +173,7 @@
                     return null;
                 }
             },
-            IgniteCheckedException.class,
+            IgniteException.class,
             null
         );
 
@@ -328,7 +328,7 @@
         assert evt != null;
 
         assertEquals(EVT_TASK_STARTED, evt.type());
-        assertEquals(client.id(), evt.subjectId());
+        assertEquals(nodeId, evt.subjectId());
 
         assert it.hasNext();
 
@@ -337,7 +337,7 @@
         assert evt != null;
 
         assertEquals(EVT_TASK_REDUCED, evt.type());
-        assertEquals(client.id(), evt.subjectId());
+        assertEquals(nodeId, evt.subjectId());
 
         assert it.hasNext();
 
@@ -346,7 +346,7 @@
         assert evt != null;
 
         assertEquals(EVT_TASK_FINISHED, evt.type());
-        assertEquals(client.id(), evt.subjectId());
+        assertEquals(nodeId, evt.subjectId());
 
         assert !it.hasNext();
     }
@@ -408,4 +408,4 @@
             return null;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java
index c7017d6..657fda4 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java
@@ -53,18 +53,18 @@
 import org.apache.ignite.internal.processors.rest.ClientMemcachedProtocolSelfTest;
 import org.apache.ignite.internal.processors.rest.JettyRestProcessorAuthenticationWithCredsSelfTest;
 import org.apache.ignite.internal.processors.rest.JettyRestProcessorAuthenticationWithTokenSelfTest;
+import org.apache.ignite.internal.processors.rest.JettyRestProcessorGetAllAsArrayTest;
 import org.apache.ignite.internal.processors.rest.JettyRestProcessorSignedSelfTest;
 import org.apache.ignite.internal.processors.rest.JettyRestProcessorUnsignedSelfTest;
-import org.apache.ignite.internal.processors.rest.MemcacheRestProcessorTest;
 import org.apache.ignite.internal.processors.rest.RestBinaryProtocolSelfTest;
 import org.apache.ignite.internal.processors.rest.RestMemcacheProtocolSelfTest;
 import org.apache.ignite.internal.processors.rest.RestProcessorMultiStartSelfTest;
 import org.apache.ignite.internal.processors.rest.RestProcessorStartSelfTest;
-import org.apache.ignite.internal.processors.rest.RestProcessorTest;
 import org.apache.ignite.internal.processors.rest.TaskCommandHandlerSelfTest;
 import org.apache.ignite.internal.processors.rest.TcpRestUnmarshalVulnerabilityTest;
 import org.apache.ignite.internal.processors.rest.protocols.tcp.TcpRestParserSelfTest;
 import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolConnectSelfTest;
+import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolGetAllAsArrayTest;
 import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolServerSelfTest;
 import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolStringSelfTest;
 import org.apache.ignite.testframework.IgniteTestSuite;
@@ -86,7 +86,6 @@
 
         // Test memcache protocol with custom test client.
         suite.addTestSuite(RestMemcacheProtocolSelfTest.class);
-        //suite.addTestSuite(MemcacheRestProcessorTest.class);
 
         // Test custom binary protocol with test client.
         suite.addTestSuite(RestBinaryProtocolSelfTest.class);
@@ -97,16 +96,17 @@
         suite.addTestSuite(JettyRestProcessorUnsignedSelfTest.class);
         suite.addTestSuite(JettyRestProcessorAuthenticationWithCredsSelfTest.class);
         suite.addTestSuite(JettyRestProcessorAuthenticationWithTokenSelfTest.class);
+        suite.addTestSuite(JettyRestProcessorGetAllAsArrayTest.class);
 
         // Test TCP rest processor with original memcache client.
         suite.addTestSuite(ClientMemcachedProtocolSelfTest.class);
 
         // Test TCP rest processor with original REDIS client.
         suite.addTestSuite(RedisProtocolStringSelfTest.class);
+        suite.addTestSuite(RedisProtocolGetAllAsArrayTest.class);
         suite.addTestSuite(RedisProtocolConnectSelfTest.class);
         suite.addTestSuite(RedisProtocolServerSelfTest.class);
 
-        //suite.addTestSuite(RestProcessorTest.class);
         suite.addTestSuite(RestProcessorStartSelfTest.class);
 
         // Test cache flag conversion.
@@ -143,8 +143,8 @@
 
         // Rest task command handler test.
         suite.addTestSuite(TaskCommandHandlerSelfTest.class);
-        //suite.addTestSuite(ChangeStateCommandHandlerTest.class);
-        //suite.addTestSuite(TaskEventSubjectIdSelfTest.class);
+        suite.addTestSuite(ChangeStateCommandHandlerTest.class);
+        suite.addTestSuite(TaskEventSubjectIdSelfTest.class);
 
         // Default cache only test.
         suite.addTestSuite(ClientDefaultCacheSelfTest.class);
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java
index e91585f..753a98c 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcBulkLoadSelfTest.java
@@ -130,7 +130,7 @@
                 conn = createConnection();
 
                 try (Statement stmt = conn.createStatement()) {
-                    stmt.executeUpdate("copy from \"dummy.csv\" into Person" +
+                    stmt.executeUpdate("copy from 'dummy.csv' into Person" +
                         " (_key, id, firstName, lastName) format csv");
 
                     return null;
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java
index 9e11efc..bc545ac 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java
@@ -34,6 +34,7 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.processors.query.QueryUtils;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.jdbc.thin.JdbcThinAbstractSelfTest;
 import org.apache.ignite.lang.IgniteCallable;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
@@ -47,7 +48,7 @@
 /**
  * Data streaming test.
  */
-public class JdbcStreamingSelfTest extends GridCommonAbstractTest {
+public class JdbcStreamingSelfTest extends JdbcThinAbstractSelfTest {
     /** JDBC URL. */
     private static final String BASE_URL = CFG_URL_PREFIX +
         "cache=default@modules/clients/src/test/config/jdbc-config.xml";
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java
index 3a8afa4..e5c658c 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java
@@ -43,6 +43,8 @@
 
     /** {@inheritDoc} */
     @Override protected void beforeTestsStarted() throws Exception {
+        cleanPersistenceDir();
+
         startGrids(gridCount());
     }
 
@@ -59,6 +61,13 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
     @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
index 8bfef10..f39c280 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java
@@ -688,7 +688,7 @@
         String ret = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_PUT, "key", "key0");
 
         assertResponseContainsError(ret,
-            "Failed to handle request: [req=CACHE_PUT, err=Failed to find mandatory parameter in request: val]");
+            "Failed to find mandatory parameter in request: val");
     }
 
     /**
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java
index 2076d49..1b93284 100644
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java
@@ -56,6 +56,8 @@
 
     /** {@inheritDoc} */
     @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
         System.clearProperty(IGNITE_JETTY_PORT);
     }
 
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java
new file mode 100644
index 0000000..521d7c1
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.rest;
+
+import com.fasterxml.jackson.databind.JsonNode;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.apache.ignite.internal.util.typedef.F;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY;
+import static org.apache.ignite.internal.processors.rest.GridRestResponse.STATUS_SUCCESS;
+
+/** */
+public class JettyRestProcessorGetAllAsArrayTest extends JettyRestProcessorCommonSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        System.setProperty(IGNITE_REST_GETALL_AS_ARRAY, "true");
+
+        super.beforeTestsStarted();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        System.clearProperty(IGNITE_REST_GETALL_AS_ARRAY);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetAll() throws Exception {
+        final Map<String, String> entries = F.asMap("getKey1", "getVal1", "getKey2", "getVal2");
+
+        jcache().putAll(entries);
+
+        String ret = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_GET_ALL,
+            "k1", "getKey1",
+            "k2", "getKey2"
+        );
+
+        info("Get all command result: " + ret);
+
+        assertNotNull(ret);
+        assertFalse(ret.isEmpty());
+
+        JsonNode node = JSON_MAPPER.readTree(ret);
+
+        assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt());
+        assertTrue(node.get("error").isNull());
+
+        JsonNode res = node.get("response");
+
+        assertTrue(res.isArray());
+
+        Set<Map<String, String>> returnValue = new HashSet<>();
+
+        returnValue.add(F.asMap("key", "getKey1", "value", "getVal1"));
+        returnValue.add(F.asMap("key", "getKey2", "value", "getVal2"));
+
+        assertEquals(returnValue, JSON_MAPPER.treeToValue(res, Set.class));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected String signature() {
+        return null;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/MemcacheRestProcessorTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/MemcacheRestProcessorTest.java
deleted file mode 100644
index 9a84424..0000000
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/MemcacheRestProcessorTest.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.rest;
-
-import java.net.InetSocketAddress;
-import java.util.Map;
-import net.spy.memcached.BinaryConnectionFactory;
-import net.spy.memcached.MemcachedClient;
-import net.spy.memcached.MemcachedClientIF;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-/**
- */
-public class MemcacheRestProcessorTest extends GridCommonAbstractTest {
-    /** Client. */
-    private MemcachedClientIF client;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        client = new MemcachedClient(new BinaryConnectionFactory(),
-                F.asList(new InetSocketAddress("127.0.0.1", 11211)));
-
-        assert client.flush().get();
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testGetBulk() throws Exception {
-        assert client.add("key1", 0, 1).get();
-        assert client.add("key2", 0, 2).get();
-
-        Map<String, Object> map = client.getBulk("key1", "key2");
-
-        assert map.size() == 2;
-        assert map.get("key1").equals(1);
-        assert map.get("key2").equals(2);
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testAppend() throws Exception {
-        assert client.add("key", 0, "val").get();
-
-        assert client.append(0, "key", "_1").get();
-
-        assert "val_1".equals(client.get("key"));
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testPrepend() throws Exception {
-        assert client.add("key", 0, "val").get();
-
-        assert client.prepend(0, "key", "1_").get();
-
-        assert "1_val".equals(client.get("key"));
-    }
-}
\ No newline at end of file
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestProcessorTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestProcessorTest.java
deleted file mode 100644
index 5b4ce53..0000000
--- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/RestProcessorTest.java
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.rest;
-
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.Map;
-import javax.swing.JComponent;
-import javax.swing.JLabel;
-import javax.swing.JOptionPane;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.configuration.ConnectorConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-/**
- * Rest processor test.
- * <p>
- * URLs to test:
- * <ul>
- * <li>http://localhost:8080/ignite?cmd=get&key=simpleBean</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=complexBean</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=list</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=map</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=int</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=string</li>
- * <li>http://localhost:8080/ignite?cmd=get&key=date</li>
- * <li>http://localhost:8080/ignite?cmd=top</li>
- * <li>http://localhost:8080/ignite?cmd=exe&name=org.apache.ignite.internal.processors.rest.TestTask2</li>
- * <li>http://localhost:8080/ignite?cmd=exe&name=org.apache.ignite.internal.processors.rest.TestTask2&async=true</li>
- * <li>http://localhost:8080/ignite?cmd=res&id=XXXX</li>
- * </ul>
- */
-public class RestProcessorTest extends GridCommonAbstractTest {
-    /** Counter */
-    private static int cntr;
-
-    /** */
-    public RestProcessorTest() {
-        super(/*start grid*/false);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return Long.MAX_VALUE;
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testRest() throws Exception {
-        IgniteConfiguration cfg = getConfiguration((String)null);
-
-        cfg = cacheTestConfiguration(cfg);
-
-        G.start(cfg);
-
-        populateCache();
-
-        deployTasks();
-
-        // Wait until Ok is pressed.
-        JOptionPane.showMessageDialog(
-            null,
-            new JComponent[] {
-                new JLabel("Ignite started."),
-                new JLabel(
-                    "<html>" +
-                        "You can use JMX console at <u>http://localhost:1234</u>" +
-                        "</html>"),
-                new JLabel("Press OK to stop Ignite.")
-            },
-            "Ignite Startup JUnit",
-            JOptionPane.INFORMATION_MESSAGE
-        );
-
-        G.stop(true);
-    }
-
-    /**
-     * @param cfg Initial configuration.
-     * @return Final configuration.
-     */
-    @SuppressWarnings({"unchecked"})
-    private IgniteConfiguration cacheTestConfiguration(IgniteConfiguration cfg) {
-        TcpDiscoverySpi disco = new TcpDiscoverySpi();
-
-        TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
-
-        ipFinder.setShared(true);
-
-        disco.setIpFinder(ipFinder);
-
-        cfg.setDiscoverySpi(disco);
-
-        assert cfg.getConnectorConfiguration() == null;
-
-        ConnectorConfiguration clientCfg = new ConnectorConfiguration();
-
-        // Ensure - no authentication.
-        clientCfg.setSecretKey(null);
-
-        cfg.setConnectorConfiguration(clientCfg);
-
-        cfg.setCacheConfiguration(defaultCacheConfiguration());
-
-        return cfg;
-    }
-
-    /**
-     * @return Integer.
-     */
-    private int intValue() {
-        return ++cntr;
-    }
-
-    /**
-     *
-     */
-    private void populateCache() {
-        IgniteCache<String, Object> cache = G.ignite().cache(DEFAULT_CACHE_NAME);
-
-        cache.put("int", intValue());
-        cache.put("string", "cacheString");
-        cache.put("date", new Date());
-        cache.put("list", createCollection());
-        cache.put("map", createMap());
-        cache.put("simpleBean", new SimpleBean());
-
-        ComplexBean bean = new ComplexBean(new SimpleBean(intValue(), "complexSimpleString"));
-
-        bean.setComplexBean(new ComplexBean(new SimpleBean(intValue(), "complexComplexString")));
-
-        cache.put("complexBean", bean);
-    }
-
-    /**
-     *
-     */
-    private void deployTasks() {
-        G.ignite().compute().localDeployTask(TestTask1.class, TestTask1.class.getClassLoader());
-        G.ignite().compute().localDeployTask(TestTask2.class, TestTask2.class.getClassLoader());
-    }
-
-    /**
-     * @return Map.
-     */
-    private Map<?, ?> createMap() {
-        Map<Object, Object> map = new HashMap<>();
-
-        map.put("intValue", intValue());
-        map.put("stringValue", "mapString");
-        map.put("simpleBean", new SimpleBean());
-        map.put("complexBean", new ComplexBean(new SimpleBean(intValue(), "mapSimpleComplexString")));
-
-        Map<Object, Object> nested = new HashMap<>();
-
-        nested.put("intValue", intValue());
-        nested.put("stringValue", "nestedMapString");
-        nested.put("simpleBean", new SimpleBean());
-        nested.put("complexBean", new ComplexBean(new SimpleBean(intValue(), "mapSimpleComplexNestedString")));
-
-        map.put("nestedMap", nested);
-
-        return map;
-    }
-
-    /**
-     * @return List.
-     */
-    private Collection<?> createCollection() {
-        Collection<Object> list = new ArrayList<>();
-
-        list.add(intValue());
-        list.add("listString");
-        list.add(new Date());
-
-        Collection<Object> nested = new ArrayList<>();
-
-        nested.add(intValue());
-        nested.add("nestedListString");
-        nested.add(new Date());
-
-        list.add(nested);
-
-        return list;
-    }
-
-    /**
-     * Simple bean.
-     */
-    @SuppressWarnings( {"ReturnOfDateField", "AssignmentToDateFieldFromParameter", "PublicInnerClass"})
-    public static class SimpleBean implements Serializable {
-        /** */
-        private int intField = 12345;
-
-        /** */
-        private String strField = "testString";
-
-        /** */
-        private Date date = new Date();
-
-        /**
-         * Empty constructor.
-         */
-        private SimpleBean() {
-            // No-op.
-        }
-
-        /**
-         * @param intField Int value.
-         * @param strField String value.
-         */
-        private SimpleBean(int intField, String strField) {
-            this.intField = intField;
-            this.strField = strField;
-        }
-
-        /**
-         * @param intField Int value.
-         * @param strField String value.
-         * @param date Date value.
-         */
-        private SimpleBean(int intField, String strField, Date date) {
-            this.intField = intField;
-            this.strField = strField;
-            this.date = date;
-        }
-
-        /**
-         * @return Int value.
-         */
-        public int getIntField() {
-            return intField;
-        }
-
-        /**
-         * @param intField Int value.
-         */
-        public void setIntField(int intField) {
-            this.intField = intField;
-        }
-
-        /**
-         * @return String value.
-         */
-        public String getStringField() {
-            return strField;
-        }
-
-        /**
-         * @param strField String value.
-         */
-        public void setStringField(String strField) {
-            this.strField = strField;
-        }
-
-        /**
-         * @return Date value.
-         */
-        public Date getDate() {
-            return date;
-        }
-
-        /**
-         * @param date Date value.
-         */
-        public void setDate(Date date) {
-            this.date = date;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(SimpleBean.class, this);
-        }
-    }
-
-    /**
-     * Simple bean.
-     */
-    @SuppressWarnings( {"ReturnOfDateField", "PublicInnerClass"})
-    public static class ComplexBean extends SimpleBean {
-        /** */
-        private SimpleBean simpleBean = new SimpleBean(67890, "nestedTestString", new Date());
-
-        /** */
-        private ComplexBean complexBean;
-
-        /**
-         * @param simpleBean Simple bean.
-         */
-        private ComplexBean(SimpleBean simpleBean) {
-            this.simpleBean = simpleBean;
-        }
-
-        /**
-         * @return Simple bean.
-         */
-        public SimpleBean getSimpleBean() {
-            return simpleBean;
-        }
-
-        /**
-         * @param simpleBean Simple bean.
-         */
-        public void setSimpleBean(SimpleBean simpleBean) {
-            this.simpleBean = simpleBean;
-        }
-
-        /**
-         * @return Complex bean.
-         */
-        public ComplexBean getComplexBean() {
-            return complexBean;
-        }
-
-        /**
-         * @param complexBean Complex bean.
-         */
-        public void setComplexBean(ComplexBean complexBean) {
-            this.complexBean = complexBean;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(ComplexBean.class, this);
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java
new file mode 100644
index 0000000..f892ca5
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.rest.protocols.tcp.redis;
+
+import org.apache.ignite.IgniteSystemProperties;
+
+/**
+ * Test for being unaffected by {@link IgniteSystemProperties#IGNITE_REST_GETALL_AS_ARRAY}.
+ */
+public class RedisProtocolGetAllAsArrayTest extends RedisProtocolStringSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        System.setProperty(IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY, "true");
+
+        super.beforeTestsStarted();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        System.clearProperty(IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY);
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverMvccTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverMvccTestSuite.java
new file mode 100644
index 0000000..6d8933d
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverMvccTestSuite.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.suite;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.jdbc.thin.JdbcThinConnectionMvccEnabledSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsClientAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsClientNoAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsWithMvccEnabledSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsServerAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsServerNoAutoCommitComplexSelfTest;
+
+public class IgniteJdbcDriverMvccTestSuite extends TestSuite {
+    /**
+     * @return JDBC Driver Test Suite.
+     * @throws Exception In case of error.
+     */
+    public static TestSuite suite() throws Exception {
+        TestSuite suite = new TestSuite("Ignite JDBC Driver Test Suite");
+
+        suite.addTest(new TestSuite(JdbcThinConnectionMvccEnabledSelfTest.class));
+        
+        // Transactions
+        suite.addTest(new TestSuite(JdbcThinTransactionsWithMvccEnabledSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsClientAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsServerAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsClientNoAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsServerNoAutoCommitComplexSelfTest.class));
+
+        return suite;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java
index f3490aa..2e98d68 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java
@@ -43,10 +43,12 @@
 import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedNearSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalReplicatedSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlCustomSchemaSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinComplexQuerySelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinConnectionMultipleAddressesTest;
+import org.apache.ignite.jdbc.thin.JdbcThinConnectionMvccEnabledSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinConnectionSSLTest;
 import org.apache.ignite.jdbc.thin.JdbcThinConnectionSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinDataSourceSelfTest;
@@ -75,6 +77,11 @@
 import org.apache.ignite.jdbc.thin.JdbcThinStreamingNotOrderedSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinStreamingOrderedSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinTcpIoTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsClientAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsClientNoAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsServerAutoCommitComplexSelfTest;
+import org.apache.ignite.jdbc.thin.JdbcThinTransactionsServerNoAutoCommitComplexSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest;
 import org.apache.ignite.jdbc.thin.JdbcThinWalModeChangeSelfTest;
@@ -128,7 +135,7 @@
         suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcErrorsSelfTest.class));
         suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcStreamingToPublicCacheTest.class));
         suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcNoCacheStreamingSelfTest.class));
-        //suite.addTest(new TestSuite(JdbcBulkLoadSelfTest.class));
+        suite.addTest(new TestSuite(JdbcBulkLoadSelfTest.class));
 
         suite.addTest(new TestSuite(JdbcBlobTest.class));
         suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest.class));
@@ -145,6 +152,7 @@
 
         // New thin JDBC
         suite.addTest(new TestSuite(JdbcThinConnectionSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinConnectionMvccEnabledSelfTest.class));
         suite.addTest(new TestSuite(JdbcThinConnectionMultipleAddressesTest.class));
         suite.addTest(new TestSuite(JdbcThinTcpIoTest.class));
         suite.addTest(new TestSuite(JdbcThinConnectionSSLTest.class));
@@ -194,6 +202,14 @@
         suite.addTest(new TestSuite(JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.class));
         suite.addTest(new TestSuite(JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.class));
         suite.addTest(new TestSuite(JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinComplexDmlDdlCustomSchemaSelfTest.class));
+
+        // Transactions
+        suite.addTest(new TestSuite(JdbcThinTransactionsSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsClientAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsServerAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsClientNoAutoCommitComplexSelfTest.class));
+        suite.addTest(new TestSuite(JdbcThinTransactionsServerNoAutoCommitComplexSelfTest.class));
 
         suite.addTest(new TestSuite(JdbcThinLocalQueriesSelfTest.class));
 
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java
index 2ba36c3..6d5f59a 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java
@@ -112,7 +112,7 @@
      * @param params Connection parameters.
      * @return Thin JDBC connection to specified node.
      */
-    static Connection connect(IgniteEx node, String params) throws SQLException {
+    protected Connection connect(IgniteEx node, String params) throws SQLException {
         Collection<GridPortRecord> recs = node.context().ports().records();
 
         GridPortRecord cliLsnrRec = null;
@@ -141,7 +141,7 @@
      * @return Result set.
      * @throws RuntimeException if failed.
      */
-    static List<List<?>> execute(Connection conn, String sql, Object... args) throws SQLException {
+    protected List<List<?>> execute(Connection conn, String sql, Object... args) throws SQLException {
         try (PreparedStatement s = conn.prepareStatement(sql)) {
             for (int i = 0; i < args.length; i++)
                 s.setObject(i + 1, args[i]);
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java
new file mode 100644
index 0000000..8fd9356
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+import org.apache.ignite.configuration.IgniteConfiguration;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+
+/**
+ * Base class for complex SQL tests based on JDBC driver.
+ */
+public class JdbcThinComplexDmlDdlCustomSchemaSelfTest extends JdbcThinComplexDmlDdlSelfTest {
+    /** Simple schema. */
+    private static final String SCHEMA_1 = "SCHEMA_1";
+
+    /** Complex schema. */
+    private static final String SCHEMA_2 = "\"SCHEMA 2\"";
+
+    /** Current schema. */
+    private String curSchema = SCHEMA_1;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setSqlSchemas(SCHEMA_1, SCHEMA_2);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected Connection createConnection() throws SQLException {
+        return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/" + curSchema);
+    }
+
+    /**
+     * Test create/select/drop flow on escaped schema.
+     *
+     * @throws Exception If failed.
+     */
+    public void testCreateSelectDropEscapedSchema() throws Exception {
+        try {
+            curSchema = SCHEMA_2;
+
+            testCreateSelectDrop();
+        }
+        finally {
+            curSchema = SCHEMA_1;
+        }
+    }
+
+    /**
+     * Test multiple iterations.
+     *
+     * @throws Exception If failed.
+     */
+    public void testMultiple() throws Exception {
+        testCreateSelectDrop();
+        testCreateSelectDrop();
+    }
+}
\ No newline at end of file
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java
index 9c40948..36ee34a 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java
@@ -33,12 +33,10 @@
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
-import org.apache.ignite.lang.IgniteCallable;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.jetbrains.annotations.NotNull;
 
@@ -82,9 +80,8 @@
     /**
      * @param name Cache name.
      * @return Cache configuration.
-     * @throws Exception In case of error.
      */
-    private CacheConfiguration cacheConfiguration(@NotNull String name) throws Exception {
+    private CacheConfiguration cacheConfiguration(@NotNull String name) {
         CacheConfiguration cfg = defaultCacheConfiguration();
 
         cfg.setName(name);
@@ -110,8 +107,6 @@
     /** {@inheritDoc} */
     @Override protected void beforeTest() throws Exception {
         super.beforeTest();
-
-        conn = createConnection();
     }
 
     /** {@inheritDoc} */
@@ -133,14 +128,8 @@
      * @throws Exception If failed.
      */
     @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testCreateSelect() throws Exception {
-        GridTestUtils.assertThrows(null, new IgniteCallable<Object>() {
-            @Override public Object call() throws Exception {
-                sql(new ResultChecker(new Object[][] {}), "SELECT * from Person");
-
-                return null;
-            }
-        }, SQLException.class, "Table \"PERSON\" not found");
+    public void testCreateSelectDrop() throws Exception {
+        conn = createConnection();
 
         sql(new UpdateChecker(0),
             "CREATE TABLE person (id int, name varchar, age int, company varchar, city varchar, " +
@@ -228,6 +217,9 @@
         assert cnt[0] == 34 : "Invalid rows count";
 
         sql(new UpdateChecker(0), "DROP INDEX idx");
+
+        sql(new UpdateChecker(0), "DROP TABLE city");
+        sql(new UpdateChecker(0), "DROP TABLE person");
     }
 
     /**
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMvccEnabledSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMvccEnabledSelfTest.java
new file mode 100644
index 0000000..0196cb2
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMvccEnabledSelfTest.java
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+import java.sql.Connection;
+import java.sql.DatabaseMetaData;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Savepoint;
+import java.util.concurrent.Callable;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.binary.BinaryMarshaller;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridStringLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.NotNull;
+
+import static java.sql.Connection.TRANSACTION_NONE;
+import static java.sql.Connection.TRANSACTION_READ_COMMITTED;
+import static java.sql.Connection.TRANSACTION_READ_UNCOMMITTED;
+import static java.sql.Connection.TRANSACTION_REPEATABLE_READ;
+import static java.sql.Connection.TRANSACTION_SERIALIZABLE;
+
+/**
+ * Connection test.
+ */
+@SuppressWarnings("ThrowableNotThrown")
+public class JdbcThinConnectionMvccEnabledSelfTest extends JdbcThinAbstractSelfTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final String URL = "jdbc:ignite:thin://127.0.0.1";
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setCacheConfiguration(cacheConfiguration(DEFAULT_CACHE_NAME));
+
+        TcpDiscoverySpi disco = new TcpDiscoverySpi();
+
+        disco.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(disco);
+
+        cfg.setMarshaller(new BinaryMarshaller());
+
+        cfg.setGridLogger(new GridStringLogger());
+
+        return cfg;
+    }
+
+    /**
+     * @param name Cache name.
+     * @return Cache configuration.
+     * @throws Exception In case of error.
+     */
+    private CacheConfiguration cacheConfiguration(@NotNull String name) throws Exception {
+        CacheConfiguration cfg = defaultCacheConfiguration();
+
+        cfg.setName(name);
+        cfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGridsMultiThreaded(2);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+    }
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMetadataDefaults() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            DatabaseMetaData meta = conn.getMetaData();
+
+            assertEquals(TRANSACTION_REPEATABLE_READ, meta.getDefaultTransactionIsolation());
+            assertTrue(meta.supportsTransactions());
+
+            assertFalse(meta.supportsTransactionIsolationLevel(TRANSACTION_NONE));
+            assertFalse(meta.supportsTransactionIsolationLevel(TRANSACTION_READ_UNCOMMITTED));
+            assertFalse(meta.supportsTransactionIsolationLevel(TRANSACTION_READ_COMMITTED));
+            assertTrue(meta.supportsTransactionIsolationLevel(TRANSACTION_REPEATABLE_READ));
+            assertFalse(meta.supportsTransactionIsolationLevel(TRANSACTION_SERIALIZABLE));
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetSetAutoCommit() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assertTrue(conn.getMetaData().supportsTransactions());
+
+            assertTrue(conn.getAutoCommit());
+
+            conn.setAutoCommit(false);
+
+            assertFalse(conn.getAutoCommit());
+
+            conn.setAutoCommit(true);
+
+            assertTrue(conn.getAutoCommit());
+
+            conn.close();
+
+            // Exception when called on closed connection
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.setAutoCommit(true);
+                }
+            });
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCommit() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assertTrue(conn.getMetaData().supportsTransactions());
+
+            // Should not be called in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.commit();
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Transaction cannot be committed explicitly in auto-commit mode"
+            );
+
+            conn.setAutoCommit(false);
+
+            conn.commit();
+
+            conn.close();
+
+            // Exception when called on closed connection
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.commit();
+                }
+            });
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRollback() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assertTrue(conn.getMetaData().supportsTransactions());
+
+            // Should not be called in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.rollback();
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Transaction cannot be rolled back explicitly in auto-commit mode."
+            );
+
+            conn.setAutoCommit(false);
+
+            conn.rollback();
+
+            conn.close();
+
+            // Exception when called on closed connection
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.rollback();
+                }
+            });
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSetSavepoint() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assert !conn.getMetaData().supportsSavepoints();
+
+            // Disallowed in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setSavepoint();
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Savepoint cannot be set in auto-commit mode"
+            );
+
+            conn.setAutoCommit(false);
+
+            // Unsupported
+            checkNotSupported(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.setSavepoint();
+                }
+            });
+
+            conn.close();
+
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.setSavepoint();
+                }
+            });
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSetSavepointName() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assert !conn.getMetaData().supportsSavepoints();
+
+            // Invalid arg
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setSavepoint(null);
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Savepoint name cannot be null"
+            );
+
+            final String name = "savepoint";
+
+            // Disallowed in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setSavepoint(name);
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Savepoint cannot be set in auto-commit mode"
+            );
+
+            conn.setAutoCommit(false);
+
+            // Unsupported
+            checkNotSupported(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.setSavepoint(name);
+                }
+            });
+
+            conn.close();
+
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.setSavepoint(name);
+                }
+            });
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRollbackSavePoint() throws Exception {
+        try (Connection conn = DriverManager.getConnection(URL)) {
+            assert !conn.getMetaData().supportsSavepoints();
+
+            // Invalid arg
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.rollback(null);
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Invalid savepoint"
+            );
+
+            final Savepoint savepoint = getFakeSavepoint();
+
+            // Disallowed in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.rollback(savepoint);
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Auto-commit mode"
+            );
+
+            conn.setAutoCommit(false);
+
+            // Unsupported
+            checkNotSupported(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.rollback(savepoint);
+                }
+            });
+
+            conn.close();
+
+            checkConnectionClosed(new RunnableX() {
+                @Override public void run() throws Exception {
+                    conn.rollback(savepoint);
+                }
+            });
+        }
+    }
+
+    /**
+     * @return Savepoint.
+     */
+    private Savepoint getFakeSavepoint() {
+        return new Savepoint() {
+            @Override public int getSavepointId() throws SQLException {
+                return 100;
+            }
+
+            @Override public String getSavepointName() {
+                return "savepoint";
+            }
+        };
+    }
+}
\ No newline at end of file
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java
index 38bcab2..80397e6 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java
@@ -17,6 +17,9 @@
 
 package org.apache.ignite.jdbc.thin;
 
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
@@ -38,12 +41,16 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.binary.BinaryMarshaller;
+import org.apache.ignite.internal.jdbc.thin.ConnectionProperties;
+import org.apache.ignite.internal.jdbc.thin.ConnectionPropertiesImpl;
 import org.apache.ignite.internal.jdbc.thin.JdbcThinConnection;
 import org.apache.ignite.internal.jdbc.thin.JdbcThinTcpIo;
+import org.apache.ignite.internal.util.HostAndPortRange;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridStringLogger;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.jetbrains.annotations.NotNull;
 
@@ -58,6 +65,7 @@
 import static java.sql.ResultSet.TYPE_FORWARD_ONLY;
 import static java.sql.Statement.NO_GENERATED_KEYS;
 import static java.sql.Statement.RETURN_GENERATED_KEYS;
+import static org.apache.ignite.configuration.ClientConnectorConfiguration.DFLT_PORT;
 
 /**
  * Connection test.
@@ -93,6 +101,8 @@
 
         cfg.setMarshaller(new BinaryMarshaller());
 
+        cfg.setGridLogger(new GridStringLogger());
+
         return cfg;
     }
 
@@ -970,11 +980,18 @@
         try (Connection conn = DriverManager.getConnection(URL)) {
             assertTrue(conn.getAutoCommit());
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            assertFalse(conn.getAutoCommit());
-
-            conn.setAutoCommit(true);
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
 
             assertTrue(conn.getAutoCommit());
 
@@ -994,8 +1011,6 @@
      */
     public void testCommit() throws Exception {
         try (Connection conn = DriverManager.getConnection(URL)) {
-            assert !conn.getMetaData().supportsTransactions();
-
             // Should not be called in auto-commit mode
             GridTestUtils.assertThrows(log,
                 new Callable<Object>() {
@@ -1009,9 +1024,33 @@
                 "Transaction cannot be committed explicitly in auto-commit mode"
             );
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            conn.commit();
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
+
+            assertTrue(conn.getAutoCommit());
+
+            // Should not be called in auto-commit mode
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.commit();
+
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "Transaction cannot be committed explicitly in auto-commit mode."
+            );
 
             conn.close();
 
@@ -1029,8 +1068,6 @@
      */
     public void testRollback() throws Exception {
         try (Connection conn = DriverManager.getConnection(URL)) {
-            assert !conn.getMetaData().supportsTransactions();
-
             // Should not be called in auto-commit mode
             GridTestUtils.assertThrows(log,
                 new Callable<Object>() {
@@ -1041,12 +1078,23 @@
                     }
                 },
                 SQLException.class,
-                "Transaction cannot rollback in auto-commit mode"
+                "Transaction cannot be rolled back explicitly in auto-commit mode."
             );
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            conn.rollback();
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
+
+            assertTrue(conn.getAutoCommit());
 
             conn.close();
 
@@ -1138,8 +1186,6 @@
      */
     public void testGetSetTransactionIsolation() throws Exception {
         try (Connection conn = DriverManager.getConnection(URL)) {
-            assert !conn.getMetaData().supportsTransactions();
-
             // Invalid parameter value
             GridTestUtils.assertThrows(log,
                 new Callable<Object>() {
@@ -1346,14 +1392,20 @@
                 "Savepoint cannot be set in auto-commit mode"
             );
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            // Unsupported
-            checkNotSupported(new RunnableX() {
-                @Override public void run() throws Exception {
-                    conn.setSavepoint();
-                }
-            });
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
+
+            assertTrue(conn.getAutoCommit());
 
             conn.close();
 
@@ -1400,14 +1452,20 @@
                 "Savepoint cannot be set in auto-commit mode"
             );
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            // Unsupported
-            checkNotSupported(new RunnableX() {
-                @Override public void run() throws Exception {
-                    conn.setSavepoint(name);
-                }
-            });
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
+
+            assertTrue(conn.getAutoCommit());
 
             conn.close();
 
@@ -1454,14 +1512,20 @@
                 "Auto-commit mode"
             );
 
-            conn.setAutoCommit(false);
+            // Cannot disable autocommit when MVCC is disabled.
+            GridTestUtils.assertThrows(log,
+                new Callable<Object>() {
+                    @Override public Object call() throws Exception {
+                        conn.setAutoCommit(false);
 
-            // Unsupported
-            checkNotSupported(new RunnableX() {
-                @Override public void run() throws Exception {
-                    conn.rollback(savepoint);
-                }
-            });
+                        return null;
+                    }
+                },
+                SQLException.class,
+                "MVCC must be enabled in order to invoke transactional operation: COMMIT"
+            );
+
+            assertTrue(conn.getAutoCommit());
 
             conn.close();
 
@@ -1898,6 +1962,58 @@
     }
 
     /**
+     * Test that attempting to supply invalid nested TX mode to driver fails on the client.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testInvalidNestedTxMode() {
+        GridTestUtils.assertThrows(null, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                DriverManager.getConnection(URL + "/?nestedTransactionsMode=invalid");
+
+                return null;
+            }
+        }, SQLException.class, "Invalid nested transactions handling mode");
+    }
+
+    /**
+     * Test that attempting to send unexpected name of nested TX mode to server on handshake yields an error.
+     * We have to do this without explicit {@link Connection} as long as there's no other way to bypass validation and
+     * supply a malformed {@link ConnectionProperties} to {@link JdbcThinTcpIo}.
+     */
+    @SuppressWarnings({"ThrowableResultOfMethodCallIgnored", "ThrowFromFinallyBlock"})
+    public void testInvalidNestedTxModeOnServerSide() throws SQLException, NoSuchMethodException,
+        IllegalAccessException, InvocationTargetException, InstantiationException, IOException {
+        ConnectionPropertiesImpl connProps = new ConnectionPropertiesImpl();
+
+        connProps.setAddresses(new HostAndPortRange[]{new HostAndPortRange("127.0.0.1", DFLT_PORT, DFLT_PORT)});
+
+        connProps.nestedTxMode("invalid");
+
+        Constructor ctor = JdbcThinTcpIo.class.getDeclaredConstructor(ConnectionProperties.class);
+
+        boolean acc = ctor.isAccessible();
+
+        ctor.setAccessible(true);
+
+        final JdbcThinTcpIo io = (JdbcThinTcpIo)ctor.newInstance(connProps);
+
+        try {
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    io.start();
+
+                    return null;
+                }
+            }, SQLException.class, "err=Invalid nested transactions handling mode: invalid");
+        }
+        finally {
+            io.close();
+
+            ctor.setAccessible(acc);
+        }
+    }
+
+    /**
      */
     public void testSslClientAndPlainServer()  {
         GridTestUtils.assertThrows(log, new Callable<Object>() {
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java
index 8e35a86..59382f1 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinMetadataSelfTest.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.jdbc.thin;
 
 import java.io.Serializable;
-import java.math.BigDecimal;
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
 import java.sql.DriverManager;
@@ -32,13 +31,16 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.Map;
 import java.util.Set;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.QueryIndex;
 import org.apache.ignite.cache.affinity.AffinityKey;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteVersionUtils;
@@ -99,10 +101,15 @@
 
         startGridsMultiThreaded(3);
 
+        Map<String, Integer> orgPrecision = new HashMap<>();
+        
+        orgPrecision.put("name", 42);
+
         IgniteCache<String, Organization> orgCache = jcache(grid(0),
             cacheConfiguration(new QueryEntity(String.class.getName(), Organization.class.getName())
                 .addQueryField("id", Integer.class.getName(), null)
                 .addQueryField("name", String.class.getName(), null)
+                .setFieldsPrecision(orgPrecision)
                 .setIndexes(Arrays.asList(
                     new QueryIndex("id"),
                     new QueryIndex("name", false, "org_name_index")
@@ -136,6 +143,9 @@
         personCache.put(new AffinityKey<>("p2", "o1"), new Person("Joe Black", 35, 1));
         personCache.put(new AffinityKey<>("p3", "o2"), new Person("Mike Green", 40, 2));
 
+        IgniteCache<Integer, Department> departmentCache = jcache(grid(0), 
+            defaultCacheConfiguration().setIndexedTypes(Integer.class, Department.class), "dep");
+
         try (Connection conn = DriverManager.getConnection(URL)) {
             Statement stmt = conn.createStatement();
 
@@ -240,6 +250,7 @@
             Set<String> expectedTbls = new HashSet<>(Arrays.asList(
                 "org.ORGANIZATION",
                 "pers.PERSON",
+                "dep.DEPARTMENT",
                 "PUBLIC.TEST",
                 "PUBLIC.Quoted",
                 "PUBLIC.TEST_DECIMAL_COLUMN"));
@@ -379,16 +390,18 @@
 
             Set<String> expectedCols = new HashSet<>(Arrays.asList(
                 "org.ORGANIZATION.ID.null",
-                "org.ORGANIZATION.NAME.null",
+                "org.ORGANIZATION.NAME.null.42",
                 "pers.PERSON.ORGID.null",
                 "pers.PERSON.AGE.null",
                 "pers.PERSON.NAME.null",
+                "dep.DEPARTMENT.ID.null",
+                "dep.DEPARTMENT.NAME.null.43",
                 "PUBLIC.TEST.ID.null",
-                "PUBLIC.TEST.NAME.'default name'",
-                "PUBLIC.TEST.VAL.null",
+                "PUBLIC.TEST.NAME.'default name'.50",
+                "PUBLIC.TEST.VAL.null.50",
                 "PUBLIC.TEST.AGE.21",
                 "PUBLIC.Quoted.Id.null",
-                "PUBLIC.Quoted.Name.null",
+                "PUBLIC.Quoted.Name.null.50",
                 "PUBLIC.TEST_DECIMAL_COLUMN.ID.null",
                 "PUBLIC.TEST_DECIMAL_COLUMN.DEC_COL.null.8.3"
             ));
@@ -538,6 +551,7 @@
             Set<String> expectedPks = new HashSet<>(Arrays.asList(
                 "org.ORGANIZATION.PK_org_ORGANIZATION._KEY",
                 "pers.PERSON.PK_pers_PERSON._KEY",
+                "dep.DEPARTMENT.PK_dep_DEPARTMENT._KEY",
                 "PUBLIC.TEST.PK_PUBLIC_TEST.ID",
                 "PUBLIC.TEST.PK_PUBLIC_TEST.NAME",
                 "PUBLIC.Quoted.PK_PUBLIC_Quoted.Id",
@@ -588,7 +602,7 @@
         try (Connection conn = DriverManager.getConnection(URL)) {
             ResultSet rs = conn.getMetaData().getSchemas();
 
-            Set<String> expectedSchemas = new HashSet<>(Arrays.asList("PUBLIC", "pers", "org"));
+            Set<String> expectedSchemas = new HashSet<>(Arrays.asList("PUBLIC", "pers", "org", "dep"));
 
             Set<String> schemas = new HashSet<>();
 
@@ -674,4 +688,27 @@
             this.name = name;
         }
     }
+
+    /**
+     * Organization.
+     */
+    @SuppressWarnings("UnusedDeclaration")
+    private static class Department implements Serializable {
+        /** ID. */
+        @QuerySqlField
+        private final int id;
+
+        /** Name. */
+        @QuerySqlField(precision = 43)
+        private final String name;
+
+        /**
+         * @param id ID.
+         * @param name Name.
+         */
+        private Department(int id, String name) {
+            this.id = id;
+            this.name = name;
+        }
+    }
 }
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java
index 7004635..c83977c 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
 import org.apache.ignite.internal.processors.query.GridQueryCancel;
 import org.apache.ignite.internal.processors.query.GridQueryProcessor;
 import org.apache.ignite.internal.processors.query.SqlClientContext;
@@ -77,9 +78,19 @@
         super.afterTest();
     }
 
-        /** {@inheritDoc} */
+    /** {@inheritDoc} */
+    @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception {
+        Connection c = connect(grid(0), null);
+
+        execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0) +
+            " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY " + flushFreq);
+
+        return c;
+    }
+
+    /** {@inheritDoc} */
     @Override protected Connection createOrdinaryConnection() throws SQLException {
-        return JdbcThinAbstractSelfTest.connect(grid(0), null);
+        return connect(grid(0), null);
     }
 
     /**
@@ -495,11 +506,11 @@
 
         /** {@inheritDoc} */
         @Override public List<FieldsQueryCursor<List<?>>> querySqlFields(String schemaName, SqlFieldsQuery qry,
-            @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts,
+            @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, MvccQueryTracker tracker,
             GridQueryCancel cancel) {
             IndexingWithContext.cliCtx = cliCtx;
 
-            return super.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel);
+            return super.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, tracker, cancel);
         }
     }
 }
\ No newline at end of file
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java
index b91258f..b0788e7 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java
@@ -25,7 +25,7 @@
 public class JdbcThinStreamingNotOrderedSelfTest extends JdbcThinStreamingAbstractSelfTest {
     /** {@inheritDoc} */
     @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception {
-        Connection c = JdbcThinAbstractSelfTest.connect(grid(0), null);
+        Connection c = connect(grid(0), null);
 
         execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize
             + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0)
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java
index b615f8c..c116a7d 100644
--- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java
@@ -25,7 +25,7 @@
 public class JdbcThinStreamingOrderedSelfTest extends JdbcThinStreamingAbstractSelfTest {
     /** {@inheritDoc} */
     @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception {
-        Connection c = JdbcThinAbstractSelfTest.connect(grid(0), null);
+        Connection c = connect(grid(0), null);
 
         execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize
             + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0)
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsAbstractComplexSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsAbstractComplexSelfTest.java
new file mode 100644
index 0000000..68ed36b
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsAbstractComplexSelfTest.java
@@ -0,0 +1,1056 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+import java.sql.BatchUpdateException;
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ * Test to check various transactional scenarios.
+ */
+public abstract class JdbcThinTransactionsAbstractComplexSelfTest extends JdbcThinAbstractSelfTest {
+    /** Client node index. */
+    final static int CLI_IDX = 1;
+
+    /**
+     * Closure to perform ordinary delete after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadDel = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+        }
+    };
+
+    /**
+     * Closure to perform fast delete after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadFastDel = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+        }
+    };
+
+    /**
+     * Closure to perform ordinary update after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadUpdate = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "UPDATE \"Person\".Person set firstname = 'Joe' where firstname = 'John'");
+        }
+    };
+
+    /**
+     * Closure to perform ordinary delete and rollback after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadDelAndRollback = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+
+            rollback(conn);
+        }
+    };
+
+    /**
+     * Closure to perform fast delete after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadFastDelAndRollback = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+
+            rollback(conn);
+        }
+    };
+
+    /**
+     * Closure to perform ordinary update and rollback after repeatable read.
+     */
+    private final IgniteInClosure<Connection> afterReadUpdateAndRollback = new IgniteInClosure<Connection>() {
+        @Override public void apply(Connection conn) {
+            execute(conn, "UPDATE \"Person\".Person set firstname = 'Joe' where firstname = 'John'");
+
+            rollback(conn);
+        }
+    };
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String testIgniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(testIgniteInstanceName);
+
+        CacheConfiguration<Integer, Person> ccfg = new CacheConfiguration<>("Person");
+
+        ccfg.setIndexedTypes(Integer.class, Person.class);
+
+        ccfg.getQueryEntities().iterator().next().setKeyFieldName("id");
+
+        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);
+
+        ccfg.setCacheMode(CacheMode.PARTITIONED);
+
+        cfg.setCacheConfiguration(ccfg);
+
+        // Let the node with index 1 be client node.
+        cfg.setClientMode(F.eq(testIgniteInstanceName, getTestIgniteInstanceName(CLI_IDX)));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        execute("ALTER TABLE \"Person\".person add if not exists cityid int");
+
+        execute("ALTER TABLE \"Person\".person add if not exists companyid int");
+
+        execute("CREATE TABLE City (id int primary key, name varchar, population int) WITH " +
+            "\"atomicity=transactional_snapshot,template=partitioned,backups=3,cache_name=City\"");
+
+        execute("CREATE TABLE Company (id int, \"cityid\" int, name varchar, primary key (id, \"cityid\")) WITH " +
+            "\"atomicity=transactional_snapshot,template=partitioned,backups=1,wrap_value=false,affinity_key=cityid," +
+            "cache_name=Company\"");
+
+        execute("CREATE TABLE Product (id int primary key, name varchar, companyid int) WITH " +
+            "\"atomicity=transactional_snapshot,template=partitioned,backups=2,cache_name=Product\"");
+
+        execute("CREATE INDEX IF NOT EXISTS prodidx ON Product(companyid)");
+
+        execute("CREATE INDEX IF NOT EXISTS persidx ON \"Person\".person(cityid)");
+
+        insertPerson(1, "John", "Smith", 1, 1);
+
+        insertPerson(2, "Mike", "Johns", 1, 2);
+
+        insertPerson(3, "Sam", "Jules", 2, 2);
+
+        insertPerson(4, "Alex", "Pope", 2, 3);
+
+        insertPerson(5, "Peter", "Williams", 2, 3);
+
+        insertCity(1, "Los Angeles", 5000);
+
+        insertCity(2, "Seattle", 1500);
+
+        insertCity(3, "New York", 12000);
+
+        insertCity(4, "Cupertino", 400);
+
+        insertCompany(1, "Microsoft", 2);
+
+        insertCompany(2, "Google", 3);
+
+        insertCompany(3, "Facebook", 1);
+
+        insertCompany(4, "Uber", 1);
+
+        insertCompany(5, "Apple", 4);
+
+        insertProduct(1, "Search", 2);
+
+        insertProduct(2, "Windows", 1);
+
+        insertProduct(3, "Mac", 5);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid(0);
+
+        startGrid(1);
+
+        startGrid(2);
+
+        startGrid(3);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        execute("DELETE FROM \"Person\".Person");
+
+        execute("DROP TABLE City");
+
+        execute("DROP TABLE Company");
+
+        execute("DROP TABLE Product");
+
+        super.afterTest();
+    }
+
+    /**
+     *
+     */
+    public void testSingleDmlStatement() throws SQLException {
+        insertPerson(6, "John", "Doe", 2, 2);
+
+        assertEquals(Collections.singletonList(l(6, "John", "Doe", 2, 2)),
+            execute("SELECT * FROM \"Person\".Person where id = 6"));
+    }
+
+    /**
+     *
+     */
+    public void testMultipleDmlStatements() throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertPerson(conn, 6, "John", "Doe", 2, 2);
+
+                // https://issues.apache.org/jira/browse/IGNITE-6938 - we can only see results of
+                // UPDATE of what we have not inserted ourselves.
+                execute(conn, "UPDATE \"Person\".person SET lastname = 'Jameson' where lastname = 'Jules'");
+
+                execute(conn, "DELETE FROM \"Person\".person where id = 5");
+            }
+        });
+
+        assertEquals(l(
+            l(3, "Sam", "Jameson", 2, 2),
+            l(6, "John", "Doe", 2, 2)
+        ), execute("SELECT * FROM \"Person\".Person where id = 3 or id >= 5 order by id"));
+    }
+
+    /**
+     *
+     */
+    public void testBatchDmlStatements() throws SQLException {
+        doBatchedInsert();
+
+        assertEquals(l(
+            l(6, "John", "Doe", 2, 2),
+            l(7, "Mary", "Lee", 1, 3)
+        ), execute("SELECT * FROM \"Person\".Person where id > 5 order by id"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testBatchDmlStatementsIntermediateFailure() throws SQLException {
+        insertPerson(6, "John", "Doe", 2, 2);
+
+        IgniteException e = (IgniteException)GridTestUtils.assertThrows(null, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                doBatchedInsert();
+
+                return null;
+            }
+        }, IgniteException.class, "Duplicate key during INSERT [key=KeyCacheObjectImpl " +
+            "[part=6, val=6, hasValBytes=true]]");
+
+        assertTrue(e.getCause() instanceof BatchUpdateException);
+
+        assertTrue(e.getCause().getMessage().contains("Duplicate key during INSERT [key=KeyCacheObjectImpl " +
+            "[part=6, val=6, hasValBytes=true]]"));
+
+        // First we insert id 7, then 6. Still, 7 is not in the cache as long as the whole batch has failed inside tx.
+        assertEquals(Collections.emptyList(), execute("SELECT * FROM \"Person\".Person where id > 6 order by id"));
+    }
+
+    /**
+     *
+     */
+    private void doBatchedInsert() throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                try {
+                    try (PreparedStatement ps = conn.prepareStatement("INSERT INTO \"Person\".person " +
+                        "(id, firstName, lastName, cityId, companyId) values (?, ?, ?, ?, ?)")) {
+                        ps.setInt(1, 7);
+
+                        ps.setString(2, "Mary");
+
+                        ps.setString(3, "Lee");
+
+                        ps.setInt(4, 1);
+
+                        ps.setInt(5, 3);
+
+                        ps.addBatch();
+
+                        ps.setInt(1, 6);
+
+                        ps.setString(2, "John");
+
+                        ps.setString(3, "Doe");
+
+                        ps.setInt(4, 2);
+
+                        ps.setInt(5, 2);
+
+                        ps.addBatch();
+
+                        ps.executeBatch();
+                    }
+                }
+                catch (SQLException e) {
+                    throw new IgniteException(e);
+                }
+            }
+        });
+    }
+
+    /**
+     *
+     */
+    public void testInsertAndQueryMultipleCaches() throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertCity(conn, 5, "St Petersburg", 6000);
+
+                insertCompany(conn, 6, "VK", 5);
+
+                insertPerson(conn, 6, "Peter", "Sergeev", 5, 6);
+            }
+        });
+
+        try (Connection c = connect("distributedJoins=true")) {
+            assertEquals(l(l(5, "St Petersburg", 6000, 6, 5, "VK", 6, "Peter", "Sergeev", 5, 6)),
+                execute(c, "SELECT * FROM City left join Company on City.id = Company.\"cityid\" " +
+                    "left join \"Person\".Person p on City.id = p.cityid WHERE p.id = 6 or company.id = 6"));
+        }
+    }
+
+    /**
+     *
+     */
+    public void testColocatedJoinSelectAndInsertInTransaction() throws SQLException {
+        // We'd like to put some Google into cities with over 1K population which don't have it yet
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                List<Integer> ids = flat(execute(conn, "SELECT distinct City.id from City left join Company c on " +
+                    "City.id = c.\"cityid\" where population >= 1000 and c.name <> 'Google' order by City.id"));
+
+                assertEqualsCollections(l(1, 2), ids);
+
+                int i = 5;
+
+                for (int l : ids)
+                    insertCompany(conn, ++i, "Google", l);
+            }
+        });
+
+        assertEqualsCollections(l("Los Angeles", "Seattle", "New York"), flat(execute("SELECT City.name from City " +
+            "left join Company c on city.id = c.\"cityid\" WHERE c.name = 'Google' order by City.id")));
+    }
+
+    /**
+     *
+     */
+    public void testDistributedJoinSelectAndInsertInTransaction() throws SQLException {
+        try (Connection c = connect("distributedJoins=true")) {
+            // We'd like to put some Google into cities with over 1K population which don't have it yet
+            executeInTransaction(c, new TransactionClosure() {
+                @Override public void apply(Connection conn) {
+                    List<?> res = flat(execute(conn, "SELECT p.id,p.name,c.id from Company c left join Product p on " +
+                        "c.id = p.companyid left join City on city.id = c.\"cityid\" WHERE c.name <> 'Microsoft' " +
+                        "and population < 1000"));
+
+                    assertEqualsCollections(l(3, "Mac", 5), res);
+
+                    insertProduct(conn, 4, (String)res.get(1), 1);
+                }
+            });
+        }
+
+        try (Connection c = connect("distributedJoins=true")) {
+            assertEqualsCollections(l("Windows", "Mac"), flat(execute(c, "SELECT p.name from Company c left join " +
+                "Product p on c.id = p.companyid WHERE c.name = 'Microsoft' order by p.id")));
+        }
+    }
+
+    /**
+     *
+     */
+    public void testInsertFromExpression() throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "insert into city (id, name, population) values (? + 1, ?, ?)",
+                    8, "Moscow", 15000);
+            }
+        });
+    }
+
+    /**
+     *
+     */
+    public void testAutoRollback() throws SQLException {
+        try (Connection c = connect()) {
+            begin(c);
+
+            insertPerson(c, 6, "John", "Doe", 2, 2);
+        }
+
+        // Connection has not hung on close and update has not been applied.
+        assertTrue(personCache().query(new SqlFieldsQuery("SELECT * FROM \"Person\".Person WHERE id = 6"))
+            .getAll().isEmpty());
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadWithConcurrentDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+            }
+        }, null);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadWithConcurrentFastDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+            }
+        }, null);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadWithConcurrentCacheRemove() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                personCache().remove(1);
+            }
+        }, null);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+            }
+        }, afterReadDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentFastDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+            }
+        }, afterReadDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentCacheRemove() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                personCache().remove(1);
+            }
+        }, afterReadDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+            }
+        }, afterReadFastDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentFastDelete() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+            }
+        }, afterReadFastDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentCacheRemove() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                personCache().remove(1);
+            }
+        }, afterReadFastDel);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentDeleteAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+            }
+        }, afterReadDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentFastDeleteAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+            }
+        }, afterReadDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndDeleteWithConcurrentCacheRemoveAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                personCache().remove(1);
+            }
+        }, afterReadDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentDeleteAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where firstname = 'John'");
+            }
+        }, afterReadFastDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentFastDeleteAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "DELETE FROM \"Person\".Person where id = 1");
+            }
+        }, afterReadFastDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndFastDeleteWithConcurrentCacheRemoveAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                personCache().remove(1);
+            }
+        }, afterReadFastDelAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadWithConcurrentUpdate() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "UPDATE \"Person\".Person SET lastname = 'Fix' where firstname = 'John'");
+            }
+        }, null);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadWithConcurrentCacheReplace() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                Person p = new Person();
+
+                p.id = 1;
+                p.firstName = "Luke";
+                p.lastName = "Maxwell";
+
+                personCache().replace(1, p);
+            }
+        }, null);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndUpdateWithConcurrentUpdate() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "UPDATE \"Person\".Person SET lastname = 'Fix' where firstname = 'John'");
+            }
+        }, afterReadUpdate);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndUpdateWithConcurrentCacheReplace() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                Person p = new Person();
+
+                p.id = 1;
+                p.firstName = "Luke";
+                p.lastName = "Maxwell";
+
+                personCache().replace(1, p);
+            }
+        }, afterReadUpdate);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndUpdateWithConcurrentUpdateAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                execute(conn, "UPDATE \"Person\".Person SET lastname = 'Fix' where firstname = 'John'");
+            }
+        }, afterReadUpdateAndRollback);
+    }
+
+    /**
+     *
+     */
+    public void testRepeatableReadAndUpdateWithConcurrentCacheReplaceAndRollback() throws Exception {
+        doTestRepeatableRead(new IgniteInClosure<Connection>() {
+            @Override public void apply(Connection conn) {
+                Person p = new Person();
+
+                p.id = 1;
+                p.firstName = "Luke";
+                p.lastName = "Maxwell";
+
+                personCache().replace(1, p);
+            }
+        }, afterReadUpdateAndRollback);
+    }
+
+    /**
+     * Perform repeatable reads and concurrent changes.
+     * @param concurrentWriteClo Updating closure.
+     * @param afterReadClo Closure making write changes that should also be made inside repeatable read transaction
+     *     (must yield an exception).
+     * @throws Exception if failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void doTestRepeatableRead(final IgniteInClosure<Connection> concurrentWriteClo,
+        final IgniteInClosure<Connection> afterReadClo) throws Exception {
+        final CountDownLatch repeatableReadLatch = new CountDownLatch(1);
+
+        final CountDownLatch initLatch = new CountDownLatch(1);
+
+        final IgniteInternalFuture<?> readFut = multithreadedAsync(new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                executeInTransaction(new TransactionClosure() {
+                    @Override public void apply(Connection conn) {
+                        List<?> before = flat(execute(conn, "SELECT * from \"Person\".Person where id = 1"));
+
+                        assertEqualsCollections(l(1, "John", "Smith", 1, 1), before);
+
+                        initLatch.countDown();
+
+                        try {
+                            U.await(repeatableReadLatch);
+                        }
+                        catch (IgniteInterruptedCheckedException e) {
+                            throw new IgniteException(e);
+                        }
+
+                        List<?> after = flat(execute(conn, "SELECT * from \"Person\".Person where id = 1"));
+
+                        assertEqualsCollections(before, after);
+
+                        if (afterReadClo != null)
+                            afterReadClo.apply(conn);
+                    }
+                });
+
+                return null;
+            }
+        }, 1);
+
+        IgniteInternalFuture<?> conModFut = multithreadedAsync(new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                executeInTransaction(new TransactionClosure() {
+                    @Override public void apply(Connection conn) {
+                        try {
+                            U.await(initLatch);
+                        }
+                        catch (IgniteInterruptedCheckedException e) {
+                            throw new IgniteException(e);
+                        }
+
+                        concurrentWriteClo.apply(conn);
+
+                        repeatableReadLatch.countDown();
+                    }
+                });
+
+                return null;
+            }
+        }, 1);
+
+        conModFut.get();
+
+        if (afterReadClo != null) {
+            IgniteCheckedException ex = (IgniteCheckedException)GridTestUtils.assertThrows(null, new Callable() {
+                @Override public Object call() throws Exception {
+                    readFut.get();
+
+                    return null;
+                }
+            }, IgniteCheckedException.class, "Mvcc version mismatch.");
+
+            assertTrue(X.hasCause(ex, SQLException.class));
+
+            assertTrue(X.getCause(ex).getMessage().contains("Mvcc version mismatch."));
+        }
+        else
+            readFut.get();
+    }
+
+    /**
+     * Create a new connection, a new transaction and run given closure in its scope.
+     * @param clo Closure.
+     * @throws SQLException if failed.
+     */
+    private void executeInTransaction(TransactionClosure clo) throws SQLException {
+        try (Connection conn = connect()) {
+            executeInTransaction(conn, clo);
+        }
+    }
+
+    /**
+     * Create a new transaction and run given closure in its scope.
+     * @param conn Connection.
+     * @param clo Closure.
+     * @throws SQLException if failed.
+     */
+    private void executeInTransaction(Connection conn, TransactionClosure clo) throws SQLException {
+        begin(conn);
+
+        clo.apply(conn);
+
+        commit(conn);
+    }
+
+    /**
+     * @return Auto commit strategy for this test.
+     */
+    abstract boolean autoCommit();
+
+    /**
+     * @param c Connection to begin a transaction on.
+     */
+    private void begin(Connection c) throws SQLException {
+        if (autoCommit())
+            execute(c, "BEGIN");
+    }
+
+    /**
+     * @param c Connection to begin a transaction on.
+     */
+    private void commit(Connection c) throws SQLException {
+        if (autoCommit())
+            execute(c, "COMMIT");
+        else
+            c.commit();
+    }
+
+    /**
+     * @param c Connection to rollback a transaction on.
+     */
+    private void rollback(Connection c) {
+        try {
+            if (autoCommit())
+                execute(c, "ROLLBACK");
+            else
+                c.rollback();
+        }
+        catch (SQLException e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /**
+     * @param sql Statement.
+     * @param args Arguments.
+     * @return Result set.
+     * @throws SQLException if failed.
+     */
+    List<List<?>> execute(String sql, Object... args) throws SQLException {
+        try (Connection c = connect()) {
+            c.setAutoCommit(true);
+
+            return execute(c, sql, args);
+        }
+    }
+
+    /**
+     * @param sql Statement.
+     * @param args Arguments.
+     * @return Result set.
+     * @throws RuntimeException if failed.
+     */
+    protected List<List<?>> execute(Connection conn, String sql, Object... args) {
+        try {
+            return super.execute(conn, sql, args);
+        }
+        catch (SQLException e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /**
+     * @return New connection to default node.
+     * @throws SQLException if failed.
+     */
+    private Connection connect() throws SQLException {
+        return connect(null);
+    }
+
+    /**
+     * @param params Connection parameters.
+     * @return New connection to default node.
+     * @throws SQLException if failed.
+     */
+    private Connection connect(String params) throws SQLException {
+        Connection c = connect(node(), params);
+
+        c.setAutoCommit(false);
+
+        return c;
+    }
+
+    /**
+     * @param node Node to connect to.
+     * @param params Connection parameters.
+     * @return Thin JDBC connection to specified node.
+     */
+    protected Connection connect(IgniteEx node, String params) {
+        try {
+            return super.connect(node, params);
+        }
+        catch (SQLException e) {
+            throw new AssertionError(e);
+        }
+    }
+
+    /**
+     * @return Default node to fire queries from.
+     */
+    private IgniteEx node() {
+        return grid(nodeIndex());
+    }
+
+    /**
+     * @return {@link Person} cache.
+     */
+    private IgniteCache<Integer, Person> personCache() {
+        return node().cache("Person");
+    }
+
+    /**
+     * @return Node index to fire queries from.
+     */
+    abstract int nodeIndex();
+
+    /**
+     * @param id New person's id.
+     * @param firstName First name.
+     * @param lastName Second name.
+     * @param cityId City id.
+     * @param companyId Company id.
+     * @throws SQLException if failed.
+     */
+    private void insertPerson(final int id, final String firstName, final String lastName, final int cityId,
+        final int companyId) throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertPerson(conn, id, firstName, lastName, cityId, companyId);
+            }
+        });
+    }
+
+    /**
+     * @param c Connection.
+     * @param id New person's id.
+     * @param firstName First name.
+     * @param lastName Second name.
+     * @param cityId City id.
+     * @param companyId Company id.
+     */
+    private void insertPerson(Connection c, int id, String firstName, String lastName, int cityId, int companyId) {
+        execute(c, "INSERT INTO \"Person\".person (id, firstName, lastName, cityId, companyId) values (?, ?, ?, ?, ?)",
+            id, firstName, lastName, cityId, companyId);
+    }
+
+    /**
+     * @param id New city's id.
+     * @param name City name.
+     * @param population Number of people.
+     * @throws SQLException if failed.
+     */
+    private void insertCity(final int id, final String name, final int population) throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertCity(conn, id, name, population);
+            }
+        });
+    }
+
+    /**
+     * @param c Connection.
+     * @param id New city's id.
+     * @param name City name.
+     * @param population Number of people.
+     */
+    private void insertCity(Connection c, int id, String name, int population) {
+        execute(c, "INSERT INTO city (id, name, population) values (?, ?, ?)", id, name, population);
+    }
+
+    /**
+     * @param id New company's id.
+     * @param name Company name.
+     * @param cityId City id.
+     * @throws SQLException if failed.
+     */
+    private void insertCompany(final int id, final String name, final int cityId) throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertCompany(conn, id, name, cityId);
+            }
+        });
+    }
+
+    /**
+     * @param c Connection.
+     * @param id New company's id.
+     * @param name Company name.
+     * @param cityId City id.
+     */
+    private void insertCompany(Connection c, int id, String name, int cityId) {
+        execute(c, "INSERT INTO company (id, name, \"cityid\") values (?, ?, ?)", id, name, cityId);
+    }
+
+    /**
+     * @param id New product's id.
+     * @param name Product name.
+     * @param companyId Company id..
+     * @throws SQLException if failed.
+     */
+    private void insertProduct(final int id, final String name, final int companyId) throws SQLException {
+        executeInTransaction(new TransactionClosure() {
+            @Override public void apply(Connection conn) {
+                insertProduct(conn, id, name, companyId);
+            }
+        });
+    }
+
+    /**
+     * @param c Connection.
+     * @param id New product's id.
+     * @param name Product name.
+     * @param companyId Company id..
+     */
+    private void insertProduct(Connection c, int id, String name, int companyId) {
+        execute(c, "INSERT INTO product (id, name, companyid) values (?, ?, ?)", id, name, companyId);
+    }
+
+    /**
+     * Person class.
+     */
+    private final static class Person {
+        /** */
+        @QuerySqlField
+        public int id;
+
+        /** */
+        @QuerySqlField
+        public String firstName;
+
+        /** */
+        @QuerySqlField
+        public String lastName;
+    }
+
+    /**
+     * Closure to be executed in scope of a transaction.
+     */
+    private abstract class TransactionClosure implements IgniteInClosure<Connection> {
+        // No-op.
+    }
+
+    /**
+     * @return List of given arguments.
+     */
+    private static List<?> l(Object... args) {
+        return F.asList(args);
+    }
+
+    /**
+     * Flatten rows.
+     * @param rows Rows.
+     * @return Rows as a single list.
+     */
+    @SuppressWarnings("unchecked")
+    private static <T> List<T> flat(Collection<? extends Collection<?>> rows) {
+        return new ArrayList<>(F.flatCollections((Collection<? extends Collection<T>>)rows));
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientAutoCommitComplexSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientAutoCommitComplexSelfTest.java
new file mode 100644
index 0000000..d5b505a
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientAutoCommitComplexSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+/**
+ *
+ */
+public class JdbcThinTransactionsClientAutoCommitComplexSelfTest extends JdbcThinTransactionsAbstractComplexSelfTest {
+    /** {@inheritDoc} */
+    @Override boolean autoCommit() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override int nodeIndex() {
+        return CLI_IDX;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientNoAutoCommitComplexSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientNoAutoCommitComplexSelfTest.java
new file mode 100644
index 0000000..7fa69fd
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsClientNoAutoCommitComplexSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+/**
+ *
+ */
+public class JdbcThinTransactionsClientNoAutoCommitComplexSelfTest extends JdbcThinTransactionsAbstractComplexSelfTest {
+    /** {@inheritDoc} */
+    @Override boolean autoCommit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override int nodeIndex() {
+        return CLI_IDX;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsSelfTest.java
new file mode 100644
index 0000000..a8fa47b
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsSelfTest.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+import java.sql.BatchUpdateException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.binary.BinaryMarshaller;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridStringLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Tests to check behavior with transactions on.
+ */
+public class JdbcThinTransactionsSelfTest extends JdbcThinAbstractSelfTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final String URL = "jdbc:ignite:thin://127.0.0.1";
+
+    /** Logger. */
+    private GridStringLogger log;
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setCacheConfiguration(cacheConfiguration(DEFAULT_CACHE_NAME));
+
+        TcpDiscoverySpi disco = new TcpDiscoverySpi();
+
+        disco.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(disco);
+
+        cfg.setMarshaller(new BinaryMarshaller());
+
+        cfg.setGridLogger(log = new GridStringLogger());
+
+        return cfg;
+    }
+
+    /**
+     * @param name Cache name.
+     * @return Cache configuration.
+     * @throws Exception In case of error.
+     */
+    private CacheConfiguration cacheConfiguration(@NotNull String name) throws Exception {
+        CacheConfiguration cfg = defaultCacheConfiguration();
+
+        cfg.setName(name);
+        cfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        startGrid(0);
+
+        try (Connection c = c(true, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                s.execute("CREATE TABLE INTS (k int primary key, v int) WITH \"cache_name=ints,wrap_value=false," +
+                    "atomicity=transactional_snapshot\"");
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @param autoCommit Auto commit mode.
+     * @param nestedTxMode Nested transactions mode.
+     * @return Connection.
+     * @throws SQLException if failed.
+     */
+    private static Connection c(boolean autoCommit, NestedTxMode nestedTxMode) throws SQLException {
+        Connection res = DriverManager.getConnection(URL + "/?nestedTransactionsMode=" + nestedTxMode.name());
+
+        res.setAutoCommit(autoCommit);
+
+        return res;
+    }
+
+    /**
+     *
+     */
+    public void testTransactionsBeginCommitRollback() throws IgniteCheckedException {
+        final AtomicBoolean stop = new AtomicBoolean();
+
+        IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    try (Connection c = c(false, NestedTxMode.ERROR)) {
+                        while (!stop.get()) {
+                            try (Statement s = c.createStatement()) {
+                                s.execute("BEGIN");
+
+                                c.commit();
+
+                                s.execute("BEGIN");
+
+                                c.rollback();
+                            }
+                        }
+                    }
+                }
+                catch (SQLException e) {
+                    throw new AssertionError(e);
+                }
+            }
+        }, 8, "jdbc-transactions");
+
+        U.sleep(5000);
+
+        stop.set(true);
+
+        fut.get();
+    }
+
+    /**
+     *
+     */
+    public void testTransactionsBeginCommitRollbackAutocommit() throws IgniteCheckedException {
+        GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    try (Connection c = c(true, NestedTxMode.ERROR)) {
+                        try (Statement s = c.createStatement())  {
+                            s.execute("BEGIN");
+
+                            s.execute("COMMIT");
+
+                            s.execute("BEGIN");
+
+                            s.execute("ROLLBACK");
+                        }
+                    }
+                }
+                catch (SQLException e) {
+                    throw new AssertionError(e);
+                }
+            }
+        }, 8, "jdbc-transactions").get();
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOff() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOff() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOff() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(false, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, false);
+                }
+
+                throw new AssertionError();
+            }
+        }, SQLException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOn() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOn() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOn() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(true, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, false);
+                }
+
+                throw new AssertionError();
+            }
+        }, SQLException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOffBatched() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOffBatched() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOffBatched() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(false, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, true);
+                }
+
+                throw new AssertionError();
+            }
+        }, BatchUpdateException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOnBatched() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOnBatched() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOnBatched() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(true, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, true);
+                }
+
+                throw new AssertionError();
+            }
+        }, BatchUpdateException.class, "Transaction has already been started.");
+    }
+
+    /**
+     * Try to start nested transaction via batch as well as separate statements.
+     * @param conn Connection.
+     * @param batched Whether {@link Statement#executeBatch()} should be used.
+     * @throws SQLException if failed.
+     */
+    private void doNestedTxStart(Connection conn, boolean batched) throws SQLException {
+        try (Statement s = conn.createStatement()) {
+            s.executeQuery("SELECT * FROM INTS");
+
+            if (batched) {
+                s.addBatch("BEGIN");
+
+                s.addBatch("BEGIN");
+
+                s.executeBatch();
+            }
+            else {
+                s.execute("BEGIN");
+
+                s.execute("BEGIN");
+            }
+        }
+    }
+
+    /**
+     * @throws SQLException if failed.
+     */
+    public void testAutoCommitSingle() throws SQLException {
+        doTestAutoCommit(false);
+    }
+
+    /**
+     * @throws SQLException if failed.
+     */
+    public void testAutoCommitBatched() throws SQLException {
+        doTestAutoCommit(true);
+    }
+
+    /**
+     * @param batched Batch mode flag.
+     * @throws SQLException if failed.
+     */
+    private void doTestAutoCommit(boolean batched) throws SQLException {
+        IgniteCache<Integer, ?> cache = grid(0).cache("ints");
+
+        try (Connection c = c(false, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                assertFalse(s.executeQuery("SELECT * from INTS").next());
+
+                if (batched) {
+                    s.addBatch("INSERT INTO INTS(k, v) values(1, 1)");
+
+                    s.executeBatch();
+                }
+                else
+                    s.execute("INSERT INTO INTS(k, v) values(1, 1)");
+
+                // We haven't committed anything yet - this check shows that autoCommit flag is in effect.
+                assertTrue(cache.query(new SqlFieldsQuery("SELECT * from INTS")).getAll().isEmpty());
+
+                // We should see own updates.
+                assertTrue(s.executeQuery("SELECT * from INTS").next());
+
+                c.commit();
+
+                c.setAutoCommit(true);
+
+                assertEquals(1, cache.get(1));
+
+                assertTrue(s.executeQuery("SELECT * from INTS").next());
+            }
+        }
+    }
+
+    /**
+     * Test that exception in one of the statements does not kill connection worker altogether.
+     * @throws SQLException if failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testExceptionHandling() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                s.execute("INSERT INTO INTS(k, v) values(1, 1)");
+
+                assertEquals(1, grid(0).cache("ints").get(1));
+
+                GridTestUtils.assertThrows(null, new Callable<Void>() {
+                    @Override public Void call() throws Exception {
+                        s.execute("INSERT INTO INTS(x, y) values(1, 1)");
+
+                        return null;
+                    }
+                }, SQLException.class, "Failed to parse query");
+
+                s.execute("INSERT INTO INTS(k, v) values(2, 2)");
+
+                assertEquals(2, grid(0).cache("ints").get(2));
+            }
+        }
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerAutoCommitComplexSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerAutoCommitComplexSelfTest.java
new file mode 100644
index 0000000..3c473ab
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerAutoCommitComplexSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+/**
+ *
+ */
+public class JdbcThinTransactionsServerAutoCommitComplexSelfTest extends JdbcThinTransactionsAbstractComplexSelfTest {
+    /** {@inheritDoc} */
+    @Override boolean autoCommit() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override int nodeIndex() {
+        return 0;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerNoAutoCommitComplexSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerNoAutoCommitComplexSelfTest.java
new file mode 100644
index 0000000..655d4c5
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsServerNoAutoCommitComplexSelfTest.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+/**
+ *
+ */
+public class JdbcThinTransactionsServerNoAutoCommitComplexSelfTest extends JdbcThinTransactionsAbstractComplexSelfTest {
+    /** {@inheritDoc} */
+    @Override boolean autoCommit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override int nodeIndex() {
+        return 0;
+    }
+}
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsWithMvccEnabledSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsWithMvccEnabledSelfTest.java
new file mode 100644
index 0000000..e01a53d
--- /dev/null
+++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinTransactionsWithMvccEnabledSelfTest.java
@@ -0,0 +1,447 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.jdbc.thin;
+
+import java.sql.BatchUpdateException;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicBoolean;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.binary.BinaryMarshaller;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridStringLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Tests to check behavior with transactions on.
+ */
+public class JdbcThinTransactionsWithMvccEnabledSelfTest extends JdbcThinAbstractSelfTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private static final String URL = "jdbc:ignite:thin://127.0.0.1";
+
+    /** Logger. */
+    private GridStringLogger log;
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("deprecation")
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setCacheConfiguration(cacheConfiguration(DEFAULT_CACHE_NAME));
+
+        TcpDiscoverySpi disco = new TcpDiscoverySpi();
+
+        disco.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(disco);
+
+        cfg.setMarshaller(new BinaryMarshaller());
+
+        cfg.setGridLogger(log = new GridStringLogger());
+
+        return cfg;
+    }
+
+    /**
+     * @param name Cache name.
+     * @return Cache configuration.
+     * @throws Exception In case of error.
+     */
+    private CacheConfiguration cacheConfiguration(@NotNull String name) throws Exception {
+        CacheConfiguration cfg = defaultCacheConfiguration();
+
+        cfg.setName(name);
+        cfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        startGrid(0);
+
+        try (Connection c = c(true, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                s.execute("CREATE TABLE INTS (k int primary key, v int) WITH \"cache_name=ints,wrap_value=false," +
+                    "atomicity=transactional_snapshot\"");
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @param autoCommit Auto commit mode.
+     * @param nestedTxMode Nested transactions mode.
+     * @return Connection.
+     * @throws SQLException if failed.
+     */
+    private static Connection c(boolean autoCommit, NestedTxMode nestedTxMode) throws SQLException {
+        Connection res = DriverManager.getConnection(URL + "/?nestedTransactionsMode=" + nestedTxMode.name());
+
+        res.setAutoCommit(autoCommit);
+
+        return res;
+    }
+
+    /**
+     *
+     */
+    public void testTransactionsBeginCommitRollback() throws IgniteCheckedException {
+        final AtomicBoolean stop = new AtomicBoolean();
+
+        IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    try (Connection c = c(false, NestedTxMode.ERROR)) {
+                        while (!stop.get()) {
+                            try (Statement s = c.createStatement()) {
+                                s.execute("BEGIN");
+
+                                c.commit();
+
+                                s.execute("BEGIN");
+
+                                c.rollback();
+                            }
+                        }
+                    }
+                }
+                catch (SQLException e) {
+                    throw new AssertionError(e);
+                }
+            }
+        }, 8, "jdbc-transactions");
+
+        U.sleep(5000);
+
+        stop.set(true);
+
+        fut.get();
+    }
+
+    /**
+     *
+     */
+    public void testTransactionsBeginCommitRollbackAutocommit() throws IgniteCheckedException {
+        GridTestUtils.runMultiThreadedAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    try (Connection c = c(true, NestedTxMode.ERROR)) {
+                        try (Statement s = c.createStatement())  {
+                            s.execute("BEGIN");
+
+                            s.execute("COMMIT");
+
+                            s.execute("BEGIN");
+
+                            s.execute("ROLLBACK");
+                        }
+                    }
+                }
+                catch (SQLException e) {
+                    throw new AssertionError(e);
+                }
+            }
+        }, 8, "jdbc-transactions").get();
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOff() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOff() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOff() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(false, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, false);
+                }
+
+                throw new AssertionError();
+            }
+        }, SQLException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOn() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOn() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, false);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOn() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(true, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, false);
+                }
+
+                throw new AssertionError();
+            }
+        }, SQLException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOffBatched() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOffBatched() throws SQLException {
+        try (Connection c = c(false, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOffBatched() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(false, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, true);
+                }
+
+                throw new AssertionError();
+            }
+        }, BatchUpdateException.class, "Transaction has already been started.");
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testIgnoreNestedTxAutocommitOnBatched() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.IGNORE)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertTrue(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCommitNestedTxAutocommitOnBatched() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.COMMIT)) {
+            doNestedTxStart(c, true);
+        }
+
+        assertFalse(log.toString().contains("ignoring BEGIN command"));
+    }
+
+    /**
+     *
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testErrorNestedTxAutocommitOnBatched() throws SQLException {
+        GridTestUtils.assertThrows(null, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Connection c = c(true, NestedTxMode.ERROR)) {
+                    doNestedTxStart(c, true);
+                }
+
+                throw new AssertionError();
+            }
+        }, BatchUpdateException.class, "Transaction has already been started.");
+    }
+
+    /**
+     * Try to start nested transaction via batch as well as separate statements.
+     * @param conn Connection.
+     * @param batched Whether {@link Statement#executeBatch()} should be used.
+     * @throws SQLException if failed.
+     */
+    private void doNestedTxStart(Connection conn, boolean batched) throws SQLException {
+        try (Statement s = conn.createStatement()) {
+            s.executeQuery("SELECT * FROM INTS");
+
+            if (batched) {
+                s.addBatch("BEGIN");
+
+                s.addBatch("BEGIN");
+
+                s.executeBatch();
+            }
+            else {
+                s.execute("BEGIN");
+
+                s.execute("BEGIN");
+            }
+        }
+    }
+
+    /**
+     * @throws SQLException if failed.
+     */
+    public void testAutoCommitSingle() throws SQLException {
+        doTestAutoCommit(false);
+    }
+
+    /**
+     * @throws SQLException if failed.
+     */
+    public void testAutoCommitBatched() throws SQLException {
+        doTestAutoCommit(true);
+    }
+
+    /**
+     * @param batched Batch mode flag.
+     * @throws SQLException if failed.
+     */
+    private void doTestAutoCommit(boolean batched) throws SQLException {
+        IgniteCache<Integer, ?> cache = grid(0).cache("ints");
+
+        try (Connection c = c(false, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                assertFalse(s.executeQuery("SELECT * from INTS").next());
+
+                if (batched) {
+                    s.addBatch("INSERT INTO INTS(k, v) values(1, 1)");
+
+                    s.executeBatch();
+                }
+                else
+                    s.execute("INSERT INTO INTS(k, v) values(1, 1)");
+
+                // We haven't committed anything yet - this check shows that autoCommit flag is in effect.
+                assertTrue(cache.query(new SqlFieldsQuery("SELECT * from INTS")).getAll().isEmpty());
+
+                // We should see own updates.
+                assertTrue(s.executeQuery("SELECT * from INTS").next());
+
+                c.commit();
+
+                c.setAutoCommit(true);
+
+                assertEquals(1, cache.get(1));
+
+                assertTrue(s.executeQuery("SELECT * from INTS").next());
+            }
+        }
+    }
+
+    /**
+     * Test that exception in one of the statements does not kill connection worker altogether.
+     * @throws SQLException if failed.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testExceptionHandling() throws SQLException {
+        try (Connection c = c(true, NestedTxMode.ERROR)) {
+            try (Statement s = c.createStatement()) {
+                s.execute("INSERT INTO INTS(k, v) values(1, 1)");
+
+                assertEquals(1, grid(0).cache("ints").get(1));
+
+                GridTestUtils.assertThrows(null, new Callable<Void>() {
+                    @Override public Void call() throws Exception {
+                        s.execute("INSERT INTO INTS(x, y) values(1, 1)");
+
+                        return null;
+                    }
+                }, SQLException.class, "Failed to parse query");
+
+                s.execute("INSERT INTO INTS(k, v) values(2, 2)");
+
+                assertEquals(2, grid(0).cache("ints").get(2));
+            }
+        }
+    }
+}
diff --git a/modules/clients/src/test/keystore/ca/node01.jks b/modules/clients/src/test/keystore/ca/node01.jks
index 23c0643..7dec684 100644
--- a/modules/clients/src/test/keystore/ca/node01.jks
+++ b/modules/clients/src/test/keystore/ca/node01.jks
Binary files differ
diff --git a/modules/clients/src/test/keystore/ca/node02.jks b/modules/clients/src/test/keystore/ca/node02.jks
index 26da4b5..985abae 100644
--- a/modules/clients/src/test/keystore/ca/node02.jks
+++ b/modules/clients/src/test/keystore/ca/node02.jks
Binary files differ
diff --git a/modules/clients/src/test/keystore/ca/node03.jks b/modules/clients/src/test/keystore/ca/node03.jks
index 831ca24..9a6ab40 100644
--- a/modules/clients/src/test/keystore/ca/node03.jks
+++ b/modules/clients/src/test/keystore/ca/node03.jks
Binary files differ
diff --git a/modules/clients/src/test/keystore/ca/oneindex.txt b/modules/clients/src/test/keystore/ca/oneindex.txt
index 8d347d0..5d0e1c9 100644
--- a/modules/clients/src/test/keystore/ca/oneindex.txt
+++ b/modules/clients/src/test/keystore/ca/oneindex.txt
@@ -1 +1 @@
-V	180824104710Z		01	unknown	/CN=node01
+V	210823155040Z		01	unknown	/CN=node01
diff --git a/modules/clients/src/test/keystore/ca/twoindex.txt b/modules/clients/src/test/keystore/ca/twoindex.txt
index 00b7307..1f9359d 100644
--- a/modules/clients/src/test/keystore/ca/twoindex.txt
+++ b/modules/clients/src/test/keystore/ca/twoindex.txt
@@ -1,2 +1,2 @@
-V	180824104716Z		01	unknown	/CN=node02
-V	180824104719Z		02	unknown	/CN=node03
+V	210823155541Z		01	unknown	/CN=node02
+V	210823155835Z		02	unknown	/CN=node03
diff --git a/modules/cloud/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/cloud/TcpDiscoveryCloudIpFinderSelfTest.java b/modules/cloud/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/cloud/TcpDiscoveryCloudIpFinderSelfTest.java
index 4bddb18..c754553 100644
--- a/modules/cloud/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/cloud/TcpDiscoveryCloudIpFinderSelfTest.java
+++ b/modules/cloud/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/cloud/TcpDiscoveryCloudIpFinderSelfTest.java
@@ -81,6 +81,8 @@
      * @throws Exception If any error occurs.
      */
     public void testRackspace() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9444");
+
         testCloudProvider("rackspace-cloudservers-us");
     }
 
diff --git a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java
index 99cf849..9512bae 100644
--- a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java
+++ b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java
@@ -168,7 +168,7 @@
 
 //        gen.generateAll(true);
 
-//        gen.generateAndWrite(GridChangeGlobalStateMessageResponse.class);
+//        gen.generateAndWrite(GridNearTxQueryResultsEnlistRequest.class);
 
 //        gen.generateAndWrite(GridNearAtomicUpdateRequest.class);
 
@@ -235,6 +235,8 @@
 //        gen.generateAndWrite(GridCacheVersionEx.class);
 //        gen.generateAndWrite(GridH2DmlRequest.class);
 //        gen.generateAndWrite(GridH2DmlResponse.class);
+//        gen.generateAndWrite(GridNearTxEnlistRequest.class);
+//        gen.generateAndWrite(GridNearTxEnlistResponse.class);
     }
 
     /**
diff --git a/modules/core/pom.xml b/modules/core/pom.xml
index 2b28c4f..9be5217 100644
--- a/modules/core/pom.xml
+++ b/modules/core/pom.xml
@@ -84,7 +84,7 @@
         <dependency>
             <groupId>commons-collections</groupId>
             <artifactId>commons-collections</artifactId>
-            <version>3.2.2</version>
+            <version>${commons.collections.version}</version>
             <scope>test</scope>
         </dependency>
 
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteCompute.java b/modules/core/src/main/java/org/apache/ignite/IgniteCompute.java
index 98a9eb6..2522121 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteCompute.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteCompute.java
@@ -702,6 +702,20 @@
      */
     public IgniteCompute withNoFailover();
 
+
+    /**
+     * Disables caching for the next executed task in the <b>current thread</b>.
+     * Has the same behaviour as annotation {@link org.apache.ignite.compute.ComputeTaskNoResultCache}.
+     *
+     * <p>
+     * Here is an example.
+     * <pre name="code" class="java">
+     * ignite.compute().withNoResultCache().run(new IgniteRunnable() {...});
+     * </pre>
+     * @return This {@code IgniteCompute} instance for chaining calls.
+     */
+    public IgniteCompute withNoResultCache();
+
     /**
      * Explicitly deploys a task with given class loader on the local node. Upon completion of this method,
      * a task can immediately be executed on the grid, considering that all participating
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteLock.java b/modules/core/src/main/java/org/apache/ignite/IgniteLock.java
index 2b3ad3d..b87a74a 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteLock.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteLock.java
@@ -122,7 +122,7 @@
      *
      * @throws IgniteException if the node is stopped or broken in non-failoverSafe mode
      */
-    void lock() throws IgniteException;
+    @Override void lock() throws IgniteException;
 
     /**
      * Acquires the lock unless the current thread is
@@ -314,7 +314,7 @@
      * @throws IllegalMonitorStateException if not owned by current thread
      * @throws IgniteException if node is stopped, or lock is already broken in non-failover safe mode
      */
-    void unlock() throws IgniteInterruptedException;
+    @Override void unlock() throws IgniteInterruptedException;
 
     /**
      * Returns a {@link Condition} instance for use with this
diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java
index c4b83e0..1db7296 100644
--- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java
+++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java
@@ -29,6 +29,7 @@
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.internal.client.GridClient;
 import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller;
+import org.apache.ignite.internal.processors.rest.GridRestCommand;
 import org.apache.ignite.internal.util.GridLogThrottle;
 import org.apache.ignite.stream.StreamTransformer;
 import org.jetbrains.annotations.Nullable;
@@ -137,6 +138,15 @@
     public static final String IGNITE_REST_START_ON_CLIENT = "IGNITE_REST_START_ON_CLIENT";
 
     /**
+     * This property changes output format of {@link GridRestCommand#CACHE_GET_ALL} from {k: v, ...}
+     * to [{"key": k, "value": v}, ...] to allow non-string keys output.
+     *
+     * @deprecated Should be made default in Apache Ignite 3.0.
+     */
+    @Deprecated
+    public static final String IGNITE_REST_GETALL_AS_ARRAY = "IGNITE_REST_GETALL_AS_ARRAY";
+
+    /**
      * This property defines the maximum number of attempts to remap near get to the same
      * primary node. Remapping may be needed when topology is changed concurrently with
      * get operation.
@@ -483,6 +493,9 @@
     /** SQL retry timeout. */
     public static final String IGNITE_SQL_RETRY_TIMEOUT = "IGNITE_SQL_RETRY_TIMEOUT";
 
+    /** Enable backward compatible handling of UUID through DDL. */
+    public static final String IGNITE_SQL_UUID_DDL_BYTE_FORMAT = "IGNITE_SQL_UUID_DDL_BYTE_FORMAT";
+
     /** Maximum size for affinity assignment history. */
     public static final String IGNITE_AFFINITY_HISTORY_SIZE = "IGNITE_AFFINITY_HISTORY_SIZE";
 
@@ -493,6 +506,10 @@
     public static final String IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE =
         "IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE";
 
+    /** Time interval that indicates that client reconnect throttle must be reset to zero. 2 minutes by default. */
+    public static final String CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL =
+        "CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL";
+
     /** Number of cache operation retries in case of topology exceptions. */
     public static final String IGNITE_CACHE_RETRIES_COUNT = "IGNITE_CACHE_RETRIES_COUNT";
 
@@ -783,6 +800,9 @@
      */
     public static final String IGNITE_WAL_SERIALIZER_VERSION = "IGNITE_WAL_SERIALIZER_VERSION";
 
+    /** Property for setup Ignite WAL segment sync timeout. */
+    public static final String IGNITE_WAL_SEGMENT_SYNC_TIMEOUT = "IGNITE_WAL_SEGMENT_SYNC_TIMEOUT";
+
     /**
      * If the property is set Ignite will use legacy node comparator (based on node order) inste
      *
@@ -869,6 +889,17 @@
     public static final String IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP = "IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP";
 
     /**
+     * Property for setup percentage of archive size for checkpoint trigger. Default value is 0.25
+     */
+    public static final String IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE";
+
+    /**
+     * Property for setup percentage of WAL archive size to calculate threshold since which removing of old archive should be started.
+     * Default value is 0.5
+     */
+    public static final String IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE";
+
+    /**
      * Whenever read load balancing is enabled, that means 'get' requests will be distributed between primary and backup
      * nodes if it is possible and {@link CacheConfiguration#readFromBackup} is {@code true}.
      *
@@ -937,6 +968,16 @@
     public static final String IGNITE_UNWIND_THROTTLING_TIMEOUT = "IGNITE_UNWIND_THROTTLING_TIMEOUT";
 
     /**
+     * Threshold for throttling operations logging.
+     */
+    public static final String IGNITE_THROTTLE_LOG_THRESHOLD = "IGNITE_THROTTLE_LOG_THRESHOLD";
+
+    /**
+     * Number of concurrent operation for evict partitions.
+     */
+    public static final String IGNITE_EVICTION_PERMITS = "IGNITE_EVICTION_PERMITS";
+
+    /**
      * Enforces singleton.
      */
     private IgniteSystemProperties() {
diff --git a/modules/core/src/main/java/org/apache/ignite/Ignition.java b/modules/core/src/main/java/org/apache/ignite/Ignition.java
index 835896e..64e855a 100644
--- a/modules/core/src/main/java/org/apache/ignite/Ignition.java
+++ b/modules/core/src/main/java/org/apache/ignite/Ignition.java
@@ -141,30 +141,26 @@
     }
 
     /**
-     * Sets client mode static flag.
+     * Sets client mode thread-local flag.
      * <p>
      * This flag used when node is started if {@link IgniteConfiguration#isClientMode()}
      * is {@code null}. When {@link IgniteConfiguration#isClientMode()} is set this flag is ignored.
-     * It is recommended to use {@link DiscoverySpi} in client mode too.
      *
      * @param clientMode Client mode flag.
      * @see IgniteConfiguration#isClientMode()
-     * @see TcpDiscoverySpi#setForceServerMode(boolean)
      */
     public static void setClientMode(boolean clientMode) {
         IgnitionEx.setClientMode(clientMode);
     }
 
     /**
-     * Gets client mode static flag.
+     * Gets client mode thread-local flag.
      * <p>
      * This flag used when node is started if {@link IgniteConfiguration#isClientMode()}
      * is {@code null}. When {@link IgniteConfiguration#isClientMode()} is set this flag is ignored.
-     * It is recommended to use {@link DiscoverySpi} in client mode too.
      *
      * @return Client mode flag.
      * @see IgniteConfiguration#isClientMode()
-     * @see TcpDiscoverySpi#setForceServerMode(boolean)
      */
     public static boolean isClientMode() {
         return IgnitionEx.isClientMode();
diff --git a/modules/core/src/main/java/org/apache/ignite/binary/BinaryBasicIdMapper.java b/modules/core/src/main/java/org/apache/ignite/binary/BinaryBasicIdMapper.java
index 53ad9b6..48b86f3 100644
--- a/modules/core/src/main/java/org/apache/ignite/binary/BinaryBasicIdMapper.java
+++ b/modules/core/src/main/java/org/apache/ignite/binary/BinaryBasicIdMapper.java
@@ -66,7 +66,7 @@
      * @param typeName Type name.
      * @return Type ID.
      */
-    public int typeId(String typeName) {
+    @Override public int typeId(String typeName) {
         A.notNull(typeName, "typeName");
 
         int id = isLowerCase ? lowerCaseHashCode(typeName) : typeName.hashCode();
@@ -87,7 +87,7 @@
      * @param fieldName Field name.
      * @return Field ID.
      */
-    public int fieldId(int typeId, String fieldName) {
+    @Override public int fieldId(int typeId, String fieldName) {
         A.notNull(fieldName, "fieldName");
 
         int id = isLowerCase ? lowerCaseHashCode(fieldName) : fieldName.hashCode();
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheAtomicityMode.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheAtomicityMode.java
index 79a8e5f..1584286 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/CacheAtomicityMode.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheAtomicityMode.java
@@ -33,8 +33,12 @@
  */
 public enum CacheAtomicityMode {
     /**
-     * Specified fully {@code ACID}-compliant transactional cache behavior. See
+     * Specified fully {@code ACID}-compliant transactional cache behavior for key-value API. See
      * {@link Transaction} for more information about transactions.
+     * <p>
+     * <b>Note:</b> this mode guaranties transactional behavior <b>only for key-value API</b> operations.
+     * For ACID SQL transactions use {@code CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT} mode.
+     * </p>
      */
     TRANSACTIONAL,
 
@@ -88,7 +92,32 @@
      *
      * @see IgniteCache#withNoRetries()
      */
-    ATOMIC;
+    ATOMIC,
+
+    /**
+     * Specified fully {@code ACID}-compliant transactional cache behavior not only for key-value API,
+     * but also for SQL transactions.
+     * <p>
+     * This cache atomicity mode is implemented within multiversion concurrency control (MVCC) where database can
+     * contain multiple versions of each row to allow readers do not collide with writers.
+     * Each update in this mode generates a new version of a row and don't remove a previous one.
+     * Old versions are cleaned only when they are not visible to anyone.
+     * </p>
+     * <p>
+     * There is one node in cluster is elected as MVCC coordinator. This node tracks all in-flight transactions and
+     * queries in the cluster.
+     * Each transaction or query over the cache with {@code TRANSACTIONAL_SNAPSHOT} mode obtains current
+     * database snapshot from the coordinator. This snapshot allows transactions and queries to skip invisible
+     * updates made by concurrent transactions to always observe the same consistent database state.
+     * </p>
+     * <p>
+     * <b>Note!</b> This atomicity mode is not interoperable with the other atomicity modes in the same transaction.
+     * Caches participated in transaction should either be all {@code TRANSACTIONAL} or all
+     * {@code TRANSACTIONAL_SNAPSHOT}, but not the mixed ones.
+     * </p>
+     * See {@link Transaction} for more information about transactions.
+     */
+    TRANSACTIONAL_SNAPSHOT;
 
     /** Enumerated values. */
     private static final CacheAtomicityMode[] VALS = values();
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java
index 37a7f15..769d74f 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java
@@ -20,7 +20,6 @@
 import javax.cache.CacheException;
 import java.io.Serializable;
 import java.lang.reflect.Field;
-import java.math.BigDecimal;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -49,12 +48,9 @@
 import org.apache.ignite.internal.util.typedef.internal.A;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
-import static java.util.Collections.unmodifiableMap;
-
 /**
  * Query entity is a description of {@link org.apache.ignite.IgniteCache cache} entry (composed of key and value)
  * in a way of how it must be indexed and can be queried.
@@ -100,8 +96,11 @@
     /** Fields default values. */
     private Map<String, Object> defaultFieldValues = new HashMap<>();
 
-    /** Decimal fields information. */
-    private Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = new HashMap<>();
+    /** Precision(Maximum length) for fields. */
+    private Map<String, Integer> fieldsPrecision = new HashMap<>();
+
+    /** Scale for fields. */
+    private Map<String, Integer> fieldsScale = new HashMap<>();
 
     /**
      * Creates an empty query entity.
@@ -135,7 +134,9 @@
         defaultFieldValues = other.defaultFieldValues != null ? new HashMap<>(other.defaultFieldValues)
             : new HashMap<String, Object>();
 
-        decimalInfo = other.decimalInfo != null ? new HashMap<>(other.decimalInfo) : new HashMap<>();
+        fieldsPrecision = other.fieldsPrecision != null ? new HashMap<>(other.fieldsPrecision) : new HashMap<>();
+
+        fieldsScale = other.fieldsScale != null ? new HashMap<>(other.fieldsScale) : new HashMap<>();
     }
 
     /**
@@ -282,13 +283,29 @@
                     getFromMap(getDefaultFieldValues(), targetFieldName),
                     getFromMap(target.getDefaultFieldValues(), targetFieldName)
                 );
+
+                checkEquals(conflicts,
+                    "precision of " + targetFieldName,
+                    getFromMap(getFieldsPrecision(), targetFieldName),
+                    getFromMap(target.getFieldsPrecision(), targetFieldName));
+
+                checkEquals(
+                    conflicts,
+                    "scale of " + targetFieldName,
+                    getFromMap(getFieldsScale(), targetFieldName),
+                    getFromMap(target.getFieldsScale(), targetFieldName));
             }
             else {
+                Integer precision = getFromMap(target.getFieldsPrecision(), targetFieldName);
+                Integer scale = getFromMap(target.getFieldsScale(), targetFieldName);
+
                 queryFieldsToAdd.add(new QueryField(
                     targetFieldName,
                     targetFieldType,
                     !contains(target.getNotNullFields(),targetFieldName),
-                    getFromMap(target.getDefaultFieldValues(), targetFieldName)
+                    getFromMap(target.getDefaultFieldValues(), targetFieldName),
+                    precision == null ? -1 : precision,
+                    scale == null ? -1 : scale
                 ));
             }
         }
@@ -308,7 +325,7 @@
     /**
      * @return Value from sourceMap or null if map is null.
      */
-    private static Object getFromMap(Map<String, Object> sourceMap, String key) {
+    private static <V> V getFromMap(Map<String, V> sourceMap, String key) {
         return sourceMap == null ? null : sourceMap.get(key);
     }
 
@@ -320,7 +337,7 @@
      * @param local Local object.
      * @param received Received object.
      */
-    private void checkEquals(StringBuilder conflicts, String name, Object local, Object received) {
+    private <V> void checkEquals(StringBuilder conflicts, String name, V local, V received) {
         if (!Objects.equals(local, received))
             conflicts.append(String.format("%s is different: local=%s, received=%s\n", name, local, received));
     }
@@ -573,22 +590,39 @@
     }
 
     /**
-     * Gets set of field name to precision and scale.
-     *
-     * @return Set of names of fields that must have non-null values.
+     * @return Precision map for a fields.
      */
-    public Map<String, IgniteBiTuple<Integer, Integer>> getDecimalInfo() {
-        return decimalInfo == null ? Collections.emptyMap() : unmodifiableMap(decimalInfo);
+    public Map<String, Integer> getFieldsPrecision() {
+        return fieldsPrecision;
     }
 
     /**
-     * Sets decimal fields info.
+     * Sets fieldsPrecision map for a fields.
      *
-     * @param decimalInfo Set of name to precision and scale for decimal fields.
-     * @return {@code this} for chaining.
+     * @param fieldsPrecision Precision map for a fields.
+     * @return {@code This} for chaining.
      */
-    public QueryEntity setDecimalInfo(Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo) {
-        this.decimalInfo = decimalInfo;
+    public QueryEntity setFieldsPrecision(Map<String, Integer> fieldsPrecision) {
+        this.fieldsPrecision = fieldsPrecision;
+
+        return this;
+    }
+
+    /**
+     * @return Scale map for a fields.
+     */
+    public Map<String, Integer> getFieldsScale() {
+        return fieldsScale;
+    }
+
+    /**
+     * Sets fieldsScale map for a fields.
+     *
+     * @param fieldsScale Scale map for a fields.
+     * @return {@code This} for chaining.
+     */
+    public QueryEntity setFieldsScale(Map<String, Integer> fieldsScale) {
+        this.fieldsScale = fieldsScale;
 
         return this;
     }
@@ -707,8 +741,11 @@
         if (!F.isEmpty(desc.notNullFields()))
             entity.setNotNullFields(desc.notNullFields());
 
-        if (!F.isEmpty(desc.decimalInfo()))
-            entity.setDecimalInfo(desc.decimalInfo());
+        if (!F.isEmpty(desc.fieldsPrecision()))
+            entity.setFieldsPrecision(desc.fieldsPrecision());
+
+        if (!F.isEmpty(desc.fieldsScale()))
+            entity.setFieldsScale(desc.fieldsScale());
 
         return entity;
     }
@@ -835,8 +872,11 @@
             if (sqlAnn.notNull())
                 desc.addNotNullField(prop.fullName());
 
-            if (BigDecimal.class == fldCls && sqlAnn.precision() != -1 && sqlAnn.scale() != -1)
-                desc.addDecimalInfo(prop.fullName(), F.t(sqlAnn.precision(), sqlAnn.scale()));
+            if (sqlAnn.precision() != -1)
+                desc.addPrecision(prop.fullName(), sqlAnn.precision());
+
+            if (sqlAnn.scale() != -1)
+                desc.addScale(prop.fullName(), sqlAnn.scale());
 
             if ((!F.isEmpty(sqlAnn.groups()) || !F.isEmpty(sqlAnn.orderedGroups()))
                 && sqlAnn.inlineSize() != QueryIndex.DFLT_INLINE_SIZE) {
@@ -880,13 +920,14 @@
             F.eq(tableName, entity.tableName) &&
             F.eq(_notNullFields, entity._notNullFields) &&
             F.eq(defaultFieldValues, entity.defaultFieldValues) &&
-            F.eq(decimalInfo, entity.decimalInfo);
+            F.eq(fieldsPrecision, entity.fieldsPrecision) &&
+            F.eq(fieldsScale, entity.fieldsScale);
     }
 
     /** {@inheritDoc} */
     @Override public int hashCode() {
         return Objects.hash(keyType, valType, keyFieldName, valueFieldName, fields, keyFields, aliases, idxs,
-            tableName, _notNullFields, defaultFieldValues, decimalInfo);
+            tableName, _notNullFields, defaultFieldValues, fieldsPrecision, fieldsScale);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
index 4b52f77..7b1ea59 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java
@@ -389,9 +389,7 @@
 
             if (!exclNeighborsWarn) {
                 LT.warn(log, "Affinity function excludeNeighbors property is ignored " +
-                        "because topology has no enough nodes to assign backups.",
-                    "Affinity function excludeNeighbors property is ignored " +
-                        "because topology has no enough nodes to assign backups.");
+                    "because topology has no enough nodes to assign backups.");
 
                 exclNeighborsWarn = true;
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicy.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicy.java
index 68495db..f6e1a15 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicy.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/fifo/FifoEvictionPolicy.java
@@ -124,7 +124,7 @@
      * @param entry Entry to touch.
      * @return {@code True} if queue has been changed by this call.
      */
-    protected boolean touch(EvictableEntry<K, V> entry) {
+    @Override protected boolean touch(EvictableEntry<K, V> entry) {
         Node<EvictableEntry<K, V>> node = entry.meta();
 
         // Entry has not been enqueued yet.
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java
index 9a8fbca..e4d6d0a 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java
@@ -125,7 +125,7 @@
     }
 
     /** {@inheritDoc} */
-    public ContinuousQuery<K, V> setInitialQuery(Query<Cache.Entry<K, V>> initQry) {
+    @Override public ContinuousQuery<K, V> setInitialQuery(Query<Cache.Entry<K, V>> initQry) {
         return (ContinuousQuery<K, V>)super.setInitialQuery(initQry);
     }
 
@@ -199,12 +199,12 @@
     }
 
     /** {@inheritDoc} */
-    public ContinuousQuery<K, V> setTimeInterval(long timeInterval) {
+    @Override public ContinuousQuery<K, V> setTimeInterval(long timeInterval) {
         return (ContinuousQuery<K, V>)super.setTimeInterval(timeInterval);
     }
 
     /** {@inheritDoc} */
-    public ContinuousQuery<K, V> setAutoUnsubscribe(boolean autoUnsubscribe) {
+    @Override public ContinuousQuery<K, V> setAutoUnsubscribe(boolean autoUnsubscribe) {
         return (ContinuousQuery<K, V>)super.setAutoUnsubscribe(autoUnsubscribe);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQueryWithTransformer.java b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQueryWithTransformer.java
index 122410f..5fe0869 100644
--- a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQueryWithTransformer.java
+++ b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQueryWithTransformer.java
@@ -81,12 +81,12 @@
     }
 
     /** {@inheritDoc} */
-    public ContinuousQueryWithTransformer<K, V, T> setInitialQuery(Query<Cache.Entry<K, V>> initQry) {
+    @Override public ContinuousQueryWithTransformer<K, V, T> setInitialQuery(Query<Cache.Entry<K, V>> initQry) {
         return (ContinuousQueryWithTransformer<K, V, T>)super.setInitialQuery(initQry);
     }
 
     /** {@inheritDoc} */
-    public ContinuousQueryWithTransformer<K, V, T> setRemoteFilterFactory(
+    @Override public ContinuousQueryWithTransformer<K, V, T> setRemoteFilterFactory(
         Factory<? extends CacheEntryEventFilter<K, V>> rmtFilterFactory) {
         return (ContinuousQueryWithTransformer<K, V, T>)super.setRemoteFilterFactory(rmtFilterFactory);
     }
@@ -153,12 +153,12 @@
     }
 
     /** {@inheritDoc} */
-    public ContinuousQueryWithTransformer<K, V, T> setTimeInterval(long timeInterval) {
+    @Override public ContinuousQueryWithTransformer<K, V, T> setTimeInterval(long timeInterval) {
         return (ContinuousQueryWithTransformer<K, V, T>)super.setTimeInterval(timeInterval);
     }
 
     /** {@inheritDoc} */
-    public ContinuousQueryWithTransformer<K, V, T> setAutoUnsubscribe(boolean autoUnsubscribe) {
+    @Override public ContinuousQueryWithTransformer<K, V, T> setAutoUnsubscribe(boolean autoUnsubscribe) {
         return (ContinuousQueryWithTransformer<K, V, T>)super.setAutoUnsubscribe(autoUnsubscribe);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java
index 97b3165..bfc6c18 100644
--- a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java
+++ b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java
@@ -245,17 +245,11 @@
     public boolean isDaemon();
 
     /**
-     * Tests whether or not this node is connected to cluster as a client.
-     * <p>
-     * Do not confuse client in terms of
-     * discovery {@link DiscoverySpi#isClientMode()} and client in terms of cache
-     * {@link IgniteConfiguration#isClientMode()}. Cache clients cannot carry data,
-     * while topology clients connect to topology in a different way.
+     * Whether this node is cache client (see {@link IgniteConfiguration#isClientMode()}).
      *
-     * @return {@code True} if this node is a client node, {@code false} otherwise.
+     * @return {@code True if client}.
+     *
      * @see IgniteConfiguration#isClientMode()
-     * @see Ignition#isClientMode()
-     * @see DiscoverySpi#isClientMode()
      */
     public boolean isClient();
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java
index 2e35f37..fb3789d 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java
@@ -1019,6 +1019,7 @@
      * @param atomicityMode Cache atomicity mode.
      * @return {@code this} for chaining.
      */
+    @SuppressWarnings("unchecked")
     public CacheConfiguration<K, V> setAtomicityMode(CacheAtomicityMode atomicityMode) {
         this.atomicityMode = atomicityMode;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java
index d8619aa..e7e8ca3 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java
@@ -114,6 +114,9 @@
     /** Default number of checkpoints to be kept in WAL after checkpoint is finished */
     public static final int DFLT_WAL_HISTORY_SIZE = 20;
 
+    /** Default max size of WAL archive files, in bytes */
+    public static final long DFLT_WAL_ARCHIVE_MAX_SIZE = 1024 * 1024 * 1024;
+
     /** */
     public static final int DFLT_WAL_SEGMENTS = 10;
 
@@ -190,6 +193,9 @@
     /** Number of checkpoints to keep */
     private int walHistSize = DFLT_WAL_HISTORY_SIZE;
 
+    /** Maximum size of wal archive folder, in bytes */
+    private long maxWalArchiveSize = DFLT_WAL_ARCHIVE_MAX_SIZE;
+
     /** Number of work WAL segments. */
     private int walSegments = DFLT_WAL_SEGMENTS;
 
@@ -487,7 +493,10 @@
      * Gets a total number of checkpoints to keep in the WAL history.
      *
      * @return Number of checkpoints to keep in WAL after a checkpoint is finished.
+     * @deprecated Instead of walHistorySize use maxWalArchiveSize for manage of archive size.
+     * @see DataStorageConfiguration#getMaxWalArchiveSize()
      */
+    @Deprecated
     public int getWalHistorySize() {
         return walHistSize <= 0 ? DFLT_WAL_HISTORY_SIZE : walHistSize;
     }
@@ -497,7 +506,10 @@
      *
      * @param walHistSize Number of checkpoints to keep after a checkpoint is finished.
      * @return {@code this} for chaining.
+     * @deprecated Instead of walHistorySize use maxWalArchiveSize for manage of archive size.
+     * @see DataStorageConfiguration#setMaxWalArchiveSize(long)
      */
+    @Deprecated
     public DataStorageConfiguration setWalHistorySize(int walHistSize) {
         this.walHistSize = walHistSize;
 
@@ -505,6 +517,36 @@
     }
 
     /**
+     * If WalHistorySize was set by user will use this parameter for compatibility.
+     *
+     * @return {@code true} if use WalHistorySize for compatibility.
+     */
+    public boolean isWalHistorySizeParameterUsed() {
+        return getWalHistorySize() != DFLT_WAL_HISTORY_SIZE && getWalHistorySize() != Integer.MAX_VALUE;
+    }
+
+    /**
+     * Gets a max allowed size of WAL archives. In bytes.
+     *
+     * @return max size of WAL archive directory.
+     */
+    public long getMaxWalArchiveSize() {
+        return maxWalArchiveSize <= 0 ? DFLT_WAL_ARCHIVE_MAX_SIZE : maxWalArchiveSize;
+    }
+
+    /**
+     * Sets a max allowed size of WAL archives. In bytes
+     *
+     * @param walArchiveMaxSize max size of WAL archive directory.
+     * @return {@code this} for chaining.
+     */
+    public DataStorageConfiguration setMaxWalArchiveSize(long walArchiveMaxSize) {
+        this.maxWalArchiveSize = walArchiveMaxSize;
+
+        return this;
+    }
+
+    /**
      * Gets a number of WAL segments to work with.
      *
      * @return Number of work WAL segments.
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java
index 9ccadf3..d2a5081 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java
@@ -67,11 +67,11 @@
     @Nullable private ClusterPart findLargestConnectedCluster(CommunicationFailureContext ctx) {
         List<ClusterNode> srvNodes = ctx.topologySnapshot()
             .stream()
-            .filter(node -> !CU.clientNode(node))
+            .filter(node -> !node.isClient())
             .collect(Collectors.toList());
 
         // Exclude client nodes from analysis.
-        ClusterGraph graph = new ClusterGraph(ctx, CU::clientNode);
+        ClusterGraph graph = new ClusterGraph(ctx, ClusterNode::isClient);
 
         List<BitSet> components = graph.findConnectedComponents();
 
@@ -153,7 +153,7 @@
             ClusterNode node = allNodes.get(idx);
 
             // Client nodes will be processed separately.
-            if (CU.clientNode(node))
+            if (node.isClient())
                 continue;
 
             if (!clusterPart.srvNodesSet.get(idx))
@@ -164,7 +164,7 @@
         for (int idx = 0; idx < allNodes.size(); idx++) {
             ClusterNode node = allNodes.get(idx);
 
-            if (CU.clientNode(node) && !clusterPart.connectedClients.contains(node))
+            if (node.isClient() && !clusterPart.connectedClients.contains(node))
                 ctx.killNode(node);
         }
     }
@@ -182,7 +182,7 @@
         List<ClusterNode> allNodes = ctx.topologySnapshot();
 
         for (ClusterNode node : allNodes) {
-            if (!CU.clientNode(node))
+            if (!node.isClient())
                 continue;
 
             boolean hasConnections = true;
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
index cc3ea10..6a0c7cb 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java
@@ -214,6 +214,12 @@
     /** Default timeout after which long query warning will be printed. */
     public static final long DFLT_LONG_QRY_WARN_TIMEOUT = 3000;
 
+    /** Default size of MVCC vacuum thread pool. */
+    public static final int DFLT_MVCC_VACUUM_THREAD_CNT = 2;
+
+    /** Default time interval between vacuum process runs (ms). */
+    public static final int DFLT_MVCC_VACUUM_FREQUENCY = 5000;
+
     /** Optional local Ignite instance name. */
     private String igniteInstanceName;
 
@@ -487,6 +493,12 @@
     /** Client connector configuration. */
     private ClientConnectorConfiguration cliConnCfg = ClientListenerProcessor.DFLT_CLI_CFG;
 
+    /** Size of MVCC vacuum thread pool. */
+    private int mvccVacuumThreadCnt = DFLT_MVCC_VACUUM_THREAD_CNT;
+
+    /** Time interval between vacuum process runs (ms). */
+    private int mvccVacuumFreq = DFLT_MVCC_VACUUM_FREQUENCY;
+
     /** User authentication enabled. */
     private boolean authEnabled;
 
@@ -496,6 +508,9 @@
     /** Communication failure resolver */
     private CommunicationFailureResolver commFailureRslvr;
 
+    /** SQL schemas to be created on node start. */
+    private String[] sqlSchemas;
+
     /**
      * Creates valid grid configuration with all default values.
      */
@@ -532,6 +547,7 @@
         addrRslvr = cfg.getAddressResolver();
         allResolversPassReq = cfg.isAllSegmentationResolversPassRequired();
         atomicCfg = cfg.getAtomicConfiguration();
+        authEnabled = cfg.isAuthenticationEnabled();
         autoActivation = cfg.isAutoActivationEnabled();
         binaryCfg = cfg.getBinaryConfiguration();
         dsCfg = cfg.getDataStorageConfiguration();
@@ -575,6 +591,8 @@
         metricsLogFreq = cfg.getMetricsLogFrequency();
         metricsUpdateFreq = cfg.getMetricsUpdateFrequency();
         mgmtPoolSize = cfg.getManagementThreadPoolSize();
+        mvccVacuumThreadCnt = cfg.getMvccVacuumThreadCount();
+        mvccVacuumFreq = cfg.getMvccVacuumFrequency();
         netTimeout = cfg.getNetworkTimeout();
         nodeId = cfg.getNodeId();
         odbcCfg = cfg.getOdbcConfiguration();
@@ -594,6 +612,7 @@
         sndRetryCnt = cfg.getNetworkSendRetryCount();
         sndRetryDelay = cfg.getNetworkSendRetryDelay();
         sqlConnCfg = cfg.getSqlConnectorConfiguration();
+        sqlSchemas = cfg.getSqlSchemas();
         sslCtxFactory = cfg.getSslContextFactory();
         storeSesLsnrs = cfg.getCacheStoreSessionListenerFactories();
         stripedPoolSize = cfg.getStripedPoolSize();
@@ -608,7 +627,6 @@
         utilityCachePoolSize = cfg.getUtilityCacheThreadPoolSize();
         waitForSegOnStart = cfg.isWaitForSegmentOnStart();
         warmupClos = cfg.getWarmupClosure();
-        authEnabled = cfg.isAuthenticationEnabled();
     }
 
     /**
@@ -2980,6 +2998,48 @@
     }
 
     /**
+     * Returns number of MVCC vacuum cleanup threads.
+     *
+     * @return Number of MVCC vacuum cleanup threads.
+     */
+    public int getMvccVacuumThreadCount() {
+        return mvccVacuumThreadCnt;
+    }
+
+    /**
+     * Sets number of MVCC vacuum cleanup threads.
+     *
+     * @param mvccVacuumThreadCnt Number of MVCC vacuum cleanup threads.
+     * @return {@code this} for chaining.
+     */
+    public IgniteConfiguration setMvccVacuumThreadCount(int mvccVacuumThreadCnt) {
+        this.mvccVacuumThreadCnt = mvccVacuumThreadCnt;
+
+        return this;
+    }
+
+    /**
+     * Returns time interval between vacuum runs.
+     *
+     * @return Time interval between vacuum runs.
+     */
+    public int getMvccVacuumFrequency() {
+        return mvccVacuumFreq;
+    }
+
+    /**
+     * Sets time interval between vacuum runs.
+     *
+     * @param mvccVacuumFreq Time interval between vacuum runs.
+     * @return {@code this} for chaining.
+     */
+    public IgniteConfiguration setMvccVacuumFrequency(int mvccVacuumFreq) {
+        this.mvccVacuumFreq = mvccVacuumFreq;
+
+        return this;
+    }
+
+    /**
      * Returns {@code true} if user authentication is enabled for cluster. Otherwise returns {@code false}.
      * Default value is false; authentication is disabled.
      *
@@ -3001,6 +3061,35 @@
         return this;
     }
 
+    /**
+     * Gets SQL schemas to be created on node startup.
+     * <p>
+     * See {@link #setSqlSchemas(String...)} for more information.
+     *
+     * @return SQL schemas to be created on node startup.
+     */
+    public String[] getSqlSchemas() {
+        return sqlSchemas;
+    }
+
+    /**
+     * Sets SQL schemas to be created on node startup. Schemas are created on local node only and are not propagated
+     * to other cluster nodes. Created schemas cannot be dropped.
+     * <p>
+     * By default schema names are case-insensitive, i.e. {@code my_schema} and {@code My_Schema} represents the same
+     * object. Use quotes to enforce case sensitivity (e.g. {@code "My_Schema"}).
+     * <p>
+     * Property is ignored if {@code ignite-indexing} module is not in classpath.
+     *
+     * @param sqlSchemas SQL schemas to be created on node startup.
+     * @return {@code this} for chaining.
+     */
+    public IgniteConfiguration setSqlSchemas(String... sqlSchemas) {
+        this.sqlSchemas = sqlSchemas;
+
+        return this;
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(IgniteConfiguration.class, this);
diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java
index 0ac215f..e669bcf 100644
--- a/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java
+++ b/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java
@@ -19,7 +19,9 @@
 
 import java.io.Serializable;
 import javax.cache.configuration.Factory;
+import org.apache.ignite.internal.util.TransientSerializable;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteProductVersion;
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
@@ -27,8 +29,12 @@
 /**
  * Transactions configuration.
  */
+@TransientSerializable(methodName = "transientSerializableFields")
 public class TransactionConfiguration implements Serializable {
     /** */
+    private static final IgniteProductVersion TX_PME_TIMEOUT_SINCE = IgniteProductVersion.fromString("2.5.1");
+
+    /** */
     private static final long serialVersionUID = 0L;
 
     /** Default value for 'txSerializableEnabled' flag. */
@@ -387,4 +393,18 @@
     @Override public String toString() {
         return S.toString(TransactionConfiguration.class, this);
     }
+
+    /**
+     * Excludes incompatible fields from serialization/deserialization process.
+     *
+     * @param ver Sender/Receiver node version.
+     * @return Array of excluded from serialization/deserialization fields.
+     */
+    @SuppressWarnings("unused")
+    private static String[] transientSerializableFields(IgniteProductVersion ver) {
+        if (TX_PME_TIMEOUT_SINCE.compareToIgnoreTimestamp(ver) >= 0)
+            return new String[] { "txTimeoutOnPartitionMapExchange" };
+
+        return null;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java
index a6ab962..485e567 100644
--- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java
+++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java
@@ -841,6 +841,19 @@
     public static final int EVT_TX_RESUMED = 133;
 
     /**
+     * Built-in event type: WAL archive segment compaction is completed.
+     * <p>
+     * Fired for each WAL archive segment upon its compaction completion.
+     * <p>
+     * <p>
+     * NOTE: all types in range <b>from 1 to 1000 are reserved</b> for
+     * internal Ignite events and should not be used by user-defined events.
+     *
+     * @see WalSegmentArchivedEvent
+     */
+    public static final int EVT_WAL_SEGMENT_COMPACTED = 134;
+
+    /**
      * All checkpoint events. This array can be directly passed into
      * {@link IgniteEvents#localListen(IgnitePredicate, int...)} method to
      * subscribe to all checkpoint events.
@@ -1062,4 +1075,4 @@
      * All Ignite events (<b>excluding</b> metric update event).
      */
     public static final int[] EVTS_ALL_MINUS_METRIC_UPDATE = U.gridEvents(EVT_NODE_METRICS_UPDATED);
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/events/WalSegmentArchivedEvent.java b/modules/core/src/main/java/org/apache/ignite/events/WalSegmentArchivedEvent.java
index 2fc1715..2425c5f 100644
--- a/modules/core/src/main/java/org/apache/ignite/events/WalSegmentArchivedEvent.java
+++ b/modules/core/src/main/java/org/apache/ignite/events/WalSegmentArchivedEvent.java
@@ -22,7 +22,7 @@
 import org.jetbrains.annotations.NotNull;
 
 /**
- * Event indicates there was movement of WAL segment file to archive has been completed
+ * Event indicates the completion of WAL segment file transition to archive.
  */
 public class WalSegmentArchivedEvent extends EventAdapter {
     /** */
@@ -45,7 +45,16 @@
         @NotNull final ClusterNode node,
         final long absWalSegmentIdx,
         final File archiveFile) {
-        super(node, "", EventType.EVT_WAL_SEGMENT_ARCHIVED);
+        this(node, absWalSegmentIdx, archiveFile, EventType.EVT_WAL_SEGMENT_ARCHIVED);
+    }
+
+    /** */
+    protected WalSegmentArchivedEvent(
+        @NotNull final ClusterNode node,
+        final long absWalSegmentIdx,
+        final File archiveFile,
+        int evtType) {
+        super(node, "", evtType);
         this.absWalSegmentIdx = absWalSegmentIdx;
         this.archiveFile = archiveFile;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/events/WalSegmentCompactedEvent.java b/modules/core/src/main/java/org/apache/ignite/events/WalSegmentCompactedEvent.java
new file mode 100644
index 0000000..50422ff
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/events/WalSegmentCompactedEvent.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.events;
+
+import java.io.File;
+import org.apache.ignite.cluster.ClusterNode;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Event indicates the completion of WAL segment compaction.
+ * <p>
+ * {@link #getArchiveFile()} corresponds to compacted file.
+ */
+public class WalSegmentCompactedEvent extends WalSegmentArchivedEvent {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Creates WAL segment compaction event.
+     *
+     * @param node Node.
+     * @param absWalSegmentIdx Absolute wal segment index.
+     * @param archiveFile Compacted archive file.
+     */
+    public WalSegmentCompactedEvent(
+        @NotNull final ClusterNode node,
+        final long absWalSegmentIdx,
+        final File archiveFile) {
+        super(node, absWalSegmentIdx, archiveFile, EventType.EVT_WAL_SEGMENT_COMPACTED);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridComponent.java b/modules/core/src/main/java/org/apache/ignite/internal/GridComponent.java
index 504c83f..0cf3a6e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridComponent.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridComponent.java
@@ -64,7 +64,10 @@
         QUERY_PROC,
 
         /** Authentication processor. */
-        AUTH_PROC
+        AUTH_PROC,
+
+        /** */
+        CACHE_CRD_PROC
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java b/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java
index ced4f84..c415efc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java
@@ -61,14 +61,12 @@
 
                         if (!locHost.isReachable(REACH_TIMEOUT)) {
                             U.warn(log, "Default local host is unreachable. This may lead to delays on " +
-                                "grid network operations. Check your OS network setting to correct it.",
-                                "Default local host is unreachable.");
+                                "grid network operations. Check your OS network setting to correct it.");
                         }
                     }
                     catch (IOException ignore) {
                         U.warn(log, "Failed to perform network diagnostics. It is usually caused by serious " +
-                            "network configuration problem. Check your OS network setting to correct it.",
-                            "Failed to perform network diagnostics.");
+                            "network configuration problem. Check your OS network setting to correct it.");
                     }
                 }
             });
@@ -80,14 +78,12 @@
 
                         if (locHost.isLoopbackAddress()) {
                             U.warn(log, "Default local host is a loopback address. This can be a sign of " +
-                                "potential network configuration problem.",
-                                "Default local host is a loopback address.");
+                                "potential network configuration problem.");
                         }
                     }
                     catch (IOException ignore) {
                         U.warn(log, "Failed to perform network diagnostics. It is usually caused by serious " +
-                            "network configuration problem. Check your OS network setting to correct it.",
-                            "Failed to perform network diagnostics.");
+                            "network configuration problem. Check your OS network setting to correct it.");
                     }
                 }
             });
@@ -98,8 +94,7 @@
                     if (!U.isSufficientlyTestedOs()) {
                         U.warn(log, "This operating system has been tested less rigorously: " + U.osString() +
                             ". Our team will appreciate the feedback if you experience any problems running " +
-                            "ignite in this environment.",
-                            "This OS is tested less rigorously: " + U.osString());
+                            "ignite in this environment.");
                     }
                 }
             });
@@ -109,8 +104,7 @@
                     // Fix for GG-1075.
                     if (F.isEmpty(U.allLocalMACs()))
                         U.warn(log, "No live network interfaces detected. If IP-multicast discovery is used - " +
-                            "make sure to add 127.0.0.1 as a local address.",
-                            "No live network interfaces. Add 127.0.0.1 as a local address.");
+                            "make sure to add 127.0.0.1 as a local address.");
                 }
             });
 
@@ -131,7 +125,7 @@
 
                         U.warn(log, "JMX remote management is enabled but JMX port is either not set or invalid. " +
                             "Check system property 'com.sun.management.jmxremote.port' to make sure it specifies " +
-                            "valid TCP/IP port.", "JMX remote port is invalid - JMX management is off.");
+                            "valid TCP/IP port.");
                     }
                 }
             });
@@ -156,4 +150,4 @@
                 "Failed to start background network diagnostics.", e);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java
index 051978c..0690565 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.managers.failover.GridFailoverManager;
 import org.apache.ignite.internal.managers.indexing.GridIndexingManager;
 import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor;
 import org.apache.ignite.internal.worker.WorkersRegistry;
 import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor;
 import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor;
@@ -671,6 +672,11 @@
     public PlatformProcessor platform();
 
     /**
+     * @return Cache mvcc coordinator processor.
+     */
+    public MvccProcessor coordinators();
+
+    /**
      * @return PDS mode folder name resolver, also generates consistent ID in case new folder naming is used
      */
     public PdsFoldersResolver pdsFolderResolver();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java
index 2be64e5..3b7b430 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java
@@ -47,6 +47,7 @@
 import org.apache.ignite.internal.managers.failover.GridFailoverManager;
 import org.apache.ignite.internal.managers.indexing.GridIndexingManager;
 import org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor;
 import org.apache.ignite.internal.worker.WorkersRegistry;
 import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor;
 import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor;
@@ -290,6 +291,10 @@
     @GridToStringExclude
     private DataStructuresProcessor dataStructuresProc;
 
+    /** Cache mvcc coordinators. */
+    @GridToStringExclude
+    private MvccProcessor coordProc;
+
     /** */
     @GridToStringExclude
     private IgniteAuthenticationProcessor authProc;
@@ -356,7 +361,7 @@
 
     /** */
     @GridToStringExclude
-    Map<String, ? extends ExecutorService> customExecSvcs;
+    private Map<String, ? extends ExecutorService> customExecSvcs;
 
     /** */
     @GridToStringExclude
@@ -610,6 +615,8 @@
             poolProc = (PoolProcessor)comp;
         else if (comp instanceof GridMarshallerMappingProcessor)
             mappingProc = (GridMarshallerMappingProcessor)comp;
+        else if (comp instanceof MvccProcessor)
+            coordProc = (MvccProcessor)comp;
         else if (comp instanceof PdsFoldersResolver)
             pdsFolderRslvr = (PdsFoldersResolver)comp;
         else if (comp instanceof GridInternalSubscriptionProcessor)
@@ -880,6 +887,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public MvccProcessor coordinators() {
+        return coordProc;
+    }
+
+    /** {@inheritDoc} */
     @Override public IgniteAuthenticationProcessor authentication() {
         return authProc;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java
index 0b2d41a..98a4d8d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java
@@ -81,7 +81,10 @@
     /** */
     TOPIC_CONTINUOUS,
 
-    /** */
+    /**
+     * @deprecated Should be removed in Apache Ignite 3.0.
+     */
+    @Deprecated
     TOPIC_MONGO,
 
     /** */
@@ -127,7 +130,10 @@
     TOPIC_AUTH,
 
     /** */
-    TOPIC_EXCHANGE;
+    TOPIC_EXCHANGE,
+
+    /** */
+    TOPIC_CACHE_COORDINATOR;
 
     /** Enum values. */
     private static final GridTopic[] VALS = values();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java
index 8d473e6..762d9a1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteComputeImpl.java
@@ -52,6 +52,7 @@
 import static org.apache.ignite.internal.GridClosureCallMode.BALANCE;
 import static org.apache.ignite.internal.GridClosureCallMode.BROADCAST;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_FAILOVER;
+import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_RESULT_CACHE;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBGRID_PREDICATE;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_SUBJ_ID;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_TASK_NAME;
@@ -1024,6 +1025,20 @@
     }
 
     /** {@inheritDoc} */
+    @Override public IgniteCompute withNoResultCache() {
+        guard();
+
+        try {
+            ctx.task().setThreadContext(TC_NO_RESULT_CACHE, true);
+        }
+        finally {
+            unguard();
+        }
+
+        return this;
+    }
+
+    /** {@inheritDoc} */
     @Override public void localDeployTask(Class<? extends ComputeTask> taskCls, ClassLoader clsLdr) {
         A.notNull(taskCls, "taskCls", clsLdr, "clsLdr");
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
index 9b8ae5a..d74b3aa 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java
@@ -125,6 +125,7 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
 import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
 import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessorImpl;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor;
 import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor;
@@ -993,6 +994,7 @@
             // be able to start receiving messages once discovery completes.
             try {
                 startProcessor(new PdsConsistentIdProcessor(ctx));
+                startProcessor(new MvccProcessorImpl(ctx));
                 startProcessor(createComponent(DiscoveryNodeValidationProcessor.class, ctx));
                 startProcessor(new GridAffinityProcessor(ctx));
                 startProcessor(createComponent(GridSegmentationProcessor.class, ctx));
@@ -1110,9 +1112,9 @@
                     catch (IgniteNeedReconnectException e) {
                         ClusterNode locNode = ctx.discovery().localNode();
 
-                        assert CU.clientNode(locNode);
+                        assert locNode.isClient();
 
-                        if (!locNode.isClient())
+                        if (!ctx.discovery().reconnectSupported())
                             throw new IgniteCheckedException("Client node in forceServerMode " +
                                 "is not allowed to reconnect to the cluster and will be stopped.");
 
@@ -1588,8 +1590,7 @@
         // Warn about loopback.
         if (ips.isEmpty() && macs.isEmpty())
             U.warn(log, "Ignite is starting on loopback address... Only nodes on the same physical " +
-                    "computer can participate in topology.",
-                "Ignite is starting on loopback address...");
+                "computer can participate in topology.");
 
         // Stick in network context into attributes.
         add(ATTR_IPS, (ips.isEmpty() ? "" : ips));
@@ -2458,7 +2459,7 @@
 
         U.log(log, "System cache's DataRegion size is configured to " +
             (memCfg.getSystemRegionInitialSize() / (1024 * 1024)) + " MB. " +
-            "Use DataStorageConfiguration.systemCacheMemorySize property to change the setting.");
+            "Use DataStorageConfiguration.systemRegionInitialSize property to change the setting.");
     }
 
     /**
@@ -2515,9 +2516,7 @@
             U.warn(
                 log,
                 "Peer class loading is enabled (disable it in production for performance and " +
-                    "deployment consistency reasons)",
-                "Peer class loading is enabled (disable it for better performance)"
-            );
+                    "deployment consistency reasons)");
     }
 
     /**
@@ -2764,7 +2763,7 @@
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(CacheConfiguration<K, V> cacheCfg) {
         A.notNull(cacheCfg, "cacheCfg");
-        CU.validateCacheName(cacheCfg.getName());
+        CU.validateNewCacheName(cacheCfg.getName());
 
         guard();
 
@@ -2820,7 +2819,7 @@
 
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createCache(String cacheName) {
-        CU.validateCacheName(cacheName);
+        CU.validateNewCacheName(cacheName);
 
         guard();
 
@@ -2849,7 +2848,7 @@
     @Override public <K, V> IgniteBiTuple<IgniteCache<K, V>, Boolean> getOrCreateCache0(
         CacheConfiguration<K, V> cacheCfg, boolean sql) {
         A.notNull(cacheCfg, "cacheCfg");
-        CU.validateCacheName(cacheCfg.getName());
+        CU.validateNewCacheName(cacheCfg.getName());
 
         guard();
 
@@ -2915,7 +2914,7 @@
         NearCacheConfiguration<K, V> nearCfg
     ) {
         A.notNull(cacheCfg, "cacheCfg");
-        CU.validateCacheName(cacheCfg.getName());
+        CU.validateNewCacheName(cacheCfg.getName());
         A.notNull(nearCfg, "nearCfg");
 
         guard();
@@ -2944,7 +2943,7 @@
     @Override public <K, V> IgniteCache<K, V> getOrCreateCache(CacheConfiguration<K, V> cacheCfg,
         NearCacheConfiguration<K, V> nearCfg) {
         A.notNull(cacheCfg, "cacheCfg");
-        CU.validateCacheName(cacheCfg.getName());
+        CU.validateNewCacheName(cacheCfg.getName());
         A.notNull(nearCfg, "nearCfg");
 
         guard();
@@ -2985,7 +2984,7 @@
 
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> createNearCache(String cacheName, NearCacheConfiguration<K, V> nearCfg) {
-        CU.validateCacheName(cacheName);
+        CU.validateNewCacheName(cacheName);
         A.notNull(nearCfg, "nearCfg");
 
         guard();
@@ -3017,7 +3016,7 @@
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> getOrCreateNearCache(String cacheName,
         NearCacheConfiguration<K, V> nearCfg) {
-        CU.validateCacheName(cacheName);
+        CU.validateNewCacheName(cacheName);
         A.notNull(nearCfg, "nearCfg");
 
         guard();
@@ -3146,7 +3145,7 @@
 
     /** {@inheritDoc} */
     @Override public <K, V> IgniteCache<K, V> getOrCreateCache(String cacheName) {
-        CU.validateCacheName(cacheName);
+        CU.validateNewCacheName(cacheName);
 
         guard();
 
@@ -3175,7 +3174,7 @@
      */
     public IgniteInternalFuture<?> getOrCreateCacheAsync(String cacheName, String templateName,
         CacheConfigurationOverride cfgOverride, boolean checkThreadTx) {
-        CU.validateCacheName(cacheName);
+        CU.validateNewCacheName(cacheName);
 
         guard();
 
@@ -3195,7 +3194,7 @@
     /** {@inheritDoc} */
     @Override public <K, V> void addCacheConfiguration(CacheConfiguration<K, V> cacheCfg) {
         A.notNull(cacheCfg, "cacheCfg");
-        CU.validateCacheName(cacheCfg.getName());
+        CU.validateNewCacheName(cacheCfg.getName());
 
         guard();
 
@@ -3871,7 +3870,8 @@
                     }
                     catch (IgniteCheckedException e) {
                         if (!X.hasCause(e, IgniteNeedReconnectException.class,
-                            IgniteClientDisconnectedCheckedException.class)) {
+                            IgniteClientDisconnectedCheckedException.class,
+                            IgniteInterruptedCheckedException.class)) {
                             U.error(log, "Failed to reconnect, will stop node.", e);
 
                             reconnectState.firstReconnectFut.onDone(e);
@@ -3904,7 +3904,8 @@
         if (err != null) {
             U.error(log, "Failed to reconnect, will stop node", err);
 
-            close();
+            if (!X.hasCause(err, NodeStoppingException.class))
+                close();
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java
index ed16a77..5b764e4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java
@@ -82,9 +82,6 @@
     public static final String ATTR_IGFS = ATTR_PREFIX + ".igfs";
 
     /** Internal attribute name constant. */
-    public static final String ATTR_MONGO = ATTR_PREFIX + ".mongo";
-
-    /** Internal attribute name constant. */
     public static final String ATTR_DAEMON = ATTR_PREFIX + ".daemon";
 
     /** Internal attribute name constant. */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
index aff19c0..148bd21 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java
@@ -2259,8 +2259,7 @@
                         "(only recent 1.6 and 1.7 versions HotSpot VMs are supported). " +
                         "To enable fast marshalling upgrade to recent 1.6 or 1.7 HotSpot VM release. " +
                         "Switching to standard JDK marshalling - " +
-                        "object serialization performance will be significantly slower.",
-                        "To enable fast marshalling upgrade to recent 1.6 or 1.7 HotSpot VM release.");
+                        "object serialization performance will be significantly slower.");
 
                     marsh = new JdkMarshaller();
                 }
@@ -2350,12 +2349,8 @@
                         "like TcpDiscoverySpi)");
 
                 for (CacheConfiguration ccfg : userCaches) {
-                    if (CU.isHadoopSystemCache(ccfg.getName()))
-                        throw new IgniteCheckedException("Cache name cannot be \"" + CU.SYS_CACHE_HADOOP_MR +
-                            "\" because it is reserved for internal purposes.");
-
-                    if (CU.isUtilityCache(ccfg.getName()))
-                        throw new IgniteCheckedException("Cache name cannot be \"" + CU.UTILITY_CACHE_NAME +
+                    if (CU.isReservedCacheName(ccfg.getName()))
+                        throw new IgniteCheckedException("Cache name cannot be \"" + ccfg.getName() +
                             "\" because it is reserved for internal purposes.");
 
                     if (IgfsUtils.matchIgfsCacheName(ccfg.getName()))
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/NodeStoppingException.java b/modules/core/src/main/java/org/apache/ignite/internal/NodeStoppingException.java
index cc39b14..75447a1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/NodeStoppingException.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/NodeStoppingException.java
@@ -22,7 +22,7 @@
 /**
  *
  */
-public class NodeStoppingException extends IgniteCheckedException implements InvalidEnvironmentException {
+public class NodeStoppingException extends IgniteCheckedException {
     /** */
     private static final long serialVersionUID = 0L;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java
index 210a320..16738de 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java
@@ -22,13 +22,10 @@
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
-import java.util.UUID;
 import java.util.stream.Collectors;
 import org.apache.ignite.IgniteCompute;
 import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.visor.VisorTaskArgument;
 import org.apache.ignite.internal.visor.tx.VisorTxInfo;
 import org.apache.ignite.internal.visor.tx.VisorTxOperation;
@@ -37,7 +34,6 @@
 import org.apache.ignite.internal.visor.tx.VisorTxTask;
 import org.apache.ignite.internal.visor.tx.VisorTxTaskArg;
 import org.apache.ignite.internal.visor.tx.VisorTxTaskResult;
-import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.mxbean.TransactionsMXBean;
 
 /**
@@ -103,21 +99,7 @@
                     w.println(key.toString());
 
                     for (VisorTxInfo info : entry.getValue().getInfos())
-                        w.println("    Tx: [xid=" + info.getXid() +
-                            ", label=" + info.getLabel() +
-                            ", state=" + info.getState() +
-                            ", startTime=" + info.getFormattedStartTime() +
-                            ", duration=" + info.getDuration() / 1000 +
-                            ", isolation=" + info.getIsolation() +
-                            ", concurrency=" + info.getConcurrency() +
-                            ", timeout=" + info.getTimeout() +
-                            ", size=" + info.getSize() +
-                            ", dhtNodes=" + F.transform(info.getPrimaryNodes(), new IgniteClosure<UUID, String>() {
-                            @Override public String apply(UUID id) {
-                                return U.id8(id);
-                            }
-                        }) +
-                            ']');
+                        w.println(info.toUserString());
                 }
 
                 w.flush();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java
new file mode 100644
index 0000000..f46de12
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal;
+
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.binary.BinaryMetadata;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Exception thrown during serialization if binary metadata isn't registered and it's registration isn't allowed.
+ */
+public class UnregisteredBinaryTypeException extends IgniteException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private final int typeId;
+
+    /** */
+    private final BinaryMetadata binaryMetadata;
+
+    /**
+     * @param typeId Type ID.
+     * @param binaryMetadata Binary metadata.
+     */
+    public UnregisteredBinaryTypeException(int typeId, BinaryMetadata binaryMetadata) {
+        this.typeId = typeId;
+        this.binaryMetadata = binaryMetadata;
+    }
+
+    /**
+     * @param msg Error message.
+     * @param typeId Type ID.
+     * @param binaryMetadata Binary metadata.
+     */
+    public UnregisteredBinaryTypeException(String msg, int typeId,
+        BinaryMetadata binaryMetadata) {
+        super(msg);
+        this.typeId = typeId;
+        this.binaryMetadata = binaryMetadata;
+    }
+
+    /**
+     * @param cause Non-null throwable cause.
+     * @param typeId Type ID.
+     * @param binaryMetadata Binary metadata.
+     */
+    public UnregisteredBinaryTypeException(Throwable cause, int typeId,
+        BinaryMetadata binaryMetadata) {
+        super(cause);
+        this.typeId = typeId;
+        this.binaryMetadata = binaryMetadata;
+    }
+
+    /**
+     * @param msg Error message.
+     * @param cause Non-null throwable cause.
+     * @param typeId Type ID.
+     * @param binaryMetadata Binary metadata.
+     */
+    public UnregisteredBinaryTypeException(String msg, @Nullable Throwable cause, int typeId,
+        BinaryMetadata binaryMetadata) {
+        super(msg, cause);
+        this.typeId = typeId;
+        this.binaryMetadata = binaryMetadata;
+    }
+
+    /**
+     * @return Type ID.
+     */
+    public int typeId() {
+        return typeId;
+    }
+
+    /**
+     * @return Binary metadata.
+     */
+    public BinaryMetadata binaryMetadata() {
+        return binaryMetadata;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java
index 535249c..a0559cb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java
@@ -46,7 +46,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public synchronized void addMeta(int typeId, BinaryType type) throws BinaryObjectException {
+    @Override public synchronized void addMeta(int typeId, BinaryType type, boolean failIfUnregistered) throws BinaryObjectException {
         synchronized (this) {
             BinaryType oldType = metas.put(typeId, type);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java
index 106d238..cd32120 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java
@@ -40,6 +40,8 @@
 import org.apache.ignite.binary.BinaryReflectiveSerializer;
 import org.apache.ignite.binary.BinarySerializer;
 import org.apache.ignite.binary.Binarylizable;
+import org.apache.ignite.internal.UnregisteredClassException;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
 import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller;
 import org.apache.ignite.internal.processors.cache.CacheObjectImpl;
 import org.apache.ignite.internal.processors.query.QueryUtils;
@@ -773,7 +775,7 @@
                                     BinaryMetadata meta = new BinaryMetadata(typeId, typeName, collector.meta(),
                                         affKeyFieldName, Collections.singleton(newSchema), false, null);
 
-                                    ctx.updateMetadata(typeId, meta);
+                                    ctx.updateMetadata(typeId, meta, writer.failIfUnregistered());
 
                                     schemaReg.addSchema(newSchema.schemaId(), newSchema);
                                 }
@@ -794,7 +796,7 @@
                         BinaryMetadata meta = new BinaryMetadata(typeId, typeName, stableFieldsMeta,
                             affKeyFieldName, Collections.singleton(stableSchema), false, null);
 
-                        ctx.updateMetadata(typeId, meta);
+                        ctx.updateMetadata(typeId, meta, writer.failIfUnregistered());
 
                         schemaReg.addSchema(stableSchema.schemaId(), stableSchema);
 
@@ -823,6 +825,9 @@
             }
         }
         catch (Exception e) {
+            if (e instanceof UnregisteredBinaryTypeException || e instanceof UnregisteredClassException)
+                throw e;
+
             String msg;
 
             if (S.INCLUDE_SENSITIVE && !F.isEmpty(typeName))
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java
index 0121570..7885d95 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java
@@ -653,7 +653,7 @@
                         schemas, desc0.isEnum(),
                         cls.isEnum() ? enumMap(cls) : null);
 
-                    metaHnd.addMeta(desc0.typeId(), meta.wrap(this));
+                    metaHnd.addMeta(desc0.typeId(), meta.wrap(this), false);
 
                     return desc0;
                 }
@@ -801,7 +801,7 @@
 
         if (!deserialize)
             metaHnd.addMeta(typeId, new BinaryMetadata(typeId, typeName, desc.fieldsMeta(), affFieldName, null,
-                desc.isEnum(), cls.isEnum() ? enumMap(cls) : null).wrap(this));
+                desc.isEnum(), cls.isEnum() ? enumMap(cls) : null).wrap(this), false);
 
         descByCls.put(cls, desc);
 
@@ -1170,7 +1170,7 @@
         }
 
         metaHnd.addMeta(id,
-            new BinaryMetadata(id, typeName, fieldsMeta, affKeyFieldName, null, isEnum, enumMap).wrap(this));
+            new BinaryMetadata(id, typeName, fieldsMeta, affKeyFieldName, null, isEnum, enumMap).wrap(this), false);
     }
 
     /**
@@ -1325,10 +1325,11 @@
     /**
      * @param typeId Type ID.
      * @param meta Meta data.
+     * @param failIfUnregistered Fail if unregistered.
      * @throws BinaryObjectException In case of error.
      */
-    public void updateMetadata(int typeId, BinaryMetadata meta) throws BinaryObjectException {
-        metaHnd.addMeta(typeId, meta.wrap(this));
+    public void updateMetadata(int typeId, BinaryMetadata meta, boolean failIfUnregistered) throws BinaryObjectException {
+        metaHnd.addMeta(typeId, meta.wrap(this), failIfUnregistered);
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java
index 3277403..87c4f3e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java
@@ -26,6 +26,7 @@
 import java.util.Map;
 import java.util.UUID;
 import org.apache.ignite.binary.BinaryObjectException;
+import org.apache.ignite.internal.UnregisteredClassException;
 import org.apache.ignite.internal.util.GridUnsafe;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -155,6 +156,9 @@
             write0(obj, writer);
         }
         catch (Exception ex) {
+            if (ex instanceof UnregisteredClassException)
+                throw ex;
+
             if (S.INCLUDE_SENSITIVE && !F.isEmpty(name))
                 throw new BinaryObjectException("Failed to write field [name=" + name + ']', ex);
             else
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java
index 5df32e7..85ab137 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java
@@ -30,9 +30,10 @@
      *
      * @param typeId Type ID.
      * @param meta Metadata.
+     * @param failIfUnregistered Fail if unregistered.
      * @throws BinaryObjectException In case of error.
      */
-    public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException;
+    public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException;
 
     /**
      * Gets meta data for provided type ID.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java
index bbd9311..4ee2428 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java
@@ -43,7 +43,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException {
+    @Override public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException {
         // No-op.
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java
index c5fe6da..920a296 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java
@@ -164,7 +164,7 @@
     }
 
     /** {@inheritDoc} */
-    public boolean equals(Object other) {
+    @Override public boolean equals(Object other) {
         if (other == this)
             return true;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java
index ab1f874..38934f0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryReaderExImpl.java
@@ -240,7 +240,7 @@
                 footerStart = start + offset;
 
                 if (BinaryUtils.hasRaw(flags)) {
-                    footerLen = len - offset - 4;
+                    footerLen = len - offset;
                     rawOff = start + in.readIntPositioned(start + len - 4);
                 }
                 else {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryTypeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryTypeImpl.java
index fac1ff9..a361bb4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryTypeImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryTypeImpl.java
@@ -110,7 +110,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(BinaryTypeImpl.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java
index 3d93e70..d882b17 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java
@@ -116,6 +116,13 @@
     }
 
     /**
+     * @return Fail if unregistered flag value.
+     */
+    public boolean failIfUnregistered() {
+        return failIfUnregistered;
+    }
+
+    /**
      * @param failIfUnregistered Fail if unregistered.
      */
     public void failIfUnregistered(boolean failIfUnregistered) {
@@ -372,7 +379,7 @@
     /**
      * @param val Byte array.
      */
-    public void write(byte[] val) {
+    @Override public void write(byte[] val) {
         out.writeByteArray(val);
     }
 
@@ -381,7 +388,7 @@
      * @param off Offset.
      * @param len Length.
      */
-    public void write(byte[] val, int off, int len) {
+    @Override public void write(byte[] val, int off, int len) {
         out.write(val, off, len);
     }
 
@@ -503,6 +510,8 @@
         else {
             BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx, out, schema, handles());
 
+            writer.failIfUnregistered(failIfUnregistered);
+
             writer.marshal(obj);
         }
     }
@@ -1492,6 +1501,8 @@
         else {
             BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx, out, schema, null);
 
+            writer.failIfUnregistered(failIfUnregistered);
+
             writer.marshal(obj);
         }
     }
@@ -1915,6 +1926,8 @@
     public BinaryWriterExImpl newWriter(int typeId) {
         BinaryWriterExImpl res = new BinaryWriterExImpl(ctx, out, schema, handles());
 
+        res.failIfUnregistered(failIfUnregistered);
+
         res.typeId(typeId);
 
         return res;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderReader.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderReader.java
index 2d10cf4..47ff710 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderReader.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderReader.java
@@ -162,7 +162,7 @@
      * @param pos Position in the source array.
      * @return Read byte value.
      */
-    public byte readBytePositioned(int pos) {
+    @Override public byte readBytePositioned(int pos) {
         return BinaryPrimitives.readByte(arr, pos);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java
index 5333cc4..edc80b6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java
@@ -126,7 +126,7 @@
 
             BinaryMetadata meta = new BinaryMetadata(typeId, typeName, null, null, null, true, enumMap);
 
-            writer.context().updateMetadata(typeId, meta);
+            writer.context().updateMetadata(typeId, meta, writer.failIfUnregistered());
 
             // Need register class for marshaller to be able to deserialize enum value.
             writer.context().descriptorForClass(((Enum)val).getDeclaringClass(), false, false);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java
index c577e02..abd63cd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java
@@ -36,6 +36,7 @@
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.thread.IgniteThread;
 import org.jetbrains.annotations.Nullable;
 
 import java.util.Collection;
@@ -174,6 +175,12 @@
     /** {@inheritDoc} */
     @Override public BinaryObject build() {
         try (BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx)) {
+            Thread curThread = Thread.currentThread();
+
+            if (curThread instanceof IgniteThread)
+                writer.failIfUnregistered(((IgniteThread)curThread).executingEntryProcessor() &&
+                    ((IgniteThread)curThread).holdsTopLock());
+
             writer.typeId(typeId);
 
             BinaryBuilderSerializer serializationCtx = new BinaryBuilderSerializer();
@@ -360,7 +367,7 @@
                 ctx.registerUserClassName(typeId, typeName);
 
                 ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, fieldsMeta, affFieldName0,
-                    Collections.singleton(curSchema), false, null));
+                    Collections.singleton(curSchema), false, null), writer.failIfUnregistered());
 
                 schemaReg.addSchema(curSchema.schemaId(), curSchema);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryOffheapOutputStream.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryOffheapOutputStream.java
index d16e575..a782286 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryOffheapOutputStream.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryOffheapOutputStream.java
@@ -90,7 +90,7 @@
     }
 
     /** {@inheritDoc} */
-    public int capacity() {
+    @Override public int capacity() {
         return cap;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientNodeImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientNodeImpl.java
index a4b659b..4365c45 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientNodeImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientNodeImpl.java
@@ -191,7 +191,7 @@
     }
 
     /** {@inheritDoc} */
-    public long order() {
+    @Override public long order() {
         return order;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java
index 364e044..4c61ead 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java
@@ -162,8 +162,7 @@
 
                 U.warn(
                     log,
-                    "Message forwarding was interrupted (will ignore last message): " + e.getMessage(),
-                    "Message forwarding was interrupted.");
+                    "Message forwarding was interrupted (will ignore last message): " + e.getMessage());
             }
         }
         else if (msg instanceof GridClientHandshakeRequest) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java
index b651ae3..8bf1e8d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java
@@ -419,7 +419,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return getClass().getSimpleName() + parameters();
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java
index 8525f5e..4164532 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java
@@ -160,7 +160,7 @@
 
         int typeId = ctx.typeId(typeName);
 
-        ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, null, null, null, true, vals));
+        ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, null, null, null, true, vals), false);
 
         return ctx.metadata(typeId);
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java
index eb62c80..9e97b34 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java
@@ -43,4 +43,9 @@
      */
     public <T> T receive(ClientOperation op, long reqId, Function<BinaryInputStream, T> payloadReader)
         throws ClientConnectionException, ClientAuthorizationException;
+
+    /**
+     * @return Server version.
+     */
+    public ProtocolVersion serverVersion();
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java
index a50d78e..3114fc0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientUtils.java
@@ -55,7 +55,9 @@
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
 import org.apache.ignite.internal.binary.streams.BinaryInputStream;
 import org.apache.ignite.internal.binary.streams.BinaryOutputStream;
-import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
+
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.VER_1_2_0;
 
 /**
  * Shared serialization/deserialization utils.
@@ -232,7 +234,7 @@
     }
 
     /** Serialize configuration to stream. */
-    void cacheConfiguration(ClientCacheConfiguration cfg, BinaryOutputStream out) {
+    void cacheConfiguration(ClientCacheConfiguration cfg, BinaryOutputStream out, ClientListenerProtocolVersion ver) {
         try (BinaryRawWriterEx writer = new BinaryWriterExImpl(marsh.context(), out, null, null)) {
             int origPos = out.position();
 
@@ -310,8 +312,11 @@
                                 w.writeBoolean(qf.isKey());
                                 w.writeBoolean(qf.isNotNull());
                                 w.writeObject(qf.getDefaultValue());
-                                w.writeInt(qf.getPrecision());
-                                w.writeInt(qf.getScale());
+
+                                if (ver.compareTo(VER_1_2_0) >= 0) {
+                                    w.writeInt(qf.getPrecision());
+                                    w.writeInt(qf.getScale());
+                                }
                             }
                         );
                         ClientUtils.collection(
@@ -344,7 +349,8 @@
     }
 
     /** Deserialize configuration from stream. */
-    ClientCacheConfiguration cacheConfiguration(BinaryInputStream in) throws IOException {
+    ClientCacheConfiguration cacheConfiguration(BinaryInputStream in, ClientListenerProtocolVersion ver)
+        throws IOException {
         try (BinaryReaderExImpl reader = new BinaryReaderExImpl(marsh.context(), in, null, true)) {
             reader.readInt(); // Do not need length to read data. The protocol defines fixed configuration layout.
 
@@ -388,17 +394,27 @@
                             .setKeyFieldName(reader.readString())
                             .setValueFieldName(reader.readString());
 
+                        boolean isCliVer1_2 = ver.compareTo(VER_1_2_0) >= 0;
+
                         Collection<QueryField> qryFields = ClientUtils.collection(
                             in,
-                            unused2 -> new QueryField(
-                                reader.readString(),
-                                reader.readString(),
-                                reader.readBoolean(),
-                                reader.readBoolean(),
-                                reader.readObject(),
-                                reader.readInt(),
-                                reader.readInt()
-                            )
+                            unused2 -> {
+                                String name = reader.readString();
+                                String typeName = reader.readString();
+                                boolean isKey = reader.readBoolean();
+                                boolean isNotNull = reader.readBoolean(); 
+                                Object dfltVal = reader.readObject();
+                                int precision = isCliVer1_2 ? reader.readInt() : -1;
+                                int scale = isCliVer1_2 ? reader.readInt() : -1; 
+
+                                return new QueryField(name,
+                                    typeName,
+                                    isKey,
+                                    isNotNull,
+                                    dfltVal,
+                                    precision,
+                                    scale);
+                            }
                         );
 
                         return qryEntity
@@ -419,6 +435,14 @@
                                 .filter(f -> f.getDefaultValue() != null)
                                 .collect(Collectors.toMap(QueryField::getName, QueryField::getDefaultValue))
                             )
+                            .setFieldsPrecision(qryFields.stream()
+                                .filter(f -> f.getPrecision() != -1)
+                                .collect(Collectors.toMap(QueryField::getName, QueryField::getPrecision))
+                            )
+                            .setFieldsScale(qryFields.stream()
+                                .filter(f -> f.getScale() != -1)
+                                .collect(Collectors.toMap(QueryField::getName, QueryField::getScale))
+                            )
                             .setAliases(ClientUtils.collection(
                                 in,
                                 unused3 -> new SimpleEntry<>(reader.readString(), reader.readString())
@@ -513,16 +537,14 @@
             Set<String> keys = e.getKeyFields();
             Set<String> notNulls = e.getNotNullFields();
             Map<String, Object> dflts = e.getDefaultFieldValues();
-            Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = e.getDecimalInfo();
+            Map<String, Integer> fldsPrecision = e.getFieldsPrecision();
+            Map<String, Integer> fldsScale = e.getFieldsScale();
 
             isKey = keys != null && keys.contains(name);
             isNotNull = notNulls != null && notNulls.contains(name);
             dfltVal = dflts == null ? null : dflts.get(name);
-
-            IgniteBiTuple<Integer, Integer> precisionAndScale = decimalInfo == null ? null : decimalInfo.get(name);
-
-            precision = precisionAndScale == null? -1 : precisionAndScale.get1();
-            scale = precisionAndScale == null? -1 : precisionAndScale.get2();
+            precision = fldsPrecision == null ? -1 : fldsPrecision.getOrDefault(name, -1);
+            scale = fldsScale == null? -1 : fldsScale.getOrDefault(name, -1);
         }
 
         /** Deserialization constructor. */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java
index dac4320..537ce75 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java
@@ -163,6 +163,13 @@
     }
 
     /**
+     * @return Server version.
+     */
+    public ProtocolVersion serverVersion() {
+        return ch.serverVersion();
+    }
+
+    /**
      * @return host:port_range address lines parsed as {@link InetSocketAddress}.
      */
     private static List<InetSocketAddress> parseAddresses(String[] addrs) throws ClientException {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientCache.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientCache.java
index bb659d0..6f79442 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientCache.java
@@ -40,6 +40,7 @@
 import org.apache.ignite.internal.binary.streams.BinaryOutputStream;
 
 import static java.util.AbstractMap.SimpleEntry;
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.CURRENT_VER;
 
 /**
  * Implementation of {@link ClientCache} over TCP protocol.
@@ -74,7 +75,7 @@
     }
 
     /** {@inheritDoc} */
-    public V get(K key) throws ClientException {
+    @Override public V get(K key) throws ClientException {
         if (key == null)
             throw new NullPointerException("key");
 
@@ -89,7 +90,7 @@
     }
 
     /** {@inheritDoc} */
-    public void put(K key, V val) throws ClientException {
+    @Override public void put(K key, V val) throws ClientException {
         if (key == null)
             throw new NullPointerException("key");
 
@@ -133,7 +134,7 @@
             this::writeCacheInfo,
             res -> {
                 try {
-                    return serDes.cacheConfiguration(res);
+                    return serDes.cacheConfiguration(res, CURRENT_VER);
                 }
                 catch (IOException e) {
                     return null;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
index 10dc865..fa1efee 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java
@@ -69,6 +69,9 @@
  * Implements {@link ClientChannel} over TCP.
  */
 class TcpClientChannel implements ClientChannel {
+    /** Protocol version: 1.2.0. */
+    private static final ProtocolVersion V1_2_0 = new ProtocolVersion((short)1, (short)2, (short)0);
+    
     /** Protocol version: 1.1.0. */
     private static final ProtocolVersion V1_1_0 = new ProtocolVersion((short)1, (short)1, (short)0);
 
@@ -76,10 +79,14 @@
     private static final ProtocolVersion V1_0_0 = new ProtocolVersion((short)1, (short)0, (short)0);
 
     /** Supported protocol versions. */
-    private static final Collection<ProtocolVersion> supportedVers = Arrays.asList(V1_1_0, V1_0_0);
+    private static final Collection<ProtocolVersion> supportedVers = Arrays.asList(
+        V1_2_0, 
+        V1_1_0, 
+        V1_0_0
+    );
 
     /** Protocol version agreed with the server. */
-    private ProtocolVersion ver = V1_1_0;
+    private ProtocolVersion ver = V1_2_0;
 
     /** Channel. */
     private final Socket sock;
@@ -139,7 +146,7 @@
     }
 
     /** {@inheritDoc} */
-    public <T> T receive(ClientOperation op, long reqId, Function<BinaryInputStream, T> payloadReader)
+    @Override public <T> T receive(ClientOperation op, long reqId, Function<BinaryInputStream, T> payloadReader)
         throws ClientConnectionException, ClientAuthorizationException {
 
         final int MIN_RES_SIZE = 8 + 4; // minimal response size: long (8 bytes) ID + int (4 bytes) status
@@ -182,6 +189,11 @@
         return payloadReader.apply(payload);
     }
 
+    /** {@inheritDoc} */
+    @Override public ProtocolVersion serverVersion() {
+        return ver;
+    }
+
     /** Validate {@link ClientConfiguration}. */
     private static void validateConfiguration(ClientChannelConfiguration cfg) {
         String error = null;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java
index 7beeb79..c4af5ee 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java
@@ -47,6 +47,7 @@
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
 import org.apache.ignite.internal.binary.streams.BinaryInputStream;
 import org.apache.ignite.internal.binary.streams.BinaryOutputStream;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.marshaller.MarshallerContext;
@@ -112,7 +113,8 @@
         ClientCacheConfiguration cfg) throws ClientException {
         ensureCacheConfiguration(cfg);
 
-        ch.request(ClientOperation.CACHE_GET_OR_CREATE_WITH_CONFIGURATION, req -> serDes.cacheConfiguration(cfg, req));
+        ch.request(ClientOperation.CACHE_GET_OR_CREATE_WITH_CONFIGURATION, 
+            req -> serDes.cacheConfiguration(cfg, req, toClientVersion(ch.serverVersion())));
 
         return new TcpClientCache<>(cfg.getName(), ch, marsh);
     }
@@ -149,11 +151,22 @@
     @Override public <K, V> ClientCache<K, V> createCache(ClientCacheConfiguration cfg) throws ClientException {
         ensureCacheConfiguration(cfg);
 
-        ch.request(ClientOperation.CACHE_CREATE_WITH_CONFIGURATION, req -> serDes.cacheConfiguration(cfg, req));
+        ch.request(ClientOperation.CACHE_CREATE_WITH_CONFIGURATION, 
+            req -> serDes.cacheConfiguration(cfg, req, toClientVersion(ch.serverVersion())));
 
         return new TcpClientCache<>(cfg.getName(), ch, marsh);
     }
 
+    /**
+     * Converts {@link ProtocolVersion} to {@link ClientListenerProtocolVersion}.
+     *
+     * @param srvVer Server protocol version.
+     * @return Client protocol version.
+     */
+    private ClientListenerProtocolVersion toClientVersion(ProtocolVersion srvVer) {
+        return ClientListenerProtocolVersion.create(srvVer.major(), srvVer.minor(), srvVer.patch());
+    }
+
     /** {@inheritDoc} */
     @Override public IgniteBinary binary() {
         return binary;
@@ -233,7 +246,7 @@
         private final BinaryMetadataHandler cache = BinaryCachingMetadataHandler.create();
 
         /** {@inheritDoc} */
-        @Override public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException {
+        @Override public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException {
             if (cache.metadata(typeId) == null) {
                 try {
                     ch.request(
@@ -246,7 +259,7 @@
                 }
             }
 
-            cache.addMeta(typeId, meta); // merge
+            cache.addMeta(typeId, meta, failIfUnregistered); // merge
         }
 
         /** {@inheritDoc} */
@@ -259,7 +272,7 @@
                 if (meta0 != null) {
                     meta = new BinaryTypeImpl(marsh.context(), meta0);
 
-                    cache.addMeta(typeId, meta);
+                    cache.addMeta(typeId, meta, false);
                 }
             }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
index 73bf224..e5bb47d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/ClusterGroupAdapter.java
@@ -1013,7 +1013,7 @@
          * @return Reconstructed object.
          * @throws ObjectStreamException Thrown in case of unmarshalling error.
          */
-        protected Object readResolve() throws ObjectStreamException {
+        @Override protected Object readResolve() throws ObjectStreamException {
             ClusterGroupAdapter parent = (ClusterGroupAdapter)super.readResolve();
 
             return new AgeClusterGroup(parent, isOldest);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
index 82779dab..b755258 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java
@@ -825,7 +825,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return "IgniteCluster [igniteInstanceName=" + ctx.igniteInstanceName() + ']';
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java
index 7b5ce44..092efff 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java
@@ -1060,29 +1060,7 @@
                     "]");
 
                 for (VisorTxInfo info : entry.getValue().getInfos())
-                    log("    Tx: [xid=" + info.getXid() +
-                        ", label=" + info.getLabel() +
-                        ", state=" + info.getState() +
-                        ", startTime=" + info.getFormattedStartTime() +
-                        ", duration=" + info.getDuration() / 1000 +
-                        ", isolation=" + info.getIsolation() +
-                        ", concurrency=" + info.getConcurrency() +
-                        ", timeout=" + info.getTimeout() +
-                        ", size=" + info.getSize() +
-                        ", dhtNodes=" + (info.getPrimaryNodes() == null ? "N/A" :
-                        F.transform(info.getPrimaryNodes(), new IgniteClosure<UUID, String>() {
-                            @Override public String apply(UUID id) {
-                                return U.id8(id);
-                            }
-                        })) +
-                        ", nearXid=" + info.getNearXid() +
-                        ", parentNodeIds=" + (info.getMasterNodeIds() == null ? "N/A" :
-                        F.transform(info.getMasterNodeIds(), new IgniteClosure<UUID, String>() {
-                            @Override public String apply(UUID id) {
-                                return U.id8(id);
-                            }
-                        })) +
-                        ']');
+                    log(info.toUserString());
             }
         }
         catch (Throwable e) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java
index 7d3644f..47d7877 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java
@@ -380,7 +380,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(DirectMessageReader.class, this);
     }
 
@@ -420,7 +420,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(StateItem.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java
index b224d68..51cea17 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java
@@ -388,7 +388,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(StateItem.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java
index 76cb762..c78c479 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java
@@ -1332,7 +1332,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(DirectByteBufferStreamImplV1.class, this);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java
index 92355a0..5e40bbf 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionProperties.java
@@ -164,6 +164,16 @@
     public void setSkipReducerOnUpdate(boolean skipReducerOnUpdate);
 
     /**
+     * @return Nested transactions handling strategy.
+     */
+    public String nestedTxMode();
+
+    /**
+     * @param nestedTxMode Nested transactions handling strategy.
+     */
+    public void nestedTxMode(String nestedTxMode);
+
+    /**
      * Gets SSL connection mode.
      *
      * @return Use SSL flag.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java
index 9371a07..51a3837 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java
@@ -27,6 +27,7 @@
 import org.apache.ignite.configuration.ClientConnectorConfiguration;
 import org.apache.ignite.internal.processors.odbc.SqlStateCode;
 import org.apache.ignite.internal.util.HostAndPortRange;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.util.typedef.F;
 
 /**
@@ -99,6 +100,26 @@
     private BooleanProperty skipReducerOnUpdate = new BooleanProperty(
         "skipReducerOnUpdate", "Enable execution update queries on ignite server nodes", false, false);
 
+    /** Nested transactions handling strategy. */
+    private StringProperty nestedTxMode = new StringProperty(
+        "nestedTransactionsMode", "Way to handle nested transactions", NestedTxMode.ERROR.name(),
+        new String[] { NestedTxMode.COMMIT.name(), NestedTxMode.ERROR.name(), NestedTxMode.IGNORE.name() },
+        false, new PropertyValidator() {
+        private static final long serialVersionUID = 0L;
+
+        @Override public void validate(String mode) throws SQLException {
+            if (!F.isEmpty(mode)) {
+                try {
+                    NestedTxMode.valueOf(mode.toUpperCase());
+                }
+                catch (IllegalArgumentException e) {
+                    throw new SQLException("Invalid nested transactions handling mode, allowed values: " +
+                        Arrays.toString(nestedTxMode.choices), SqlStateCode.CLIENT_CONNECTION_FAILED);
+                }
+            }
+        }
+    });
+
     /** SSL: Use SSL connection to Ignite node. */
     private StringProperty sslMode = new StringProperty("sslMode",
         "The SSL mode of the connection", SSL_MODE_DISABLE,
@@ -165,7 +186,7 @@
     /** Properties array. */
     private final ConnectionProperty [] propsArray = {
         distributedJoins, enforceJoinOrder, collocated, replicatedOnly, autoCloseServerCursor,
-        tcpNoDelay, lazy, socketSendBuffer, socketReceiveBuffer, skipReducerOnUpdate,
+        tcpNoDelay, lazy, socketSendBuffer, socketReceiveBuffer, skipReducerOnUpdate, nestedTxMode,
         sslMode, sslProtocol, sslKeyAlgorithm,
         sslClientCertificateKeyStoreUrl, sslClientCertificateKeyStorePassword, sslClientCertificateKeyStoreType,
         sslTrustCertificateKeyStoreUrl, sslTrustCertificateKeyStorePassword, sslTrustCertificateKeyStoreType,
@@ -437,6 +458,16 @@
     }
 
     /** {@inheritDoc} */
+    @Override public String nestedTxMode() {
+        return nestedTxMode.value();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void nestedTxMode(String val) {
+        nestedTxMode.setValue(val);
+    }
+
+    /** {@inheritDoc} */
     @Override public void setUsername(String name) {
         user.setValue(name);
     }
@@ -815,11 +846,11 @@
                     SqlStateCode.CLIENT_CONNECTION_FAILED);
             }
 
-            checkChoices(strVal);
-
             if (validator != null)
                 validator.validate(strVal);
 
+            checkChoices(strVal);
+
             props.remove(name);
 
             init(strVal);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java
index b36b319..547b1db 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java
@@ -197,7 +197,7 @@
                 }
 
                 sendRequest(new JdbcQueryExecuteRequest(JdbcStatementType.ANY_STATEMENT_TYPE,
-                    schema, 1, 1, sql, null));
+                    schema, 1, 1, autoCommit, sql, null));
 
                 streamState = new StreamState((SqlSetStreamingCommand)cmd);
             }
@@ -328,19 +328,18 @@
     @Override public void setAutoCommit(boolean autoCommit) throws SQLException {
         ensureNotClosed();
 
-        this.autoCommit = autoCommit;
+        // Do nothing if resulting value doesn't actually change.
+        if (autoCommit != this.autoCommit) {
+            doCommit();
 
-        if (!autoCommit)
-            LOG.warning("Transactions are not supported.");
+            this.autoCommit = autoCommit;
+        }
     }
 
     /** {@inheritDoc} */
     @Override public boolean getAutoCommit() throws SQLException {
         ensureNotClosed();
 
-        if (!autoCommit)
-            LOG.warning("Transactions are not supported.");
-
         return autoCommit;
     }
 
@@ -351,7 +350,7 @@
         if (autoCommit)
             throw new SQLException("Transaction cannot be committed explicitly in auto-commit mode.");
 
-        LOG.warning("Transactions are not supported.");
+        doCommit();
     }
 
     /** {@inheritDoc} */
@@ -359,9 +358,21 @@
         ensureNotClosed();
 
         if (autoCommit)
-            throw new SQLException("Transaction cannot rollback in auto-commit mode.");
+            throw new SQLException("Transaction cannot be rolled back explicitly in auto-commit mode.");
 
-        LOG.warning("Transactions are not supported.");
+        try (Statement s = createStatement()) {
+            s.execute("ROLLBACK");
+        }
+    }
+
+    /**
+     * Send to the server {@code COMMIT} command.
+     * @throws SQLException if failed.
+     */
+    private void doCommit() throws SQLException {
+        try (Statement s = createStatement()) {
+            s.execute("COMMIT");
+        }
     }
 
     /** {@inheritDoc} */
@@ -927,7 +938,7 @@
                 respSem.acquire();
 
                 sendRequestNotWaitResponse(
-                    new JdbcOrderedBatchExecuteRequest(schema, streamBatch, lastBatch, order));
+                    new JdbcOrderedBatchExecuteRequest(schema, streamBatch, autoCommit, lastBatch, order));
 
                 streamBatch = null;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java
index 7adc301..e24ecbb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinDatabaseMetadata.java
@@ -46,6 +46,7 @@
 import org.apache.ignite.internal.util.typedef.F;
 
 import static java.sql.Connection.TRANSACTION_NONE;
+import static java.sql.Connection.TRANSACTION_REPEATABLE_READ;
 import static java.sql.ResultSet.CONCUR_READ_ONLY;
 import static java.sql.ResultSet.HOLD_CURSORS_OVER_COMMIT;
 import static java.sql.ResultSet.TYPE_FORWARD_ONLY;
@@ -630,17 +631,19 @@
 
     /** {@inheritDoc} */
     @Override public int getDefaultTransactionIsolation() throws SQLException {
-        return TRANSACTION_NONE;
+        return conn.igniteVersion().greaterThanEqual(2, 5, 0) ? TRANSACTION_REPEATABLE_READ :
+            TRANSACTION_NONE;
     }
 
     /** {@inheritDoc} */
     @Override public boolean supportsTransactions() throws SQLException {
-        return false;
+        return conn.igniteVersion().greaterThanEqual(2, 5, 0);
     }
 
     /** {@inheritDoc} */
     @Override public boolean supportsTransactionIsolationLevel(int level) throws SQLException {
-        return false;
+        return conn.igniteVersion().greaterThanEqual(2, 5, 0) &&
+            TRANSACTION_REPEATABLE_READ == level;
     }
 
     /** {@inheritDoc} */
@@ -846,7 +849,7 @@
         row.add(colMeta.columnName());          // 4. COLUMN_NAME
         row.add(colMeta.dataType());            // 5. DATA_TYPE
         row.add(colMeta.dataTypeName());        // 6. TYPE_NAME
-        row.add(colMeta.precision() == -1 ? null : colMeta.precision());                 // 7. COLUMN_SIZE
+        row.add(colMeta.precision() == -1 ? null : colMeta.precision()); // 7. COLUMN_SIZE
         row.add((Integer)null);                 // 8. BUFFER_LENGTH
         row.add(colMeta.scale() == -1 ? null : colMeta.scale());           // 9. DECIMAL_DIGITS
         row.add(10);                            // 10. NUM_PREC_RADIX
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java
index 30e446f..f0f7337 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java
@@ -46,6 +46,8 @@
 import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResult;
 import org.apache.ignite.internal.processors.odbc.jdbc.JdbcResultInfo;
 import org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType;
+import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest;
+import org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.sql.SqlKeyword;
 import org.apache.ignite.internal.sql.SqlParseException;
@@ -208,7 +210,7 @@
         }
 
         JdbcResult res0 = conn.sendRequest(new JdbcQueryExecuteRequest(stmtType, schema, pageSize,
-            maxRows, sql, args == null ? null : args.toArray(new Object[args.size()])));
+            maxRows, conn.getAutoCommit(), sql, args == null ? null : args.toArray(new Object[args.size()])));
 
         assert res0 != null;
 
@@ -646,7 +648,8 @@
             throw new SQLException("Batch is empty.");
 
         try {
-            JdbcBatchExecuteResult res = conn.sendRequest(new JdbcBatchExecuteRequest(conn.getSchema(), batch, false));
+            JdbcBatchExecuteResult res = conn.sendRequest(new JdbcBatchExecuteRequest(conn.getSchema(), batch,
+                conn.getAutoCommit(), false));
 
             if (res.errorCode() != ClientListenerResponse.STATUS_SUCCESS) {
                 throw new BatchUpdateException(res.errorMessage(), IgniteQueryErrorCode.codeToSqlState(res.errorCode()),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java
index 9dcf74d..6155423 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java
@@ -70,8 +70,11 @@
     /** Version 2.5.0. */
     private static final ClientListenerProtocolVersion VER_2_5_0 = ClientListenerProtocolVersion.create(2, 5, 0);
 
+    /** Version 2.7.0. */
+    private static final ClientListenerProtocolVersion VER_2_7_0 = ClientListenerProtocolVersion.create(2, 7, 0);
+
     /** Current version. */
-    private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_5_0;
+    public static final ClientListenerProtocolVersion CURRENT_VER = VER_2_7_0;
 
     /** Initial output stream capacity for handshake. */
     private static final int HANDSHAKE_MSG_SIZE = 13;
@@ -324,6 +327,9 @@
         writer.writeBoolean(connProps.isLazy());
         writer.writeBoolean(connProps.isSkipReducerOnUpdate());
 
+        if (ver.compareTo(VER_2_7_0) >= 0)
+            writer.writeString(connProps.nestedTxMode());
+
         if (!F.isEmpty(connProps.getUsername())) {
             assert ver.compareTo(VER_2_5_0) >= 0 : "Authentication is supported since 2.5";
 
@@ -371,14 +377,16 @@
                     + ", url=" + connProps.getUrl() + ']', SqlStateCode.CONNECTION_REJECTED);
             }
 
-            if (VER_2_4_0.equals(srvProtocolVer) || VER_2_3_0.equals(srvProtocolVer) ||
-                VER_2_1_5.equals(srvProtocolVer))
-                handshake(srvProtocolVer);
-            else if (VER_2_1_0.equals(srvProtocolVer))
+            if (VER_2_5_0.equals(srvProtoVer0)
+                || VER_2_4_0.equals(srvProtoVer0)
+                || VER_2_3_0.equals(srvProtoVer0)
+                || VER_2_1_5.equals(srvProtoVer0))
+                handshake(srvProtoVer0);
+            else if (VER_2_1_0.equals(srvProtoVer0))
                 handshake_2_1_0();
             else {
                 throw new SQLException("Handshake failed [driverProtocolVer=" + CURRENT_VER +
-                    ", remoteNodeProtocolVer=" + srvProtocolVer + ", err=" + err + ']',
+                    ", remoteNodeProtocolVer=" + srvProtoVer0 + ", err=" + err + ']',
                     SqlStateCode.CONNECTION_REJECTED);
             }
         }
@@ -461,7 +469,7 @@
             BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap),
                 null, null);
 
-            req.writeBinary(writer);
+            req.writeBinary(writer, srvProtocolVer);
 
             send(writer.array());
         }
@@ -495,7 +503,7 @@
 
             BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), null, null);
 
-            req.writeBinary(writer);
+            req.writeBinary(writer, srvProtocolVer);
 
             send(writer.array());
 
@@ -518,7 +526,7 @@
 
         JdbcResponse res = new JdbcResponse();
 
-        res.readBinary(reader);
+        res.readBinary(reader, srvProtocolVer);
 
         return res;
     }
@@ -538,8 +546,8 @@
 
             int cnt = !F.isEmpty(qrys) ? Math.min(MAX_BATCH_QRY_CNT, qrys.size()) : 0;
 
-            // One additional byte for last batch flag.
-            cap = cnt * DYNAMIC_SIZE_MSG_CAP + 1;
+            // One additional byte for autocommit and last batch flags.
+            cap = cnt * DYNAMIC_SIZE_MSG_CAP + 2;
         }
         else if (req instanceof JdbcQueryCloseRequest)
             cap = QUERY_CLOSE_MSG_SIZE;
@@ -644,4 +652,4 @@
 
         return srvProtocolVer.compareTo(VER_2_5_0) >= 0;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java
index f0b19f3..6337d53 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java
@@ -188,8 +188,7 @@
                         U.warn(log, S.toString("Checkpoint will not be saved due to session invalidation",
                             "key", key, true,
                             "val", state, true,
-                            "ses", ses, false),
-                            "Checkpoint will not be saved due to session invalidation.");
+                            "ses", ses, false));
 
                         break;
                     }
@@ -198,8 +197,7 @@
                         U.warn(log, S.toString("Checkpoint will not be saved due to session timeout",
                             "key", key, true,
                             "val", state, true,
-                            "ses", ses, false),
-                            "Checkpoint will not be saved due to session timeout.");
+                            "ses", ses, false));
 
                         break;
                     }
@@ -224,8 +222,7 @@
                             U.warn(log, S.toString("Checkpoint will not be saved due to session invalidation",
                                 "key", key, true,
                                 "val", state, true,
-                                "ses", ses, false),
-                                "Checkpoint will not be saved due to session invalidation.");
+                                "ses", ses, false));
 
                             keyMap.remove(ses.getId(), keys);
 
@@ -508,4 +505,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java
index 8d9a700..b3c80b0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java
@@ -67,6 +67,7 @@
 import org.apache.ignite.internal.managers.deployment.GridDeployment;
 import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
 import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccMessage;
 import org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter;
 import org.apache.ignite.internal.processors.pool.PoolProcessor;
 import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
@@ -101,6 +102,7 @@
 import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
 import static org.apache.ignite.events.EventType.EVT_NODE_JOINED;
 import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
+import static org.apache.ignite.internal.GridTopic.TOPIC_CACHE_COORDINATOR;
 import static org.apache.ignite.internal.GridTopic.TOPIC_COMM_USER;
 import static org.apache.ignite.internal.GridTopic.TOPIC_IO_TEST;
 import static org.apache.ignite.internal.managers.communication.GridIoPolicy.AFFINITY_POOL;
@@ -1112,6 +1114,17 @@
 
             return;
         }
+        if (msg.topicOrdinal() == TOPIC_CACHE_COORDINATOR.ordinal()) {
+            MvccMessage msg0 = (MvccMessage)msg.message();
+
+            // see IGNITE-8609
+            /*if (msg0.processedFromNioThread())
+                c.run();
+            else*/
+                ctx.getStripedExecutorService().execute(-1, c);
+
+            return;
+        }
 
         if (plc == GridIoPolicy.SYSTEM_POOL && msg.partition() != GridIoMessage.STRIPE_DISABLED_PART) {
             ctx.getStripedExecutorService().execute(msg.partition(), c);
@@ -1648,6 +1661,9 @@
                 if (e.getCause() instanceof ClusterTopologyCheckedException)
                     throw (ClusterTopologyCheckedException)e.getCause();
 
+                if (!ctx.discovery().alive(node))
+                    throw new ClusterTopologyCheckedException("Failed to send message, node left: " + node.id(), e);
+
                 throw new IgniteCheckedException("Failed to send message (node may have left the grid or " +
                     "TCP connection cannot be established due to firewall issues) " +
                     "[node=" + node + ", topic=" + topic +
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java
index 581c32e..04f6b5f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java
@@ -47,12 +47,14 @@
 import org.apache.ignite.internal.processors.cache.CacheObjectByteArrayImpl;
 import org.apache.ignite.internal.processors.cache.CacheObjectImpl;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
+import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.GridChangeGlobalStateMessageResponse;
 import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl;
 import org.apache.ignite.internal.processors.cache.WalStateAckMessage;
 import org.apache.ignite.internal.processors.cache.binary.MetadataRequestMessage;
 import org.apache.ignite.internal.processors.cache.binary.MetadataResponseMessage;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessageV2;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.LatchAckMessage;
 import org.apache.ignite.internal.processors.cache.distributed.GridCacheTtlUpdateRequest;
 import org.apache.ignite.internal.processors.cache.distributed.GridCacheTxRecoveryRequest;
@@ -73,7 +75,11 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxOnePhaseCommitAckRequest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryEnlistRequest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryEnlistResponse;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryFirstEnlistRequest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtUnlockRequest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCounters;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicDeferredUpdateResponse;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicNearResponse;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicSingleUpdateRequest;
@@ -97,6 +103,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleRequest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.LatchAckMessage;
 import org.apache.ignite.internal.processors.cache.distributed.near.CacheVersionedValue;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
@@ -108,7 +115,24 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearUnlockRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotWithoutTxs;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestQueryCntr;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestQueryId;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTx;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTxAndQueryCntr;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTxAndQueryId;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccActiveQueriesMessage;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccFutureResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccQuerySnapshotRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccSnapshotResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccTxSnapshotRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccWaitTxsRequest;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryRequest;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryResponse;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery;
@@ -867,6 +891,7 @@
 
                 break;
 
+            // [120..123] - DR
             case 124:
                 msg = new GridMessageCollection<>();
 
@@ -927,6 +952,121 @@
 
                 break;
 
+            case 136:
+                msg = new MvccTxSnapshotRequest();
+
+                break;
+
+            case 137:
+                msg = new MvccAckRequestTx();
+
+                break;
+
+            case 138:
+                msg = new MvccFutureResponse();
+
+                break;
+
+            case 139:
+                msg = new MvccQuerySnapshotRequest();
+
+                break;
+
+            case 140:
+                msg = new MvccAckRequestQueryCntr();
+
+                break;
+
+            case 141:
+                msg = new MvccSnapshotResponse();
+
+                break;
+
+            case 142:
+                msg = new MvccWaitTxsRequest();
+
+                break;
+
+            case 143:
+                msg = new GridCacheMvccEntryInfo();
+
+                break;
+
+            case 144:
+                msg = new GridDhtTxQueryEnlistResponse();
+
+                break;
+
+            case 145:
+                msg = new MvccAckRequestQueryId();
+
+                break;
+
+            case 146:
+                msg = new MvccAckRequestTxAndQueryCntr();
+
+                break;
+
+            case 147:
+                msg = new MvccAckRequestTxAndQueryId();
+
+                break;
+
+            case 148:
+                msg = new MvccVersionImpl();
+
+                break;
+
+            case 149:
+                msg = new MvccActiveQueriesMessage();
+
+                break;
+
+            case 150:
+                msg = new MvccSnapshotWithoutTxs();
+
+                break;
+
+            case 151:
+                msg = new GridNearTxQueryEnlistRequest();
+
+                break;
+
+            case 152:
+                msg = new GridNearTxQueryEnlistResponse();
+
+                break;
+
+            case 153:
+                msg = new GridNearTxQueryResultsEnlistRequest();
+
+                break;
+
+            case 154:
+                msg = new GridNearTxQueryResultsEnlistResponse();
+
+                break;
+
+            case 155:
+                msg = new GridDhtTxQueryEnlistRequest();
+
+                break;
+
+            case 156:
+                msg = new GridDhtTxQueryFirstEnlistRequest();
+
+                break;
+
+            case 157:
+                msg = new PartitionUpdateCounters();
+
+                break;
+
+            case 158:
+                msg = new GridDhtPartitionSupplyMessageV2();
+
+                break;
+
             // [-3..119] [124..129] [-23..-27] [-36..-55]- this
             // [120..123] - DR
             // [-4..-22, -30..-35] - SQL
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java
index cea1786..01d8604 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java
@@ -470,7 +470,7 @@
                             "in some other mode). Either change IgniteConfiguration.getDeploymentMode() property to " +
                             "SHARED or CONTINUOUS or remove class from local classpath and any of " +
                             "the local GAR deployments that may have it [cls=" + meta.className() + ", depMode=" +
-                            locDep.deployMode() + ']', "Failed to deploy class in SHARED or CONTINUOUS mode.");
+                            locDep.deployMode() + ']');
 
                         return null;
                     }
@@ -478,8 +478,7 @@
                     if (!locDep.userVersion().equals(meta.userVersion())) {
                         U.warn(log, "Failed to deploy class in SHARED or CONTINUOUS mode for given user version " +
                             "(class is locally deployed for a different user version) [cls=" + meta.className() +
-                            ", localVer=" + locDep.userVersion() + ", otherVer=" + meta.userVersion() + ']',
-                            "Failed to deploy class in SHARED or CONTINUOUS mode.");
+                            ", localVer=" + locDep.userVersion() + ", otherVer=" + meta.userVersion() + ']');
 
                         return null;
                     }
@@ -664,4 +663,4 @@
             return S.toString(LocalDeployment.class, this, super.toString());
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java
index 8cdcbf3..84bcab1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java
@@ -26,6 +26,7 @@
 import org.apache.ignite.cluster.BaselineNode;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState;
 import org.apache.ignite.internal.util.GridConcurrentHashSet;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -108,10 +109,14 @@
     /** */
     private final P1<ClusterNode> aliveNodePred;
 
+    /** */
+    private final MvccCoordinator mvccCrd;
+
     /**
      * @param topVer Topology version.
      * @param state Current cluster state.
      * @param loc Local node.
+     * @param mvccCrd MVCC coordinator node.
      * @param rmtNodes Remote nodes.
      * @param allNodes All nodes.
      * @param srvNodes Server nodes.
@@ -130,6 +135,7 @@
         AffinityTopologyVersion topVer,
         DiscoveryDataClusterState state,
         ClusterNode loc,
+        MvccCoordinator mvccCrd,
         List<ClusterNode> rmtNodes,
         List<ClusterNode> allNodes,
         List<ClusterNode> srvNodes,
@@ -148,6 +154,7 @@
         this.topVer = topVer;
         this.state = state;
         this.loc = loc;
+        this.mvccCrd = mvccCrd;
         this.rmtNodes = rmtNodes;
         this.allNodes = allNodes;
         this.srvNodes = srvNodes;
@@ -157,7 +164,7 @@
         this.allCacheNodes = allCacheNodes;
         this.cacheGrpAffNodes = cacheGrpAffNodes;
         this.nodeMap = nodeMap;
-        alives.addAll(alives0);
+        this.alives.addAll(alives0);
         this.minNodeVer = minNodeVer;
         this.minSrvNodeVer = minSrvNodeVer;
         this.nodeIdToConsIdx = nodeIdToConsIdx;
@@ -177,6 +184,13 @@
     }
 
     /**
+     * @return Mvcc coordinator node.
+     */
+    @Nullable public MvccCoordinator mvccCoordinator() {
+        return mvccCrd;
+    }
+
+    /**
      * @return Topology version.
      */
     public AffinityTopologyVersion version() {
@@ -461,6 +475,7 @@
             ver,
             state == null ? this.state : state,
             loc,
+            mvccCrd,
             rmtNodes,
             allNodes,
             srvNodes,
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
index 38ce9bd..b9af961 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java
@@ -70,7 +70,9 @@
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.cluster.NodeOrderComparator;
 import org.apache.ignite.internal.events.DiscoveryCustomEvent;
 import org.apache.ignite.internal.managers.GridManagerAdapter;
@@ -83,6 +85,7 @@
 import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
 import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
 import org.apache.ignite.internal.processors.cluster.BaselineTopology;
 import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateFinishMessage;
@@ -101,6 +104,7 @@
 import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.P1;
+import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.LT;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -130,6 +134,7 @@
 import org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator;
 import org.apache.ignite.spi.discovery.DiscoverySpiOrderSupport;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode;
 import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.thread.OomExceptionHandler;
 import org.jetbrains.annotations.NotNull;
@@ -207,7 +212,7 @@
     /** Predicate filtering client nodes. */
     private static final IgnitePredicate<ClusterNode> FILTER_CLI = new P1<ClusterNode>() {
         @Override public boolean apply(ClusterNode n) {
-            return CU.clientNode(n);
+            return n.isClient();
         }
     };
 
@@ -217,6 +222,9 @@
     /** Discovery event worker. */
     private final DiscoveryWorker discoWrk = new DiscoveryWorker();
 
+    /** Discovery event notyfier worker. */
+    private final DiscoveryMessageNotifyerWorker discoNotifierWrk = new DiscoveryMessageNotifyerWorker();
+
     /** Network segment check worker. */
     private SegmentCheckWorker segChkWrk;
 
@@ -579,16 +587,23 @@
                 }
             }
 
-            @Override public void onDiscovery(
+            @Override public IgniteInternalFuture onDiscovery(
                 final int type,
                 final long topVer,
                 final ClusterNode node,
                 final Collection<ClusterNode> topSnapshot,
                 final Map<Long, Collection<ClusterNode>> snapshots,
-                @Nullable DiscoverySpiCustomMessage spiCustomMsg) {
-                synchronized (discoEvtMux) {
-                    onDiscovery0(type, topVer, node, topSnapshot, snapshots, spiCustomMsg);
-                }
+                @Nullable DiscoverySpiCustomMessage spiCustomMsg
+            ) {
+                GridFutureAdapter notificationFut = new GridFutureAdapter();
+
+                discoNotifierWrk.submit(notificationFut, () -> {
+                    synchronized (discoEvtMux) {
+                        onDiscovery0(type, topVer, node, topSnapshot, snapshots, spiCustomMsg);
+                    }
+                });
+
+                return notificationFut;
             }
 
             /**
@@ -642,6 +657,8 @@
                     updateClientNodes(node.id());
                 }
 
+                ctx.coordinators().onDiscoveryEvent(type, topSnapshot, topVer, customMsg);
+
                 boolean locJoinEvt = type == EVT_NODE_JOINED && node.id().equals(locNode.id());
 
                 ChangeGlobalStateFinishMessage stateFinishMsg = null;
@@ -913,6 +930,8 @@
             }
         });
 
+        new IgniteThread(discoNotifierWrk).start();
+
         startSpi();
 
         registeredDiscoSpi = true;
@@ -1221,8 +1240,7 @@
                         "(all nodes in topology should have identical value) " +
                         "[locPreferIpV4=" + locPreferIpV4 + ", rmtPreferIpV4=" + rmtPreferIpV4 +
                         ", locId8=" + U.id8(locNode.id()) + ", rmtId8=" + U.id8(n.id()) +
-                        ", rmtAddrs=" + U.addressesAsString(n) + ", rmtNode=" + U.toShortString(n) + "]",
-                        "Local and remote 'java.net.preferIPv4Stack' system properties do not match.");
+                        ", rmtAddrs=" + U.addressesAsString(n) + ", rmtNode=" + U.toShortString(n) + "]");
 
                 ipV4Warned = true;
             }
@@ -1663,7 +1681,7 @@
         }
 
         if (!locJoin.isDone())
-            locJoin.onDone(new IgniteCheckedException("Failed to wait for local node joined event (grid is stopping)."));
+            locJoin.onDone(new NodeStoppingException("Failed to wait for local node joined event (grid is stopping)."));
     }
 
     /** {@inheritDoc} */
@@ -1680,6 +1698,10 @@
 
         U.join(discoWrk, log);
 
+        U.cancel(discoNotifierWrk);
+
+        U.join(discoNotifierWrk, log);
+
         // Stop SPI itself.
         stopSpi();
 
@@ -2314,8 +2336,12 @@
     public boolean reconnectSupported() {
         DiscoverySpi spi = getSpi();
 
-        return ctx.discovery().localNode().isClient() &&
-            (spi instanceof IgniteDiscoverySpi) &&
+        ClusterNode clusterNode = ctx.discovery().localNode();
+
+        boolean client = (clusterNode instanceof TcpDiscoveryNode) ?
+                (((TcpDiscoveryNode) clusterNode).clientRouterNodeId() != null) : clusterNode.isClient();
+
+        return client && (spi instanceof IgniteDiscoverySpi) &&
             ((IgniteDiscoverySpi)spi).clientReconnectSupported();
     }
 
@@ -2348,6 +2374,8 @@
         Collection<ClusterNode> topSnapshot) {
         assert topSnapshot.contains(loc);
 
+        MvccCoordinator mvccCrd = ctx.coordinators().assignedCoordinator();
+
         HashSet<UUID> alives = U.newHashSet(topSnapshot.size());
         HashMap<UUID, ClusterNode> nodeMap = U.newHashMap(topSnapshot.size());
 
@@ -2375,7 +2403,7 @@
                 if (!node.isLocal())
                     rmtNodes.add(node);
 
-                if (!CU.clientNode(node)) {
+                if (!node.isClient()) {
                     srvNodes.add(node);
 
                     if (minSrvVer == null)
@@ -2449,6 +2477,7 @@
             topVer,
             state,
             loc,
+            mvccCrd,
             Collections.unmodifiableList(rmtNodes),
             Collections.unmodifiableList(allNodes),
             Collections.unmodifiableList(srvNodes),
@@ -2639,6 +2668,86 @@
         }
     }
 
+    /**
+     *
+     */
+    private class DiscoveryMessageNotifyerWorker extends GridWorker {
+        /** Queue. */
+        private final BlockingQueue<T2<GridFutureAdapter, Runnable>> queue = new LinkedBlockingQueue<>();
+
+        /**
+         * Default constructor.
+         */
+        protected DiscoveryMessageNotifyerWorker() {
+            super(ctx.igniteInstanceName(), "disco-notyfier-worker", GridDiscoveryManager.this.log, ctx.workersRegistry());
+        }
+
+        /**
+         *
+         */
+        private void body0() throws InterruptedException {
+            T2<GridFutureAdapter, Runnable> notification = queue.take();
+
+            try {
+                notification.get2().run();
+            }
+            finally {
+                notification.get1().onDone();
+            }
+        }
+
+        /**
+         * @param cmd Command.
+         */
+        public synchronized void submit(GridFutureAdapter notificationFut, Runnable cmd) {
+            if (isCancelled()) {
+                notificationFut.onDone();
+
+                return;
+            }
+
+            queue.add(new T2<>(notificationFut, cmd));
+        }
+
+        /**
+         * Cancel thread execution and completes all notification futures.
+         */
+        @Override public synchronized void cancel() {
+            super.cancel();
+
+            while (!queue.isEmpty()) {
+                T2<GridFutureAdapter, Runnable> notification = queue.poll();
+
+                if (notification != null)
+                    notification.get1().onDone();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+            while (!isCancelled()) {
+                try {
+                    body0();
+                }
+                catch (InterruptedException e) {
+                    if (!isCancelled)
+                        ctx.failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, e));
+
+                    throw e;
+                }
+                catch (Throwable t) {
+                    U.error(log, "Exception in discovery notyfier worker thread.", t);
+
+                    FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION;
+
+                    ctx.failure().process(new FailureContext(type, t));
+
+                    throw t;
+                }
+            }
+        }
+    }
+
     /** Worker for discovery events. */
     private class DiscoveryWorker extends GridWorker {
         /** */
@@ -2747,15 +2856,13 @@
                     throw e;
                 }
                 catch (Throwable t) {
-                    U.error(log, "Exception in discovery worker thread.", t);
+                    U.error(log, "Exception in discovery event worker thread.", t);
 
-                    if (t instanceof Error) {
-                        FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION;
+                    FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION;
 
-                        ctx.failure().process(new FailureContext(type, t));
+                    ctx.failure().process(new FailureContext(type, t));
 
-                        throw t;
-                    }
+                    throw t;
                 }
             }
         }
@@ -2770,7 +2877,7 @@
 
             AffinityTopologyVersion topVer = evt.get2();
 
-            if (type == EVT_NODE_METRICS_UPDATED && topVer.compareTo(discoCache.version()) < 0)
+            if (type == EVT_NODE_METRICS_UPDATED && (discoCache == null || topVer.compareTo(discoCache.version()) < 0))
                 return;
 
             ClusterNode node = evt.get3();
@@ -3357,6 +3464,7 @@
             topVer,
             discoCache.state(),
             discoCache.localNode(),
+            discoCache.mvccCoordinator(),
             discoCache.remoteNodes(),
             allNodes,
             discoCache.serverNodes(),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java
index cbc706a..a143122 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java
@@ -59,11 +59,4 @@
      * @param cacheMetrics Cache metrics.
      */
     public void setCacheMetrics(Map<Integer, CacheMetrics> cacheMetrics);
-
-    /**
-     * Whether this node is cache client (see {@link IgniteConfiguration#isClientMode()}).
-     *
-     * @return {@code True if client}.
-     */
-    public boolean isCacheClient();
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
index 6808450..d4daab8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java
@@ -1468,7 +1468,7 @@
         }
 
         /** {@inheritDoc} */
-        public IgnitePredicate<? extends Event> listener() {
+        @Override public IgnitePredicate<? extends Event> listener() {
             return lsnr;
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/failover/GridFailoverContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/failover/GridFailoverContextImpl.java
index 735fce9..6b3c29a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/managers/failover/GridFailoverContextImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/failover/GridFailoverContextImpl.java
@@ -101,7 +101,7 @@
     }
 
     /** {@inheritDoc} */
-    public int partition() {
+    @Override public int partition() {
         return partId;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java
index ccd9946..0369b66 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java
@@ -43,16 +43,18 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentMap;
 
 import org.apache.ignite.internal.util.GridUnsafe;
 import org.apache.ignite.internal.util.SerializableTransient;
+import org.apache.ignite.internal.util.TransientSerializable;
+import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteProductVersion;
 import org.apache.ignite.marshaller.MarshallerContext;
 import org.apache.ignite.marshaller.MarshallerExclusions;
-import org.apache.ignite.marshaller.MarshallerUtils;
 
 import static java.lang.reflect.Modifier.isFinal;
 import static java.lang.reflect.Modifier.isPrivate;
@@ -92,6 +94,8 @@
 import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.STR;
 import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.UUID;
 import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.computeSerialVersionUid;
+import static org.apache.ignite.marshaller.MarshallerUtils.jobReceiverVersion;
+import static org.apache.ignite.marshaller.MarshallerUtils.jobSenderVersion;
 
 /**
  * Class descriptor.
@@ -172,6 +176,9 @@
     /** Method returns serializable transient fields. */
     private Method serTransMtd;
 
+    /** Method returns transient serializable fields. */
+    private Method transSerMtd;
+
     /**
      * Creates descriptor for class.
      *
@@ -448,16 +455,16 @@
                         readObjMtds.add(mtd);
 
                         final SerializableTransient serTransAn = c.getAnnotation(SerializableTransient.class);
+                        final TransientSerializable transSerAn = c.getAnnotation(TransientSerializable.class);
 
                         // Custom serialization policy for transient fields.
                         if (serTransAn != null) {
                             try {
-                                serTransMtd = c.getDeclaredMethod(serTransAn.methodName(), cls, IgniteProductVersion.class);
+                                serTransMtd = c.getDeclaredMethod(serTransAn.methodName(), IgniteProductVersion.class);
 
                                 int mod = serTransMtd.getModifiers();
 
-                                if (isStatic(mod) && isPrivate(mod)
-                                    && serTransMtd.getReturnType() == String[].class)
+                                if (isStatic(mod) && isPrivate(mod) && serTransMtd.getReturnType() == String[].class)
                                     serTransMtd.setAccessible(true);
                                 else
                                     // Set method back to null if it has incorrect signature.
@@ -468,6 +475,24 @@
                             }
                         }
 
+                        // Custom serialization policy for non-transient fields.
+                        if (transSerAn != null) {
+                            try {
+                                transSerMtd = c.getDeclaredMethod(transSerAn.methodName(), IgniteProductVersion.class);
+
+                                int mod = transSerMtd.getModifiers();
+
+                                if (isStatic(mod) && isPrivate(mod) && transSerMtd.getReturnType() == String[].class)
+                                    transSerMtd.setAccessible(true);
+                                else
+                                    // Set method back to null if it has incorrect signature.
+                                    transSerMtd = null;
+                            }
+                            catch (NoSuchMethodException ignored) {
+                                transSerMtd = null;
+                            }
+                        }
+
                         Field[] clsFields0 = c.getDeclaredFields();
 
                         Map<String, Field> fieldNames = new HashMap<>();
@@ -824,7 +849,7 @@
                 writeTypeData(out);
 
                 out.writeShort(checksum);
-                out.writeSerializable(obj, writeObjMtds, serializableFields(obj.getClass(), obj, null));
+                out.writeSerializable(obj, writeObjMtds, fields(obj.getClass(), jobReceiverVersion()));
 
                 break;
 
@@ -840,45 +865,52 @@
      * ignored.
      *
      * @param cls Class.
-     * @param obj Object.
      * @param ver Job sender version.
      * @return Serializable fields.
      */
     @SuppressWarnings("ForLoopReplaceableByForEach")
-    private Fields serializableFields(Class<?> cls, Object obj, IgniteProductVersion ver) {
-        if (serTransMtd == null)
+    private Fields fields(Class<?> cls, IgniteProductVersion ver) {
+        if (ver == null // No context available.
+            || serTransMtd == null && transSerMtd == null)
             return fields;
 
         try {
-            final String[] transFields = (String[])serTransMtd.invoke(cls, obj, ver);
+            final String[] transFields = serTransMtd == null ? null : (String[])serTransMtd.invoke(null, ver);
+            final String[] serFields = transSerMtd == null ? null : (String[])transSerMtd.invoke(null, ver);
 
-            if (transFields == null || transFields.length == 0)
+            if (F.isEmpty(transFields) && F.isEmpty(serFields))
                 return fields;
 
-            List<FieldInfo> clsFields = new ArrayList<>();
+            Map<String, FieldInfo> clsFields = new TreeMap<>();
 
-            clsFields.addAll(fields.fields.get(0).fields);
-
-            for (int i = 0; i < transFields.length; i++) {
-                final String fieldName = transFields[i];
-
-                final Field f = cls.getDeclaredField(fieldName);
-
-                FieldInfo fieldInfo = new FieldInfo(f, f.getName(),
-                    GridUnsafe.objectFieldOffset(f), fieldType(f.getType()));
-
-                clsFields.add(fieldInfo);
+            for (FieldInfo field : fields.fields.get(0).fields) {
+                clsFields.put(field.fieldName, field);
             }
 
-            Collections.sort(clsFields, new Comparator<FieldInfo>() {
-                @Override public int compare(FieldInfo t1, FieldInfo t2) {
-                    return t1.name().compareTo(t2.name());
+            // Add serializable transient fields
+            if (!F.isEmpty(transFields)) {
+                for (int i = 0; i < transFields.length; i++) {
+                    final String fieldName = transFields[i];
+
+                    final Field f = cls.getDeclaredField(fieldName);
+
+                    FieldInfo fieldInfo = new FieldInfo(f, f.getName(),
+                        GridUnsafe.objectFieldOffset(f), fieldType(f.getType()));
+
+                    clsFields.put(fieldName, fieldInfo);
                 }
-            });
+            }
 
-            List<ClassFields> fields = new ArrayList<>();
+            // Exclude non-transient fields which shouldn't be serialized.
+            if (!F.isEmpty(serFields)) {
+                for (int i = 0; i < serFields.length; i++) {
+                    clsFields.remove(serFields[i]);
+                }
+            }
 
-            fields.add(new ClassFields(clsFields));
+            List<ClassFields> fields = new ArrayList<>(1);
+
+            fields.add(new ClassFields(new ArrayList<>(clsFields.values())));
 
             return new Fields(fields);
         }
@@ -919,12 +951,7 @@
             case SERIALIZABLE:
                 verifyChecksum(in.readShort());
 
-                // If no serialize method, then unmarshal as usual.
-                if (serTransMtd != null)
-                    return in.readSerializable(cls, readObjMtds, readResolveMtd,
-                        serializableFields(cls, null, MarshallerUtils.jobSenderVersion()));
-                else
-                    return in.readSerializable(cls, readObjMtds, readResolveMtd, fields);
+                return in.readSerializable(cls, readObjMtds, readResolveMtd, fields(cls, jobSenderVersion()));
 
             default:
                 assert false : "Unexpected type: " + type;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java
index 4982fa3..c5eba60 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java
@@ -262,8 +262,6 @@
 
     /** {@inheritDoc} */
     @Override public long allocatePage(int grpId, int partId, byte flags) {
-        memMetrics.incrementTotalAllocatedPages();
-
         long relPtr = borrowFreePage();
         long absPtr = 0;
 
@@ -579,6 +577,8 @@
             if (freePageListHead.compareAndSet(freePageRelPtrMasked, relPtr)) {
                 allocatedPages.decrementAndGet();
 
+                memMetrics.updateTotalAllocatedPages(-1L);
+
                 return;
             }
         }
@@ -607,6 +607,8 @@
 
                     allocatedPages.incrementAndGet();
 
+                    memMetrics.updateTotalAllocatedPages(1L);
+
                     return freePageRelPtr;
                 }
             }
@@ -807,6 +809,8 @@
 
                     allocatedPages.incrementAndGet();
 
+                    memMetrics.updateTotalAllocatedPages(1L);
+
                     return pageIdx;
                 }
             }
@@ -837,7 +841,7 @@
     }
 
     /** {@inheritDoc} */
-    public int checkpointBufferPagesCount() {
+    @Override public int checkpointBufferPagesCount() {
         return 0;
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java
index 5475bef..ff0c66a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java
@@ -26,6 +26,7 @@
 import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedManager;
 import org.apache.ignite.internal.processors.cache.StoredCacheData;
+import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker;
 import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport;
 
 /**
@@ -43,6 +44,18 @@
     public void finishRecover() throws IgniteCheckedException;
 
     /**
+     * Initializes disk store structures.
+     *
+     * @param cacheId Cache id.
+     * @param partitions Partitions count.
+     * @param workingDir Working directory.
+     * @param tracker Allocation tracker.
+     * @throws IgniteCheckedException If failed.
+     */
+    void initialize(int cacheId, int partitions, String workingDir, AllocatedPageTracker tracker)
+        throws IgniteCheckedException;
+
+    /**
      * Callback called when a cache is starting.
      *
      * @param grpDesc Cache group descriptor.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java
index 2b6358b..12fd3e9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport;
 
 /**
@@ -109,12 +110,14 @@
     public int truncate(WALPointer low, WALPointer high);
 
     /**
-     * Gives a hint to WAL manager to compact WAL until given pointer (exclusively).
-     * Compaction implies filtering out physical records and ZIP compression.
+     * Notifies {@code this} about latest checkpoint pointer.
+     * <p>
+     * Current implementations, in fact, react by keeping all WAL segments uncompacted starting from index prior to
+     * the index of {@code ptr}. Compaction implies filtering out physical records and ZIP compression.
      *
      * @param ptr Pointer for which it is safe to compact the log.
      */
-    public void allowCompressionUntil(WALPointer ptr);
+    public void notchLastCheckpointPtr(WALPointer ptr);
 
     /**
      * @return Total number of segments in the WAL archive.
@@ -127,6 +130,16 @@
     public long lastArchivedSegment();
 
     /**
+     * @return Last compacted segment index.
+     */
+    public long lastCompactedSegment();
+
+    /**
+     * @return Max allowed index of archived segment to delete or -1 if it does not exist.
+     */
+    public long maxArchivedSegmentToDelete();
+
+    /**
      * Checks if WAL segment is under lock or reserved
      *
      * @param ptr Pointer to check.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/StorageException.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/StorageException.java
deleted file mode 100644
index debc391..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/StorageException.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.pagemem.wal;
-
-import java.io.IOException;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.InvalidEnvironmentException;
-import org.jetbrains.annotations.NotNull;
-
-/**
- * Exception is needed to distinguish WAL manager & page store critical I/O errors.
- */
-public class StorageException extends IgniteCheckedException implements InvalidEnvironmentException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * @param msg Error message.
-     * @param cause Error cause.
-     */
-    public StorageException(String msg, @NotNull IOException cause) {
-        super(msg, cause);
-    }
-
-    /**
-     * @param e Cause exception.
-     */
-    public StorageException(IOException e) {
-        super(e);
-    }
-
-    /**
-     * @param msg Error message
-     */
-    public StorageException(String msg) {
-        super(msg);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java
index 585336a..a555aae 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java
@@ -184,7 +184,16 @@
         RESERVED,
 
         /** Rotated id part record. */
-        ROTATED_ID_PART_RECORD;
+        ROTATED_ID_PART_RECORD,
+
+        /** */
+        MVCC_DATA_PAGE_MARK_UPDATED_RECORD,
+
+        /** */
+        MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD,
+
+        /** */
+        MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD;
 
         /** */
         private static final RecordType[] VALS = RecordType.values();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccMarkUpdatedRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccMarkUpdatedRecord.java
new file mode 100644
index 0000000..5e89f8e
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccMarkUpdatedRecord.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.pagemem.wal.record.delta;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * MVCC mark updated record.
+ */
+public class DataPageMvccMarkUpdatedRecord extends PageDeltaRecord {
+    /** */
+    private int itemId;
+
+    /** */
+    private long newMvccCrd;
+
+    /** */
+    private long newMvccCntr;
+
+    /** */
+    private int newMvccOpCntr;
+
+    /**
+     * @param grpId Cache group ID.
+     * @param pageId Page ID.
+     * @param itemId Item id.
+     * @param newMvccCrd New MVCC coordinator version.
+     * @param newMvccCntr New MVCC counter version.
+     * @param newMvccOpCntr New MVCC operation counter.
+     */
+    public DataPageMvccMarkUpdatedRecord(int grpId, long pageId, int itemId, long newMvccCrd, long newMvccCntr, int newMvccOpCntr) {
+        super(grpId, pageId);
+
+        this.itemId = itemId;
+        this.newMvccCrd = newMvccCrd;
+        this.newMvccCntr = newMvccCntr;
+        this.newMvccOpCntr = newMvccOpCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
+        DataPageIO io = PageIO.getPageIO(pageAddr);
+
+        io.updateNewVersion(pageAddr, itemId, pageMem.pageSize(), newMvccCrd, newMvccCntr, newMvccOpCntr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public RecordType type() {
+        return RecordType.MVCC_DATA_PAGE_MARK_UPDATED_RECORD;
+    }
+
+    /**
+     * @return Item id.
+     */
+    public int itemId() {
+        return itemId;
+    }
+
+    /**
+     * @return New MVCC coordinator version.
+     */
+    public long newMvccCrd() {
+        return newMvccCrd;
+    }
+
+    /**
+     * @return New MVCC counter version.
+     */
+    public long newMvccCntr() {
+        return newMvccCntr;
+    }
+
+    /**
+     * @return New MVCC operation counter.
+     */
+    public int newMvccOpCntr() {
+        return newMvccOpCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(DataPageMvccMarkUpdatedRecord.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateNewTxStateHintRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateNewTxStateHintRecord.java
new file mode 100644
index 0000000..4a244a1
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateNewTxStateHintRecord.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.pagemem.wal.record.delta;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * MVCC update tx state hint record.
+ */
+public class DataPageMvccUpdateNewTxStateHintRecord extends PageDeltaRecord {
+    /** */
+    private int itemId;
+
+    /** */
+    private byte txState;
+
+    /**
+     * @param grpId Cache group ID.
+     * @param pageId Page ID.
+     * @param itemId Item id.
+     * @param txState Tx state hint.
+     */
+    public DataPageMvccUpdateNewTxStateHintRecord(int grpId, long pageId, int itemId, byte txState) {
+        super(grpId, pageId);
+
+        this.itemId = itemId;
+        this.txState = txState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
+        DataPageIO io = PageIO.getPageIO(pageAddr);
+
+        io.updateNewTxState(pageAddr, itemId, pageMem.pageSize(), txState);
+    }
+
+    /** {@inheritDoc} */
+    @Override public RecordType type() {
+        return RecordType.MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD;
+    }
+
+    /**
+     * @return Item id.
+     */
+    public int itemId() {
+        return itemId;
+    }
+
+    /**
+     * @return Tx state hint.
+     */
+    public byte txState() {
+        return txState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(DataPageMvccUpdateNewTxStateHintRecord.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateTxStateHintRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateTxStateHintRecord.java
new file mode 100644
index 0000000..7e53609
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageMvccUpdateTxStateHintRecord.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.pagemem.wal.record.delta;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * MVCC update tx state hint record.
+ */
+public class DataPageMvccUpdateTxStateHintRecord extends PageDeltaRecord {
+    /** */
+    private int itemId;
+
+    /** */
+    private byte txState;
+
+    /**
+     * @param grpId Cache group ID.
+     * @param pageId Page ID.
+     * @param itemId Item id.
+     * @param txState Tx state hint.
+     */
+    public DataPageMvccUpdateTxStateHintRecord(int grpId, long pageId, int itemId, byte txState) {
+        super(grpId, pageId);
+
+        this.itemId = itemId;
+        this.txState = txState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
+        DataPageIO io = PageIO.getPageIO(pageAddr);
+
+        io.updateTxState(pageAddr, itemId, pageMem.pageSize(), txState);
+    }
+
+    /** {@inheritDoc} */
+    @Override public RecordType type() {
+        return RecordType.MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD;
+    }
+
+    /**
+     * @return Item id.
+     */
+    public int itemId() {
+        return itemId;
+    }
+
+    /**
+     * @return Tx state hint.
+     */
+    public byte txState() {
+        return txState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(DataPageMvccUpdateTxStateHintRecord.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java
index f78ab60..eb06d3e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java
@@ -17,24 +17,18 @@
 
 package org.apache.ignite.internal.processors.affinity;
 
-import org.apache.ignite.cluster.ClusterNode;
-
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 
 /**
  * Cached affinity calculations.
  */
 public interface AffinityAssignment {
     /**
-     * @return {@code True} if related discovery event did not not cause affinity assignment change and
-     *    this assignment is just reference to the previous one.
-     */
-    public boolean clientEventChange();
-
-    /**
      * @return Affinity assignment computed by affinity function.
      */
     public List<List<ClusterNode>> idealAssignment();
@@ -90,4 +84,9 @@
      * @return Backup partitions for specified node ID.
      */
     public Set<Integer> backupPartitions(UUID nodeId);
+
+    /**
+     * @return Mvcc coordinator.
+     */
+    public MvccCoordinator mvccCoordinator();
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java
index cbec1a1..8ba2a06 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java
@@ -27,6 +27,7 @@
 import java.util.Set;
 import java.util.UUID;
 import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -40,6 +41,9 @@
     /** Topology version. */
     private final AffinityTopologyVersion topVer;
 
+    /** */
+    private final MvccCoordinator mvccCrd;
+
     /** Collection of calculated affinity nodes. */
     private List<List<ClusterNode>> assignment;
 
@@ -61,9 +65,6 @@
     /** */
     private transient List<List<ClusterNode>> idealAssignment;
 
-    /** */
-    private final boolean clientEvtChange;
-
     /**
      * Constructs cached affinity calculations item.
      *
@@ -73,7 +74,7 @@
         this.topVer = topVer;
         primary = new HashMap<>();
         backup = new HashMap<>();
-        clientEvtChange = false;
+        mvccCrd = null;
     }
 
     /**
@@ -83,7 +84,8 @@
      */
     GridAffinityAssignment(AffinityTopologyVersion topVer,
         List<List<ClusterNode>> assignment,
-        List<List<ClusterNode>> idealAssignment) {
+        List<List<ClusterNode>> idealAssignment,
+        MvccCoordinator mvccCrd) {
         assert topVer != null;
         assert assignment != null;
         assert idealAssignment != null;
@@ -91,10 +93,10 @@
         this.topVer = topVer;
         this.assignment = assignment;
         this.idealAssignment = idealAssignment.equals(assignment) ? assignment : idealAssignment;
+        this.mvccCrd = mvccCrd;
 
         primary = new HashMap<>();
         backup = new HashMap<>();
-        clientEvtChange = false;
 
         initPrimaryBackupMaps();
     }
@@ -110,36 +112,27 @@
         idealAssignment = aff.idealAssignment;
         primary = aff.primary;
         backup = aff.backup;
-
-        clientEvtChange = true;
-    }
-
-    /**
-     * @return {@code True} if related discovery event did not not cause affinity assignment change and
-     *    this assignment is just reference to the previous one.
-     */
-    public boolean clientEventChange() {
-        return clientEvtChange;
+        mvccCrd = aff.mvccCrd;
     }
 
     /**
      * @return Affinity assignment computed by affinity function.
      */
-    public List<List<ClusterNode>> idealAssignment() {
+    @Override public List<List<ClusterNode>> idealAssignment() {
         return idealAssignment;
     }
 
     /**
      * @return Affinity assignment.
      */
-    public List<List<ClusterNode>> assignment() {
+    @Override public List<List<ClusterNode>> assignment() {
         return assignment;
     }
 
     /**
      * @return Topology version.
      */
-    public AffinityTopologyVersion topologyVersion() {
+    @Override public AffinityTopologyVersion topologyVersion() {
         return topVer;
     }
 
@@ -232,7 +225,7 @@
      * @param nodeId Node ID to get primary partitions for.
      * @return Primary partitions for specified node ID.
      */
-    public Set<Integer> primaryPartitions(UUID nodeId) {
+    @Override public Set<Integer> primaryPartitions(UUID nodeId) {
         Set<Integer> set = primary.get(nodeId);
 
         return set == null ? Collections.<Integer>emptySet() : set;
@@ -244,7 +237,7 @@
      * @param nodeId Node ID to get backup partitions for.
      * @return Backup partitions for specified node ID.
      */
-    public Set<Integer> backupPartitions(UUID nodeId) {
+    @Override public Set<Integer> backupPartitions(UUID nodeId) {
         Set<Integer> set = backup.get(nodeId);
 
         return set == null ? Collections.<Integer>emptySet() : set;
@@ -283,6 +276,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public MvccCoordinator mvccCoordinator() {
+        return mvccCrd;
+    }
+
+    /** {@inheritDoc} */
     @Override public int hashCode() {
         return topVer.hashCode();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java
index 34e2b0a..2290ce6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java
@@ -43,6 +43,7 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.cluster.NodeOrderComparator;
 import org.apache.ignite.internal.managers.discovery.DiscoCache;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cache.ExchangeDiscoveryEvents;
 import org.apache.ignite.internal.processors.cluster.BaselineTopology;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -202,11 +203,28 @@
      * @param affAssignment Affinity assignment for topology version.
      */
     public void initialize(AffinityTopologyVersion topVer, List<List<ClusterNode>> affAssignment) {
+        MvccCoordinator mvccCrd = null;
+
+        if (!locCache)
+            mvccCrd = ctx.cache().context().coordinators().currentCoordinator(topVer);
+
+        initialize(topVer, affAssignment, mvccCrd);
+    }
+
+    /**
+     * Initializes affinity with given topology version and assignment.
+     *
+     * @param topVer Topology version.
+     * @param affAssignment Affinity assignment for topology version.
+     * @param mvccCrd Mvcc coordinator.
+     */
+    public void initialize(AffinityTopologyVersion topVer, List<List<ClusterNode>> affAssignment, MvccCoordinator mvccCrd) {
         assert topVer.compareTo(lastVersion()) >= 0 : "[topVer = " + topVer + ", last=" + lastVersion() + ']';
 
         assert idealAssignment != null;
+        assert mvccCrd == null || topVer.compareTo(mvccCrd.topologyVersion()) >= 0 : "[mvccCrd=" + mvccCrd + ", topVer=" + topVer + ']';
 
-        GridAffinityAssignment assignment = new GridAffinityAssignment(topVer, affAssignment, idealAssignment);
+        GridAffinityAssignment assignment = new GridAffinityAssignment(topVer, affAssignment, idealAssignment, mvccCrd);
 
         HistoryAffinityAssignment hAff = affCache.put(topVer, new HistoryAffinityAssignment(assignment));
 
@@ -745,7 +763,9 @@
 
         idealAssignment(aff.idealAssignment());
 
-        initialize(aff.lastVersion(), aff.assignments(aff.lastVersion()));
+        AffinityAssignment assign = aff.cachedAffinity(aff.lastVersion());
+
+        initialize(aff.lastVersion(), assign.assignment(), assign.mvccCoordinator());
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java
index 08333c3..4a0908c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java
@@ -423,7 +423,7 @@
             try {
                 GridAffinityAssignment assign = assign0 instanceof GridAffinityAssignment ?
                     (GridAffinityAssignment)assign0 :
-                    new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment());
+                    new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment(), assign0.mvccCoordinator());
 
                 AffinityInfo info = new AffinityInfo(
                     cctx.config().getAffinity(),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java
index abd5292..15d7e4e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java
@@ -184,7 +184,7 @@
 
             GridAffinityAssignment assign = assign0 instanceof GridAffinityAssignment ?
                 (GridAffinityAssignment)assign0 :
-                new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment());
+                new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment(), assign0.mvccCoordinator());
 
             return F.t(
                 affinityMessage(ctx, cctx.config().getAffinity()),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java
index 94eaab4..a949648 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java
@@ -18,6 +18,7 @@
 package org.apache.ignite.internal.processors.affinity;
 
 import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -42,21 +43,21 @@
     private final List<List<ClusterNode>> idealAssignment;
 
     /** */
-    private final boolean clientEvtChange;
+    private final MvccCoordinator mvccCrd;
 
     /**
      * @param assign Assignment.
      */
-    public HistoryAffinityAssignment(GridAffinityAssignment assign) {
+    HistoryAffinityAssignment(GridAffinityAssignment assign) {
         this.topVer = assign.topologyVersion();
         this.assignment = assign.assignment();
         this.idealAssignment = assign.idealAssignment();
-        this.clientEvtChange = assign.clientEventChange();
+        this.mvccCrd = assign.mvccCoordinator();
     }
 
     /** {@inheritDoc} */
-    @Override public boolean clientEventChange() {
-        return clientEvtChange;
+    @Override public MvccCoordinator mvccCoordinator() {
+        return mvccCrd;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java
index 2871e82..5859452 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java
@@ -58,6 +58,8 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
+import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateFinishMessage;
 import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState;
 import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.GridPartitionStateMap;
@@ -70,7 +72,6 @@
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.lang.IgniteClosure;
-import org.apache.ignite.lang.IgniteFuture;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.jetbrains.annotations.Nullable;
@@ -168,7 +169,7 @@
             !DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(customMsg))
             return;
 
-        if ((!CU.clientNode(node) && (type == EVT_NODE_FAILED || type == EVT_NODE_JOINED || type == EVT_NODE_LEFT)) ||
+        if ((!node.isClient() && (type == EVT_NODE_FAILED || type == EVT_NODE_JOINED || type == EVT_NODE_LEFT)) ||
             DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(customMsg)) {
             synchronized (mux) {
                 assert lastAffVer == null || topVer.compareTo(lastAffVer) > 0 :
@@ -459,7 +460,11 @@
                         if (grpHolder.client()) {
                             ClientCacheDhtTopologyFuture topFut = new ClientCacheDhtTopologyFuture(topVer);
 
-                            grp.topology().updateTopologyVersion(topFut, discoCache, -1, false);
+                            grp.topology().updateTopologyVersion(topFut,
+                                discoCache,
+                                cctx.coordinators().currentCoordinator(),
+                                -1,
+                                false);
 
                             grpHolder = new CacheGroupHolder1(grp, grpHolder.affinity());
 
@@ -507,6 +512,7 @@
                 assert grp != null;
 
                 GridDhtAffinityAssignmentResponse res = fetchAffinity(topVer,
+                    cctx.coordinators().currentCoordinator(),
                     null,
                     discoCache,
                     grp.affinity(),
@@ -529,7 +535,11 @@
                         new ClusterTopologyServerNotFoundException("All server nodes left grid."));
                 }
 
-                grp.topology().updateTopologyVersion(topFut, discoCache, -1, false);
+                grp.topology().updateTopologyVersion(topFut,
+                    discoCache,
+                    cctx.coordinators().currentCoordinator(),
+                    -1,
+                    false);
 
                 grp.topology().update(topVer, partMap, null, Collections.<Integer>emptySet(), null, null);
 
@@ -1285,7 +1295,12 @@
 
             fetchFut.init(false);
 
-            fetchAffinity(evts.topologyVersion(), evts, evts.discoveryCache(), aff, fetchFut);
+            fetchAffinity(evts.topologyVersion(),
+                cctx.coordinators().currentCoordinator(),
+                evts,
+                evts.discoveryCache(),
+                aff,
+                fetchFut);
         }
     }
 
@@ -1383,9 +1398,13 @@
 
         final Map<Long, ClusterNode> nodesByOrder = new HashMap<>();
 
-        final Map<Integer, CacheGroupAffinityMessage> joinedNodeAff = msg.joinedNodeAffinity();
+        final Map<Integer, CacheGroupAffinityMessage> receivedAff = msg.joinedNodeAffinity();
 
-        assert F.isEmpty(affReq) || (!F.isEmpty(joinedNodeAff) && joinedNodeAff.size() >= affReq.size()) : msg;
+        assert F.isEmpty(affReq) || (!F.isEmpty(receivedAff) && receivedAff.size() >= affReq.size())
+            : ("Requested and received affinity are different " +
+                "[requestedCnt=" + (affReq != null ? affReq.size() : "none") +
+                ", receivedCnt=" + (receivedAff != null ? receivedAff.size() : "none") +
+                ", msg=" + msg + "]");
 
         forAllCacheGroups(false, new IgniteInClosureX<GridAffinityAssignmentCache>() {
             @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException {
@@ -1398,7 +1417,7 @@
                 if (affReq != null && affReq.contains(aff.groupId())) {
                     assert AffinityTopologyVersion.NONE.equals(aff.lastVersion());
 
-                    CacheGroupAffinityMessage affMsg = joinedNodeAff.get(aff.groupId());
+                    CacheGroupAffinityMessage affMsg = receivedAff.get(aff.groupId());
 
                     assert affMsg != null;
 
@@ -1679,6 +1698,7 @@
             int grpId = fetchFut.groupId();
 
             fetchAffinity(topVer,
+                cctx.coordinators().currentCoordinator(),
                 fut.events(),
                 fut.events().discoveryCache(),
                 cctx.cache().cacheGroup(grpId).affinity(),
@@ -1688,6 +1708,7 @@
 
     /**
      * @param topVer Topology version.
+     * @param mvccCrd Mvcc coordinator to set in affinity.
      * @param events Discovery events.
      * @param discoCache Discovery data cache.
      * @param affCache Affinity.
@@ -1695,7 +1716,9 @@
      * @throws IgniteCheckedException If failed.
      * @return Affinity assignment response.
      */
-    private GridDhtAffinityAssignmentResponse fetchAffinity(AffinityTopologyVersion topVer,
+    private GridDhtAffinityAssignmentResponse fetchAffinity(
+        AffinityTopologyVersion topVer,
+        MvccCoordinator mvccCrd,
         @Nullable ExchangeDiscoveryEvents events,
         DiscoCache discoCache,
         GridAffinityAssignmentCache affCache,
@@ -1708,7 +1731,7 @@
         if (res == null) {
             List<List<ClusterNode>> aff = affCache.calculate(topVer, events, discoCache);
 
-            affCache.initialize(topVer, aff);
+            affCache.initialize(topVer, aff, mvccCrd);
         }
         else {
             List<List<ClusterNode>> idealAff = res.idealAffinityAssignment(discoCache);
@@ -1725,7 +1748,7 @@
 
             assert aff != null : res;
 
-            affCache.initialize(topVer, aff);
+            affCache.initialize(topVer, aff, mvccCrd);
         }
 
         return res;
@@ -1774,8 +1797,10 @@
      * @throws IgniteCheckedException If failed.
      * @return Future completed when caches initialization is done.
      */
-    public IgniteInternalFuture<?> initCoordinatorCaches(final GridDhtPartitionsExchangeFuture fut,
-        final boolean newAff) throws IgniteCheckedException {
+    public IgniteInternalFuture<?> initCoordinatorCaches(
+        final GridDhtPartitionsExchangeFuture fut,
+        final boolean newAff
+    ) throws IgniteCheckedException {
         final List<IgniteInternalFuture<AffinityTopologyVersion>> futs = new ArrayList<>();
 
         final AffinityTopologyVersion topVer = fut.initialVersion();
@@ -1842,6 +1867,7 @@
                             @Override public void applyx(IgniteInternalFuture<GridDhtAffinityAssignmentResponse> fetchFut)
                                 throws IgniteCheckedException {
                                 fetchAffinity(prev.topologyVersion(),
+                                    null, // Pass null mvcc coordinator, this affinity version should be used for queries.
                                     prev.events(),
                                     prev.events().discoveryCache(),
                                     aff,
@@ -2245,10 +2271,19 @@
                     if (!owners.isEmpty() && !owners.contains(curPrimary))
                         curPrimary = owners.get(0);
 
-                    if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) {
-                        if (aliveNodes.contains(curPrimary)) {
-                            GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);
+                    // If new assignment is empty preserve current ownership for alive nodes.
+                    if (curPrimary != null && newPrimary == null) {
+                        newNodes0 = new ArrayList<>(curNodes.size());
 
+                        for (ClusterNode node : curNodes) {
+                            if (aliveNodes.contains(node))
+                                newNodes0.add(node);
+                        }
+                    }
+                    else if (curPrimary != null && !curPrimary.equals(newPrimary)) {
+                        GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);
+
+                        if (aliveNodes.contains(curPrimary)) {
                             if (state != GridDhtPartitionState.OWNING) {
                                 newNodes0 = latePrimaryAssignment(grpHolder.affinity(),
                                     p,
@@ -2258,8 +2293,6 @@
                             }
                         }
                         else {
-                            GridDhtPartitionState state = top.partitionState(newPrimary.id(), p);
-
                             if (state != GridDhtPartitionState.OWNING) {
                                 for (int i = 1; i < curNodes.size(); i++) {
                                     ClusterNode curNode = curNodes.get(i);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl.java
index 48649d2..60eba21 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl.java
@@ -103,7 +103,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return "Entry [key=" + key + ", val=" + val + ']';
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl0.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl0.java
index 36d27ee..1e97f0c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl0.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImpl0.java
@@ -58,7 +58,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return "CacheEntry [key=" + getKey() + ", val=" + getValue() + ']';
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImplEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImplEx.java
index 664b216..0294560 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImplEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryImplEx.java
@@ -57,7 +57,7 @@
     }
 
     /** {@inheritDoc} */
-    public GridCacheVersion version() {
+    @Override public GridCacheVersion version() {
         if (ver == GET_ENTRY_INVALID_VER_AFTER_GET) {
             throw new IgniteException("Impossible to get entry version after " +
                 "get() inside OPTIMISTIC REPEATABLE_READ transaction. Use only getEntry() or getEntries() inside " +
@@ -84,7 +84,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         String res = "CacheEntry [key=" + getKey() +
             ", val=" + getValue();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java
index 49f77fa..614d7c0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java
@@ -37,6 +37,18 @@
     @GridDirectCollection(GridCacheEntryInfo.class)
     private List<GridCacheEntryInfo> infos;
 
+    /** */
+    public CacheEntryInfoCollection() {
+        // No-op
+    }
+
+    /**
+     * @param infos List of cache entry info.
+     */
+    public CacheEntryInfoCollection(List<GridCacheEntryInfo> infos) {
+        this.infos = infos;
+    }
+
     /**
      *
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java
index 7009575..d5e2d66 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java
@@ -41,7 +41,6 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopologyImpl;
-import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionsEvictor;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager;
@@ -60,6 +59,7 @@
 import org.apache.ignite.mxbean.CacheGroupMetricsMXBean;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
 import static org.apache.ignite.cache.CacheMode.LOCAL;
 import static org.apache.ignite.cache.CacheMode.REPLICATED;
 import static org.apache.ignite.cache.CacheRebalanceMode.NONE;
@@ -126,10 +126,6 @@
 
     /** */
     private GridCachePreloader preldr;
-
-    /** Partition evictor. */
-    private GridDhtPartitionsEvictor evictor;
-
     /** */
     private final DataRegion dataRegion;
 
@@ -151,6 +147,9 @@
     /** */
     private boolean qryEnabled;
 
+    /** */
+    private boolean mvccEnabled;
+
     /** MXBean. */
     private CacheGroupMetricsMXBean mxBean;
 
@@ -217,6 +216,8 @@
 
         storeCacheId = affNode && dataRegion.config().getPageEvictionMode() != DataPageEvictionMode.DISABLED;
 
+        mvccEnabled = ccfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT;
+
         log = ctx.kernalContext().log(getClass());
 
         caches = new ArrayList<>();
@@ -225,6 +226,13 @@
     }
 
     /**
+     * @return Mvcc flag.
+     */
+    public boolean mvccEnabled() {
+        return mvccEnabled;
+    }
+
+    /**
      * @return {@code True} if this is cache group for one of system caches.
      */
     public boolean systemCache() {
@@ -260,13 +268,6 @@
     }
 
     /**
-     * @return Partitions evictor.
-     */
-    public GridDhtPartitionsEvictor evictor() {
-        return evictor;
-    }
-
-    /**
      * @return IO policy for the given cache group.
      */
     public byte ioPolicy() {
@@ -407,6 +408,13 @@
     }
 
     /**
+     * @return {@code True} if cache created by user.
+     */
+    public boolean userCache() {
+        return cacheType.userCache();
+    }
+
+    /**
      * Adds rebalancing event.
      *
      * @param part Partition.
@@ -733,7 +741,7 @@
         IgniteCheckedException err =
             new IgniteCheckedException("Failed to wait for topology update, cache (or node) is stopping.");
 
-        evictor.stop();
+        ctx.evict().onCacheGroupStopped(this);
 
         aff.cancelFutures(err);
 
@@ -787,8 +795,6 @@
                 cctx.dr().partitionEvicted(part);
 
             cctx.continuousQueries().onPartitionEvicted(part);
-
-            cctx.dataStructures().onPartitionEvicted(part);
         }
     }
 
@@ -909,8 +915,6 @@
         else
             preldr = new GridCachePreloaderAdapter(this);
 
-        evictor = new GridDhtPartitionsEvictor(this);
-
         if (persistenceEnabled()) {
             try {
                 offheapMgr = new GridCacheOffheapManager();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java
index 58de40c..15358cb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java
@@ -86,16 +86,6 @@
     }
 
     /**
-     *
-     */
-    private static class NoopAllocationTracker implements AllocatedPageTracker{
-        /** {@inheritDoc} */
-        @Override public void updateTotalAllocatedPages(long delta) {
-            // No-op.
-        }
-    }
-
-    /**
      * Creates Group metrics MBean.
      *
      * @param ctx Cache group context.
@@ -112,7 +102,7 @@
             this.groupPageAllocationTracker = dataRegionMetrics.getOrAllocateGroupPageAllocationTracker(ctx.groupId());
         }
         else
-            this.groupPageAllocationTracker = new GroupAllocationTracker(new NoopAllocationTracker());
+            this.groupPageAllocationTracker = new GroupAllocationTracker(AllocatedPageTracker.NO_OP);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java
index 17f304e..3b463af 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java
@@ -42,6 +42,10 @@
 
     /** */
     @GridToStringInclude
+    private transient Object unprepareRes;
+
+    /** */
+    @GridToStringInclude
     private CacheObject res;
 
     /** */
@@ -69,6 +73,22 @@
     }
 
     /**
+     * Constructs CacheInvokeDirectResult with unprepared res, to avoid object marshaling while holding topology locks.
+     *
+     * @param key Key.
+     * @param res Result.
+     * @return a new instance of CacheInvokeDirectResult.
+     */
+    static CacheInvokeDirectResult lazyResult(KeyCacheObject key, Object res) {
+        CacheInvokeDirectResult res0 = new CacheInvokeDirectResult();
+
+        res0.key = key;
+        res0.unprepareRes = res;
+
+        return res0;
+    }
+
+    /**
      * @param key Key.
      * @param err Exception thrown by {@link EntryProcessor#process(MutableEntry, Object...)}.
      */
@@ -120,11 +140,28 @@
             }
         }
 
+        assert unprepareRes == null : "marshalResult() was not called for the result: " + this;
+
         if (res != null)
             res.prepareMarshal(ctx.cacheObjectContext());
     }
 
     /**
+     * Converts the entry processor unprepared result to a cache object instance.
+     *
+     * @param ctx Cache context.
+     */
+    public void marshalResult(GridCacheContext ctx) {
+        try {
+            if (unprepareRes != null)
+                res = ctx.toCacheObject(unprepareRes);
+        }
+        finally {
+            unprepareRes = null;
+        }
+    }
+
+    /**
      * @param ctx Cache context.
      * @param ldr Class loader.
      * @throws IgniteCheckedException If failed.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java
index b51c136..2e6d64a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java
@@ -25,6 +25,9 @@
 import javax.cache.processor.EntryProcessorException;
 import javax.cache.processor.EntryProcessorResult;
 import javax.cache.processor.MutableEntry;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
+import org.apache.ignite.internal.UnregisteredClassException;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -96,6 +99,9 @@
     /** {@inheritDoc} */
     @Override public T get() throws EntryProcessorException {
         if (err != null) {
+            if (err instanceof UnregisteredClassException || err instanceof UnregisteredBinaryTypeException)
+                throw (IgniteException) err;
+
             if (err instanceof EntryProcessorException)
                 throw (EntryProcessorException)err;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
index a4bb6bc..4463dc8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLazyEntry.java
@@ -200,7 +200,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(CacheLazyEntry.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java
index e2a15ee..67ee410 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java
@@ -172,7 +172,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(S.INCLUDE_SENSITIVE ? getClass().getSimpleName() : "CacheObject",
             "val", val, true,
             "hasValBytes", valBytes != null, false);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
index 414bebb..57a70f8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java
@@ -188,7 +188,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return "CacheObjectByteArrayImpl [arrLen=" + (val != null ? val.length : 0) + ']';
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOperationContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOperationContext.java
index 8a7afe7..53f9e22 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOperationContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOperationContext.java
@@ -163,7 +163,7 @@
     /**
      * Gets data center ID.
      *
-     * @return Client ID.
+     * @return Datacenter ID.
      */
     @Nullable public Byte dataCenterId() {
         return dataCenterId;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java
index 3aaf7f3..572e33e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java
@@ -69,6 +69,7 @@
 import org.apache.ignite.spi.discovery.DiscoveryDataBag;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
 import static org.apache.ignite.cache.CacheMode.LOCAL;
 import static org.apache.ignite.cache.CacheMode.PARTITIONED;
 import static org.apache.ignite.events.EventType.EVT_NODE_JOINED;
@@ -1866,7 +1867,7 @@
      * @param ccfg Cache configuration to start.
      * @throws IgniteCheckedException If failed.
      */
-    public void validateStartCacheConfiguration(CacheConfiguration ccfg) throws IgniteCheckedException {
+    void validateStartCacheConfiguration(CacheConfiguration ccfg) throws IgniteCheckedException {
         if (ccfg.getGroupName() != null) {
             CacheGroupDescriptor grpDesc = cacheGroupByName(ccfg.getGroupName());
 
@@ -1891,6 +1892,10 @@
         CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "cacheMode", "Cache mode",
             cfg.getCacheMode(), startCfg.getCacheMode(), true);
 
+        if (cfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT || startCfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT)
+            CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "atomicityMode", "Atomicity mode",
+                attr1.atomicityMode(), attr2.atomicityMode(), true);
+
         CU.validateCacheGroupsAttributesMismatch(log, cfg, startCfg, "affinity", "Affinity function",
             attr1.cacheAffinityClassName(), attr2.cacheAffinityClassName(), true);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java
index 4046c98..34ed048 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java
@@ -17,11 +17,15 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -51,11 +55,20 @@
     /** */
     private final boolean compatibilityNode = getBoolean(IGNITE_EXCHANGE_COMPATIBILITY_VER_1, false);
 
+    /** */
+    private final boolean newMvccCrd;
+
+    /** Currently running mvcc queries, initialized when mvcc coordinator is changed. */
+    private Map<UUID, GridLongList> activeQueries;
+
     /**
      * @param crd Coordinator flag.
+     * @param newMvccCrd {@code True} if new coordinator assigned during this exchange.
      * @param fut Exchange future.
      */
-    public ExchangeContext(boolean crd, GridDhtPartitionsExchangeFuture fut) {
+    public ExchangeContext(boolean crd, boolean newMvccCrd, GridDhtPartitionsExchangeFuture fut) {
+        this.newMvccCrd = newMvccCrd;
+
         int protocolVer = exchangeProtocolVersion(fut.firstEventCache().minimumNodeVersion());
 
         if (compatibilityNode || (crd && fut.localJoinExchange())) {
@@ -124,6 +137,34 @@
         return merge;
     }
 
+    /**
+     * @return {@code True} if new node assigned as mvcc coordinator node during this exchange.
+     */
+    public boolean newMvccCoordinator() {
+        return newMvccCrd;
+    }
+
+    /**
+     * @return Active queries.
+     */
+    public Map<UUID, GridLongList> activeQueries() {
+        return activeQueries;
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param nodeQueries Node queries.
+     */
+    public void addActiveQueries(UUID nodeId, @Nullable GridLongList nodeQueries) {
+        if (nodeQueries == null)
+            return;
+
+        if (activeQueries == null)
+            activeQueries = new HashMap<>();
+
+        activeQueries.put(nodeId, nodeQueries);
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(ExchangeContext.class, this);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java
index 0e7e01c..2f7753b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java
@@ -29,7 +29,6 @@
 import org.apache.ignite.internal.managers.discovery.DiscoCache;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
-import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 
@@ -127,7 +126,7 @@
 
         ClusterNode node = evt.eventNode();
 
-        if (!CU.clientNode(node)) {
+        if (!node.isClient()) {
             lastSrvEvt = evt;
 
             srvEvtTopVer = new AffinityTopologyVersion(evt.topologyVersion(), 0);
@@ -135,7 +134,7 @@
             if (evt.type()== EVT_NODE_JOINED)
                 srvJoin = true;
             else if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED)
-                srvLeft = !CU.clientNode(node);
+                srvLeft = !node.isClient();
         }
     }
 
@@ -151,7 +150,7 @@
      * @return {@code True} if given event is {@link EventType#EVT_NODE_FAILED} or {@link EventType#EVT_NODE_LEFT}.
      */
     public static boolean serverLeftEvent(DiscoveryEvent evt) {
-        return  ((evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) && !CU.clientNode(evt.eventNode()));
+        return  ((evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) && !evt.eventNode().isClient());
     }
 
     /**
@@ -159,7 +158,7 @@
      * @return {@code True} if given event is {@link EventType#EVT_NODE_JOINED}.
      */
     public static boolean serverJoinEvent(DiscoveryEvent evt) {
-        return  (evt.type() == EVT_NODE_JOINED && !CU.clientNode(evt.eventNode()));
+        return  (evt.type() == EVT_NODE_JOINED && !evt.eventNode().isClient());
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java
index 96c5e29..c99eb00 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java
@@ -49,6 +49,7 @@
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.cluster.ClusterGroup;
 import org.apache.ignite.internal.AsyncSupportAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.GridKernalState;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.future.IgniteFutureImpl;
@@ -145,6 +146,8 @@
         CacheOperationGate opGate = onEnter();
 
         try {
+            MvccUtils.verifyMvccOperationSupport(delegate.context(), "withExpiryPolicy");
+
             return new GatewayProtectedCacheProxy<>(delegate, opCtx.withExpiryPolicy(plc), lock);
         }
         finally {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
index eeed4fa..0570b4b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java
@@ -92,6 +92,8 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
 import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter;
@@ -138,7 +140,6 @@
 import org.apache.ignite.plugin.security.SecurityPermission;
 import org.apache.ignite.resources.IgniteInstanceResource;
 import org.apache.ignite.resources.JobContextResource;
-import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode;
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
@@ -171,7 +172,6 @@
         IgniteSystemProperties.IGNITE_CACHE_START_SIZE, 4096);
 
     /** Size of keys batch to removeAll. */
-    // TODO GG-11231 (workaround for GG-11231).
     private static final int REMOVE_ALL_KEYS_BATCH = 10000;
 
     /** Maximum number of retries when topology changes. */
@@ -276,14 +276,6 @@
     /** Whether this cache is IGFS data cache. */
     private boolean igfsDataCache;
 
-    /** Whether this cache is Mongo data cache. */
-    @SuppressWarnings("UnusedDeclaration")
-    private boolean mongoDataCache;
-
-    /** Whether this cache is Mongo meta cache. */
-    @SuppressWarnings("UnusedDeclaration")
-    private boolean mongoMetaCache;
-
     /** Current IGFS data cache size. */
     private LongAdder igfsDataCacheSize;
 
@@ -798,7 +790,7 @@
                 return it;
             }
 
-            public String toString() {
+            @Override public String toString() {
                 return "CacheLocalEntries []";
             }
         };
@@ -817,6 +809,9 @@
 
         ctx.checkSecurity(SecurityPermission.CACHE_READ);
 
+        //TODO IGNITE-7955
+        MvccUtils.verifyMvccOperationSupport(ctx, "Peek");
+
         PeekModes modes = parsePeekModes(peekModes, false);
 
         KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
@@ -897,7 +892,7 @@
                         continue;
                     }
                     finally {
-                        ctx0.evicts().touch(e, null);
+                        e.touch(null);
 
                         ctx.shared().database().checkpointReadUnlock();
                     }
@@ -952,7 +947,7 @@
                 return e.peek(heap, offheap, AffinityTopologyVersion.NONE, plc);
             }
             finally {
-                ctx.evicts().touch(e, null);
+                e.touch(null);
             }
         }
 
@@ -1122,6 +1117,9 @@
     @Override public void clearLocally(boolean srv, boolean near, boolean readers) {
         ctx.checkSecurity(SecurityPermission.CACHE_REMOVE);
 
+        //TODO IGNITE-7952
+        MvccUtils.verifyMvccOperationSupport(ctx, "Clear");
+
         List<GridCacheClearAllRunnable<K, V>> jobs = splitClearLocally(srv, near, readers);
 
         if (!F.isEmpty(jobs)) {
@@ -1191,6 +1189,9 @@
      * @throws IgniteCheckedException In case of error.
      */
     private void clear(@Nullable Set<? extends K> keys) throws IgniteCheckedException {
+        //TODO IGNITE-7952
+        MvccUtils.verifyMvccOperationSupport(ctx, "Clear");
+
         if (isLocal()) {
             if (keys == null)
                 clearLocally(true, false, false);
@@ -1208,6 +1209,9 @@
      * @return Future.
      */
     private IgniteInternalFuture<?> clearAsync(@Nullable final Set<? extends K> keys) {
+        //TODO IGNITE-7952
+        MvccUtils.verifyMvccOperationSupport(ctx, "Clear");
+
         if (isLocal())
             return clearLocallyAsync(keys);
         else
@@ -1263,6 +1267,9 @@
         if (F.isEmpty(keys))
             return;
 
+        //TODO IGNITE-7952
+        MvccUtils.verifyMvccOperationSupport(ctx, "Clear");
+
         GridCacheVersion obsoleteVer = ctx.versions().next();
 
         for (KeyCacheObject key : keys) {
@@ -1882,7 +1889,8 @@
             skipVals,
             /*keep cache objects*/false,
             recovery,
-            needVer);
+            needVer,
+            null); // TODO IGNITE-7371
     }
 
     /**
@@ -1897,6 +1905,7 @@
      * @param skipVals Skip values flag.
      * @param keepCacheObjects Keep cache objects.
      * @param needVer If {@code true} returns values as tuples containing value and version.
+     * @param mvccSnapshot MVCC snapshot.
      * @return Future.
      */
     protected final <K1, V1> IgniteInternalFuture<Map<K1, V1>> getAllAsync0(
@@ -1911,7 +1920,8 @@
         final boolean skipVals,
         final boolean keepCacheObjects,
         final boolean recovery,
-        final boolean needVer
+        final boolean needVer,
+        MvccSnapshot mvccSnapshot
     ) {
         if (F.isEmpty(keys))
             return new GridFinishedFuture<>(Collections.<K1, V1>emptyMap());
@@ -1968,7 +1978,8 @@
                             boolean skipEntry = readNoEntry;
 
                             if (readNoEntry) {
-                                CacheDataRow row = ctx.offheap().read(ctx, key);
+                                CacheDataRow row = mvccSnapshot != null ? ctx.offheap().mvccRead(ctx, key, mvccSnapshot) :
+                                    ctx.offheap().read(ctx, key);
 
                                 if (row != null) {
                                     long expireTime = row.expireTime();
@@ -2031,6 +2042,7 @@
                                         taskName,
                                         expiry,
                                         !deserializeBinary,
+                                        mvccSnapshot,
                                         readerArgs);
 
                                     assert res != null;
@@ -2055,10 +2067,11 @@
                                         taskName,
                                         expiry,
                                         !deserializeBinary,
+                                        mvccSnapshot,
                                         readerArgs);
 
                                     if (res == null)
-                                        ctx.evicts().touch(entry, topVer);
+                                        entry.touch(topVer);
                                 }
                             }
 
@@ -2073,7 +2086,7 @@
                                     needVer);
 
                                 if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED)))
-                                    ctx.evicts().touch(entry, topVer);
+                                    entry.touch(topVer);
 
                                 if (keysSize == 1)
                                     // Safe to return because no locks are required in READ_COMMITTED mode.
@@ -2155,7 +2168,7 @@
 
                                                 if (tx0 == null || (!tx0.implicit() &&
                                                     tx0.isolation() == READ_COMMITTED))
-                                                    ctx.evicts().touch(entry, topVer);
+                                                    entry.touch(topVer);
 
                                                 break;
                                             }
@@ -2198,7 +2211,7 @@
                                         GridCacheEntryEx entry = peekEx(key);
 
                                         if (entry != null)
-                                            ctx.evicts().touch(entry, topVer);
+                                            entry.touch(topVer);
                                     }
                                 }
 
@@ -2225,8 +2238,11 @@
             }
             catch (RuntimeException | AssertionError e) {
                 if (misses != null) {
-                    for (KeyCacheObject key0 : misses.keySet())
-                        ctx.evicts().touch(peekEx(key0), topVer);
+                    for (KeyCacheObject key0 : misses.keySet()) {
+                        GridCacheEntryEx entry = peekEx(key0);
+                        if (entry != null)
+                            entry.touch(topVer);
+                    }
                 }
 
                 if (newLocalEntries != null) {
@@ -2286,7 +2302,7 @@
                             entry.clearReserveForLoad(e.getValue().version());
 
                         if (needTouch)
-                            ctx.evicts().touch(entry, topVer);
+                            entry.touch(topVer);
                     }
                 }
             }
@@ -3070,7 +3086,7 @@
         List<K> keys = new ArrayList<>(Math.min(REMOVE_ALL_KEYS_BATCH, size()));
 
         do {
-            for (Iterator<CacheDataRow> it = ctx.offheap().cacheIterator(ctx.cacheId(), true, true, null);
+            for (Iterator<CacheDataRow> it = ctx.offheap().cacheIterator(ctx.cacheId(), true, true, null, null);
                 it.hasNext() && keys.size() < REMOVE_ALL_KEYS_BATCH; )
                 keys.add((K)it.next().key());
 
@@ -3395,6 +3411,9 @@
         if (keyCheck)
             validateCacheKeys(keys);
 
+        //TODO IGNITE-7764
+        MvccUtils.verifyMvccOperationSupport(ctx, "Lock");
+
         IgniteInternalFuture<Boolean> fut = lockAllAsync(keys, timeout);
 
         boolean isInterrupted = false;
@@ -3423,6 +3442,9 @@
         if (keyCheck)
             validateCacheKey(key);
 
+        //TODO IGNITE-7764
+        MvccUtils.verifyMvccOperationSupport(ctx, "Lock");
+
         return lockAllAsync(Collections.singletonList(key), timeout);
     }
 
@@ -3532,6 +3554,9 @@
     /** {@inheritDoc} */
     @Override public void localLoadCache(final IgniteBiPredicate<K, V> p, Object[] args)
         throws IgniteCheckedException {
+        //TODO IGNITE-7954
+        MvccUtils.verifyMvccOperationSupport(ctx, "Load");
+
         final boolean replicate = ctx.isDrEnabled();
         final AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
 
@@ -3633,7 +3658,7 @@
                 log.debug("Got removed entry during loadCache (will ignore): " + entry);
         }
         finally {
-            ctx.evicts().touch(entry, topVer);
+            entry.touch(topVer);
         }
 
         CU.unwindEvicts(ctx);
@@ -3669,6 +3694,9 @@
         if (!ctx.store().configured())
             return new GridFinishedFuture<>();
 
+        //TODO IGNITE-7954
+        MvccUtils.verifyMvccOperationSupport(ctx, "Load");
+
         CacheOperationContext opCtx = ctx.operationContextPerCall();
 
         ExpiryPolicy plc = opCtx != null ? opCtx.expiry() : null;
@@ -3734,7 +3762,7 @@
                     col.add(new DataStreamerEntry(key, ctx.toCacheObject(val)));
 
                     if (col.size() == ldr.perNodeBufferSize()) {
-                        ldr.addDataInternal(col);
+                        ldr.addDataInternal(col, false);
 
                         col.clear();
                     }
@@ -3753,6 +3781,9 @@
      */
     public void localLoad(Collection<? extends K> keys, @Nullable ExpiryPolicy plc, final boolean keepBinary)
         throws IgniteCheckedException {
+        //TODO IGNITE-7954
+        MvccUtils.verifyMvccOperationSupport(ctx, "Load");
+
         final boolean replicate = ctx.isDrEnabled();
         final AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
 
@@ -3825,6 +3856,9 @@
 
         assert !F.isEmpty(nodes) : "There are not datanodes fo cache: " + ctx.name();
 
+        //TODO IGNITE-7954
+        MvccUtils.verifyMvccOperationSupport(ctx, "Load");
+
         final boolean keepBinary = opCtx != null && opCtx.isKeepBinary();
 
         ComputeTaskInternalFuture fut = ctx.kernalContext().closure().callAsync(BROADCAST,
@@ -4183,6 +4217,7 @@
                     READ_COMMITTED,
                     tCfg.getDefaultTxTimeout(),
                     !ctx.skipStore(),
+                    false,
                     0,
                     null
                 );
@@ -4284,6 +4319,7 @@
                     READ_COMMITTED,
                     txCfg.getDefaultTxTimeout(),
                     !skipStore,
+                    false,
                     0,
                     null);
 
@@ -4356,20 +4392,36 @@
 
             if (fut != null && !fut.isDone()) {
                 IgniteInternalFuture<T> f = new GridEmbeddedFuture(fut,
-                    new IgniteOutClosure<IgniteInternalFuture>() {
-                        @Override public IgniteInternalFuture<T> apply() {
-                            if (ctx.kernalContext().isStopping())
-                                return new GridFinishedFuture<>(
-                                    new IgniteCheckedException("Operation has been cancelled (node is stopping)."));
+                    (IgniteOutClosure<IgniteInternalFuture>)() -> {
+                        GridFutureAdapter resFut = new GridFutureAdapter();
 
-                            try {
-                                return op.op(tx0, opCtx).chain(clo);
+                        ctx.kernalContext().closure().runLocalSafe(() -> {
+                            IgniteInternalFuture fut0;
+
+                            if (ctx.kernalContext().isStopping())
+                                fut0 = new GridFinishedFuture<>(
+                                    new IgniteCheckedException("Operation has been cancelled (node is stopping)."));
+                            else {
+                                try {
+                                    fut0 = op.op(tx0, opCtx).chain(clo);
+                                }
+                                finally {
+                                    // It is necessary to clear tx context in this thread as well.
+                                    ctx.shared().txContextReset();
+                                }
                             }
-                            finally {
-                                // It is necessary to clear tx context in this thread as well.
-                                ctx.shared().txContextReset();
-                            }
-                        }
+
+                            fut0.listen((IgniteInClosure<IgniteInternalFuture>)fut01 -> {
+                                try {
+                                    resFut.onDone(fut01.get());
+                                }
+                                catch (Throwable ex) {
+                                    resFut.onDone(ex);
+                                }
+                            });
+                        }, true);
+
+                        return resFut;
                     });
 
                 saveFuture(holder, f, retry);
@@ -4528,16 +4580,6 @@
         return igfsDataCacheSize.longValue();
     }
 
-    /** {@inheritDoc} */
-    @Override public boolean isMongoDataCache() {
-        return mongoDataCache;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isMongoMetaCache() {
-        return mongoMetaCache;
-    }
-
     /**
      * Callback invoked when data is added to IGFS cache.
      *
@@ -4554,6 +4596,9 @@
      * @param readers Whether to clear readers.
      */
     private boolean clearLocally0(K key, boolean readers) {
+        //TODO IGNITE-7952
+        MvccUtils.verifyMvccOperationSupport(ctx, "Clear");
+
         ctx.checkSecurity(SecurityPermission.CACHE_REMOVE);
 
         if (keyCheck)
@@ -4591,6 +4636,9 @@
         if (keyCheck)
             validateCacheKey(key);
 
+        //TODO IGNITE-7956
+        MvccUtils.verifyMvccOperationSupport(ctx, "Evict");
+
         return evictx(key, ctx.versions().next(), CU.empty0());
     }
 
@@ -4604,6 +4652,9 @@
         if (keyCheck)
             validateCacheKey(keys);
 
+        //TODO IGNITE-7956
+        MvccUtils.verifyMvccOperationSupport(ctx, "Evict");
+
         GridCacheVersion obsoleteVer = ctx.versions().next();
 
         try {
@@ -4888,7 +4939,8 @@
             /*transformClo*/null,
             /*taskName*/null,
             /*expiryPlc*/null,
-            !deserializeBinary);
+            !deserializeBinary,
+            null); // TODO IGNITE-7371
 
         if (val == null)
             return null;
@@ -4948,6 +5000,7 @@
                 READ_COMMITTED,
                 CU.transactionConfiguration(ctx, ctx.kernalContext().config()).getDefaultTxTimeout(),
                 opCtx == null || !opCtx.skipStore(),
+                false,
                 0,
                 null);
 
@@ -5430,14 +5483,14 @@
         /**
          * @return Whether to clear server cache.
          */
-        protected boolean clearServerCache() {
+        @Override protected boolean clearServerCache() {
             return false;
         }
 
         /**
          * @return Whether to clear near cache.
          */
-        protected boolean clearNearCache() {
+        @Override protected boolean clearNearCache() {
             return true;
         }
     }
@@ -5483,7 +5536,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(PartitionSizeLongJob.class, this);
         }
     }
@@ -5523,7 +5576,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(SizeJob.class, this);
         }
     }
@@ -5563,7 +5616,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(SizeLongJob.class, this);
         }
     }
@@ -5620,7 +5673,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(LoadCacheJob.class, this);
         }
     }
@@ -5663,7 +5716,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(LoadCacheJobV2.class, this);
         }
     }
@@ -6005,7 +6058,7 @@
             col.add(e);
 
             if (col.size() == ldr.perNodeBufferSize()) {
-                ldr.addDataInternal(col);
+                ldr.addDataInternal(col, false);
 
                 col.clear();
             }
@@ -6016,7 +6069,7 @@
          */
         void onDone() {
             if (!col.isEmpty())
-                ldr.addDataInternal(col);
+                ldr.addDataInternal(col, false);
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java
index c9ee38c..cf4344d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAffinityManager.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.processors.affinity.AffinityAssignment;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.typedef.F;
 import org.jetbrains.annotations.Nullable;
@@ -243,6 +244,10 @@
         return aff0.cachedAffinity(topVer);
     }
 
+    public MvccCoordinator mvccCoordinator(AffinityTopologyVersion topVer) {
+        return assignment(topVer).mvccCoordinator();
+    }
+
     /**
      * @param key Key to check.
      * @param topVer Topology version.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java
index c8cab66..75c0d0c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java
@@ -201,9 +201,7 @@
                         true);
 
                 if (touch)
-                    ctx.evicts().touch(
-                        cur,
-                        topVer);
+                    cur.touch(topVer);
             }
 
             assert Math.abs(sizeChange) <= 1;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
index 61b1878..7d200eb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java
@@ -112,6 +112,7 @@
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_READ_LOAD_BALANCING;
 import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
 import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.PRIMARY_SYNC;
 import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED;
@@ -817,7 +818,14 @@
      * @return {@code True} if transactional.
      */
     public boolean transactional() {
-        return cacheCfg.getAtomicityMode() == TRANSACTIONAL;
+        return cacheCfg.getAtomicityMode() == TRANSACTIONAL || cacheCfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT;
+    }
+
+    /**
+     * @return {@code True} if transactional snapshot.
+     */
+    public boolean transactionalSnapshot() {
+        return cacheCfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT;
     }
 
     /**
@@ -2114,6 +2122,13 @@
     }
 
     /**
+     * @return {@code True} if mvcc is enabled for cache.
+     */
+    public boolean mvccEnabled() {
+        return grp.mvccEnabled();
+    }
+
+    /**
      * @param part Partition.
      * @param topVer Topology version.
      * @return {@code True} if partition is available locally.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java
index aef38e6..3a2af5d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java
@@ -18,6 +18,7 @@
 package org.apache.ignite.internal.processors.cache;
 
 import java.util.Collection;
+import java.util.List;
 import java.util.UUID;
 import javax.cache.Cache;
 import javax.cache.expiry.ExpiryPolicy;
@@ -27,7 +28,11 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
@@ -187,6 +192,11 @@
     @Nullable public GridCacheEntryInfo info();
 
     /**
+     * @return Entry info for each MVCC version.
+     */
+    @Nullable public List<GridCacheEntryInfo> allVersionsInfo() throws IgniteCheckedException;
+
+    /**
      * Invalidates this entry.
      *
      * @param newVer New version to set.
@@ -264,7 +274,8 @@
         Object transformClo,
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
-        boolean keepBinary)
+        boolean keepBinary,
+        @Nullable MvccSnapshot mvccVer)
         throws IgniteCheckedException, GridCacheEntryRemovedException;
 
     /**
@@ -292,6 +303,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        @Nullable MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs)
         throws IgniteCheckedException, GridCacheEntryRemovedException;
 
@@ -313,6 +325,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        @Nullable MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs) throws IgniteCheckedException, GridCacheEntryRemovedException;
 
     /**
@@ -331,6 +344,64 @@
 
     /**
      * @param tx Cache transaction.
+     * @param affNodeId Partitioned node iD.
+     * @param val Value to set.
+     * @param ttl0 TTL.
+     * @param topVer Topology version.
+     * @param updateCntr Update counter.
+     * @param mvccVer Mvcc version.
+     * @param op Cache operation.
+     * @param needHistory Whether to collect rows created or affected by the current tx.
+     * @param noCreate Entry should not be created when enabled, e.g. SQL INSERT.
+     * @return Tuple containing success flag and old value. If success is {@code false},
+     *      then value is {@code null}.
+     * @throws IgniteCheckedException If storing value failed.
+     * @throws GridCacheEntryRemovedException If entry has been removed.
+     */
+    public GridCacheUpdateTxResult mvccSet(
+        @Nullable IgniteInternalTx tx,
+        UUID affNodeId,
+        CacheObject val,
+        long ttl0,
+        AffinityTopologyVersion topVer,
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer,
+        GridCacheOperation op,
+        boolean needHistory,
+        boolean noCreate) throws IgniteCheckedException, GridCacheEntryRemovedException;
+
+    /**
+     * @param tx Cache transaction.
+     * @param affNodeId Partitioned node iD.
+     * @param topVer Topology version.
+     * @param updateCntr Update counter.
+     * @param mvccVer Mvcc version.
+     * @param needHistory Whether to collect rows created or affected by the current tx.
+     * @return Tuple containing success flag and old value. If success is {@code false},
+     *      then value is {@code null}.
+     * @throws IgniteCheckedException If storing value failed.
+     * @throws GridCacheEntryRemovedException If entry has been removed.
+     */
+    public GridCacheUpdateTxResult mvccRemove(
+        @Nullable IgniteInternalTx tx,
+        UUID affNodeId,
+        AffinityTopologyVersion topVer,
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer,
+        boolean needHistory) throws IgniteCheckedException, GridCacheEntryRemovedException;
+
+    /**
+     * @param tx Transaction adapter.
+     * @param mvccVer Mvcc version.
+     * @return Lock result.
+     * @throws GridCacheEntryRemovedException If entry has been removed.
+     * @throws IgniteCheckedException If locking failed
+     */
+    GridCacheUpdateTxResult mvccLock(GridDhtTxLocalAdapter tx,
+        MvccSnapshot mvccVer) throws GridCacheEntryRemovedException, IgniteCheckedException;
+
+    /**
+     * @param tx Cache transaction.
      * @param evtNodeId ID of node responsible for this change.
      * @param affNodeId Partitioned node iD.
      * @param val Value to set.
@@ -377,7 +448,8 @@
         @Nullable UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr
+        @Nullable Long updateCntr,
+        @Nullable MvccSnapshot mvccVer
     ) throws IgniteCheckedException, GridCacheEntryRemovedException;
 
     /**
@@ -419,7 +491,8 @@
         @Nullable UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer
     ) throws IgniteCheckedException, GridCacheEntryRemovedException;
 
     /**
@@ -663,8 +736,43 @@
      * @throws IgniteCheckedException In case of error.
      * @throws GridCacheEntryRemovedException If entry was removed.
      */
+    default boolean initialValue(CacheObject val,
+        GridCacheVersion ver,
+        long ttl,
+        long expireTime,
+        boolean preload,
+        AffinityTopologyVersion topVer,
+        GridDrType drType,
+        boolean fromStore) throws IgniteCheckedException, GridCacheEntryRemovedException {
+        return initialValue(val, ver, null, null, TxState.NA, TxState.NA,
+            ttl, expireTime, preload, topVer, drType, fromStore);
+    }
+
+    /**
+     * Sets new value if current version is <tt>0</tt>
+     *
+     * @param val New value.
+     * @param ver Version to use.
+     * @param mvccVer Mvcc version.
+     * @param newMvccVer New mvcc version.
+     * @param mvccTxState Tx state hint for mvcc version.
+     * @param newMvccTxState Tx state hint for new mvcc version.
+     * @param ttl Time to live.
+     * @param expireTime Expiration time.
+     * @param preload Flag indicating whether entry is being preloaded.
+     * @param topVer Topology version.
+     * @param drType DR type.
+     * @param fromStore {@code True} if value was loaded from store.
+     * @return {@code True} if initial value was set.
+     * @throws IgniteCheckedException In case of error.
+     * @throws GridCacheEntryRemovedException If entry was removed.
+     */
     public boolean initialValue(CacheObject val,
         GridCacheVersion ver,
+        @Nullable MvccVersion mvccVer,
+        @Nullable MvccVersion newMvccVer,
+        byte mvccTxState,
+        byte newMvccTxState,
         long ttl,
         long expireTime,
         boolean preload,
@@ -1043,4 +1151,32 @@
      * @return {@code True} if the entry is locked.
      */
     public boolean lockedByCurrentThread();
+
+    /**
+     *
+     * @param tx Transaction.
+     * @param affNodeId Affinity node id.
+     * @param topVer Topology version.
+     * @param updateCntr Update counter.
+     * @param op Cache operation.
+     * @param mvccVer Mvcc version.  @return Update result.
+     * @throws IgniteCheckedException, If failed.
+     * @throws GridCacheEntryRemovedException, If entry has been removed.
+     */
+    public GridCacheUpdateTxResult mvccUpdateRowsWithPreloadInfo(
+        IgniteInternalTx tx,
+        UUID affNodeId,
+        AffinityTopologyVersion topVer,
+        Long updateCntr,
+        List<GridCacheEntryInfo> entries,
+        GridCacheOperation op,
+        MvccSnapshot mvccVer)
+        throws IgniteCheckedException, GridCacheEntryRemovedException;
+
+    /**
+     * Touch this entry in its context's eviction manager.
+     *
+     * @param topVer Topology version.
+     */
+    public void touch(AffinityTopologyVersion topVer);
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryInfo.java
index 7371153..5c8164c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryInfo.java
@@ -17,9 +17,11 @@
 
 package org.apache.ignite.internal.processors.cache;
 
-import java.nio.ByteBuffer;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -28,6 +30,8 @@
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
 
+import java.nio.ByteBuffer;
+
 /**
  * Entry information that gets passed over wire.
  */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
index 6caf3e4..e5ab189 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java
@@ -171,6 +171,11 @@
 
     /** {@inheritDoc} */
     @Override public void touch(IgniteTxEntry txEntry, boolean loc) {
+        assert txEntry.context() == cctx : "Entry from another cache context passed to eviction manager: [" +
+            "entry=" + txEntry +
+            ", cctx=" + cctx +
+            ", entryCtx=" + txEntry.context() + "]";
+
         if (!plcEnabled)
             return;
 
@@ -197,6 +202,11 @@
 
     /** {@inheritDoc} */
     @Override public void touch(GridCacheEntryEx e, AffinityTopologyVersion topVer) {
+        assert e.context() == cctx : "Entry from another cache context passed to eviction manager: [" +
+            "entry=" + e +
+            ", cctx=" + cctx +
+            ", entryCtx=" + e.context() + "]";
+
         if (e.detached() || e.isInternal())
             return;
 
@@ -238,13 +248,17 @@
         }
 
         U.warn(log, "Evictions started (cache may have reached its capacity)." +
-                " You may wish to increase 'maxSize' on eviction policy being used for cache: " + cctx.name(),
-            "Evictions started (cache may have reached its capacity): " + cctx.name());
+            " You may wish to increase 'maxSize' on eviction policy being used for cache: " + cctx.name());
     }
 
     /** {@inheritDoc} */
     @Override public boolean evict(@Nullable GridCacheEntryEx entry, @Nullable GridCacheVersion obsoleteVer,
         boolean explicit, @Nullable CacheEntryPredicate[] filter) throws IgniteCheckedException {
+        assert entry == null || entry.context() == cctx : "Entry from another cache context passed to eviction manager: [" +
+            "entry=" + entry +
+            ", cctx=" + cctx +
+            ", entryCtx=" + entry.context() + "]";
+
         if (entry == null)
             return true;
 
@@ -297,6 +311,10 @@
         assert plcEnabled;
         assert plc != null;
         assert !e.isInternal() : "Invalid entry for policy notification: " + e;
+        assert e.context() == cctx : "Entry from another cache context passed to eviction manager: [" +
+            "entry=" + e +
+            ", cctx=" + cctx +
+            ", entryCtx=" + e.context() + "]";
 
         if (log.isDebugEnabled())
             log.debug("Notifying eviction policy with entry: " + e);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
index 0134421..2e66e5b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java
@@ -611,7 +611,6 @@
         }
     }
 
-
     /**
      * @param cacheMsg Cache message.
      * @param nodeId Node ID.
@@ -914,7 +913,8 @@
 
             break;
 
-            case 114: {
+            case 114:
+            case 120: {
                 processMessage(nodeId, msg, c);// Will be handled by Rebalance Demander.
             }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
index 90d4c4a..fd8b2cd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java
@@ -24,6 +24,7 @@
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import javax.cache.Cache;
 import javax.cache.expiry.ExpiryPolicy;
@@ -34,8 +35,11 @@
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cache.eviction.EvictableEntry;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.NodeStoppingException;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
+import org.apache.ignite.internal.UnregisteredClassException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
@@ -43,12 +47,17 @@
 import org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult.UpdateOutcome;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry;
 import org.apache.ignite.internal.processors.cache.extras.GridCacheEntryExtras;
 import org.apache.ignite.internal.processors.cache.extras.GridCacheMvccEntryExtras;
 import org.apache.ignite.internal.processors.cache.extras.GridCacheObsoleteEntryExtras;
 import org.apache.ignite.internal.processors.cache.extras.GridCacheTtlEntryExtras;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
@@ -57,6 +66,8 @@
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.ResultType;
 import org.apache.ignite.internal.processors.cache.version.GridCacheLazyPlainVersionedEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext;
@@ -64,10 +75,14 @@
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionManager;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionedEntryEx;
 import org.apache.ignite.internal.processors.dr.GridDrType;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheFilter;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.IgniteTree;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.lang.GridClosureException;
+import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.lang.GridMetadataAwareAdapter;
 import org.apache.ignite.internal.util.lang.GridTuple;
 import org.apache.ignite.internal.util.lang.GridTuple3;
@@ -80,7 +95,9 @@
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.thread.IgniteThread;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_EXPIRED;
@@ -89,13 +106,19 @@
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_READ;
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_REMOVED;
 import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_UNLOCKED;
+import static org.apache.ignite.internal.processors.cache.GridCacheOperation.CREATE;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE;
+import static org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter.RowData.NO_KEY;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.CONCURRENT_UPDATE;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.DUPLICATE_KEY;
 import static org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult.UpdateOutcome.INVOKE_NO_OP;
 import static org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult.UpdateOutcome.REMOVE_NO_VAL;
+import static org.apache.ignite.internal.processors.dr.GridDrType.DR_BACKUP;
 import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE;
+import static org.apache.ignite.internal.processors.dr.GridDrType.DR_PRIMARY;
 
 /**
  * Adapter for cache entry.
@@ -162,6 +185,10 @@
     @GridToStringExclude
     private final ReentrantLock lock = new ReentrantLock();
 
+    /** Read Lock for continuous query listener */
+    @GridToStringExclude
+    private final Lock listenerLock;
+
     /**
      * Flags:
      * <ul>
@@ -190,6 +217,7 @@
         this.key = key;
         this.hash = key.hashCode();
         this.cctx = cctx;
+        this.listenerLock = cctx.continuousQueries().getListenerReadLock();
 
         ver = GridCacheVersionManager.START_VER;
     }
@@ -356,6 +384,77 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public List<GridCacheEntryInfo> allVersionsInfo() throws IgniteCheckedException {
+        assert cctx.mvccEnabled();
+
+        lockEntry();
+
+        try {
+            if (obsolete())
+                return Collections.emptyList();
+
+            GridCursor<? extends CacheDataRow> cur =
+                cctx.offheap().dataStore(localPartition()).mvccAllVersionsCursor(cctx, key, NO_KEY);
+
+            List<GridCacheEntryInfo> res = new ArrayList<>();
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                GridCacheMvccEntryInfo info = new GridCacheMvccEntryInfo();
+
+                info.key(key);
+                info.value(row.value());
+                info.cacheId(cctx.cacheId());
+                info.version(row.version());
+                info.setNew(false);
+                info.setDeleted(false);
+
+                byte txState = row.mvccTxState() != TxState.NA ? row.mvccTxState() :
+                    MvccUtils.state(cctx, row.mvccCoordinatorVersion(), row.mvccCounter(),
+                        row.mvccOperationCounter());
+
+                if (txState == TxState.ABORTED)
+                    continue;
+
+                info.mvccVersion(row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter());
+                info.mvccTxState(txState);
+
+                byte newTxState = row.newMvccTxState() != TxState.NA ? row.newMvccTxState() :
+                    MvccUtils.state(cctx, row.newMvccCoordinatorVersion(), row.newMvccCounter(),
+                        row.newMvccOperationCounter());
+
+                if (newTxState != TxState.ABORTED) {
+                    info.newMvccVersion(row.newMvccCoordinatorVersion(),
+                        row.newMvccCounter(),
+                        row.newMvccOperationCounter());
+
+                    info.newMvccTxState(newTxState);
+                }
+
+                long expireTime = row.expireTime();
+
+                long ttl;
+
+                ttl = expireTime == CU.EXPIRE_TIME_ETERNAL ? CU.TTL_ETERNAL : expireTime - U.currentTimeMillis();
+
+                if (ttl < 0)
+                    ttl = CU.TTL_MINIMUM;
+
+                info.ttl(ttl);
+                info.expireTime(expireTime);
+
+                res.add(info);
+            }
+
+            return res;
+        }
+        finally {
+            unlockEntry();
+        }
+    }
+
+    /** {@inheritDoc} */
     @Override public final CacheObject unswap() throws IgniteCheckedException, GridCacheEntryRemovedException {
         return unswap(true);
     }
@@ -487,7 +586,8 @@
         Object transformClo,
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expirePlc,
-        boolean keepBinary)
+        boolean keepBinary,
+        MvccSnapshot mvccVer)
         throws IgniteCheckedException, GridCacheEntryRemovedException {
         return (CacheObject)innerGet0(
             ver,
@@ -502,6 +602,7 @@
             false,
             keepBinary,
             false,
+            mvccVer,
             null);
     }
 
@@ -512,6 +613,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs) throws IgniteCheckedException, GridCacheEntryRemovedException {
         return (EntryGetResult)innerGet0(
             /*ver*/null,
@@ -526,6 +628,7 @@
             true,
             keepBinary,
             /*reserve*/true,
+            mvccVer,
             readerArgs);
     }
 
@@ -540,6 +643,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs)
         throws IgniteCheckedException, GridCacheEntryRemovedException {
         return (EntryGetResult)innerGet0(
@@ -555,6 +659,7 @@
             true,
             keepBinary,
             false,
+            mvccVer,
             readerArgs);
     }
 
@@ -573,6 +678,7 @@
         boolean retVer,
         boolean keepBinary,
         boolean reserveForLoad,
+        @Nullable MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs
     ) throws IgniteCheckedException, GridCacheEntryRemovedException {
         assert !(retVer && readThrough);
@@ -596,41 +702,53 @@
         try {
             checkObsolete();
 
-            boolean valid = valid(tx != null ? tx.topologyVersion() : cctx.affinity().affinityTopologyVersion());
-
             CacheObject val;
 
-            if (valid) {
-                val = this.val;
+            if (mvccVer != null) {
+                CacheDataRow row = cctx.offheap().mvccRead(cctx, key, mvccVer);
 
-                if (val == null) {
-                    if (isStartVersion()) {
-                        unswap(null, false);
-
-                        val = this.val;
-                    }
+                if (row != null) {
+                    val = row.value();
+                    resVer = row.version();
                 }
+                else
+                    val = null;
+            }
+            else {
+                boolean valid = valid(tx != null ? tx.topologyVersion() : cctx.affinity().affinityTopologyVersion());
 
-                if (val != null) {
-                    long expireTime = expireTimeExtras();
+                if (valid) {
+                    val = this.val;
 
-                    if (expireTime > 0 && (expireTime < U.currentTimeMillis())) {
-                        if (onExpired((CacheObject)cctx.unwrapTemporary(val), null)) {
-                            val = null;
-                            evt = false;
+                    if (val == null) {
+                        if (isStartVersion()) {
+                            unswap(null, false);
 
-                            if (cctx.deferredDelete()) {
-                                deferred = true;
-                                ver0 = ver;
+                            val = this.val;
+                        }
+                    }
+
+                    if (val != null) {
+                        long expireTime = expireTimeExtras();
+
+                        if (expireTime > 0 && (expireTime < U.currentTimeMillis())) {
+                            if (onExpired((CacheObject)cctx.unwrapTemporary(val), null)) {
+                                val = null;
+                                evt = false;
+
+                                if (cctx.deferredDelete()) {
+                                    deferred = true;
+                                    ver0 = ver;
+                                }
+                                else
+                                    obsolete = true;
                             }
-                            else
-                                obsolete = true;
                         }
                     }
                 }
+                else
+                    val = null;
             }
-            else
-                val = null;
 
             CacheObject ret = val;
 
@@ -670,7 +788,7 @@
             if (ret != null && expiryPlc != null)
                 updateTtl(expiryPlc);
 
-            if (retVer) {
+            if (retVer && resVer == null) {
                 resVer = (isNear() && cctx.transactional()) ? ((GridNearCacheEntry)this).dhtVersion() : this.ver;
 
                 if (resVer == null)
@@ -756,7 +874,10 @@
                     long expTime = CU.toExpireTime(ttl);
 
                     // Update indexes before actual write to entry.
-                    storeValue(ret, expTime, nextVer);
+                    if (cctx.mvccEnabled())
+                        cctx.offheap().mvccInitialValue(this, ret, nextVer, expTime);
+                    else
+                        storeValue(ret, expTime, nextVer);
 
                     update(ret, expTime, ttl, nextVer, true);
 
@@ -867,13 +988,19 @@
 
                     // Update indexes.
                     if (ret != null) {
-                        storeValue(ret, expTime, nextVer);
+                        if (cctx.mvccEnabled())
+                            cctx.offheap().mvccInitialValue(this, ret, nextVer, expTime);
+                        else
+                            storeValue(ret, expTime, nextVer);
 
                         if (cctx.deferredDelete() && !isInternal() && !detached() && deletedUnlocked())
                             deletedUnlocked(false);
                     }
                     else {
-                        removeValue();
+                        if (cctx.mvccEnabled())
+                            cctx.offheap().mvccRemoveAll(this);
+                        else
+                            removeValue();
 
                         if (cctx.deferredDelete() && !isInternal() && !detached() && !deletedUnlocked())
                             deletedUnlocked(true);
@@ -897,7 +1024,7 @@
         }
         finally {
             if (touch)
-                cctx.evicts().touch(this, cctx.affinity().affinityTopologyVersion());
+                touch(cctx.affinity().affinityTopologyVersion());
         }
     }
 
@@ -909,6 +1036,301 @@
     }
 
     /** {@inheritDoc} */
+    @Override public final GridCacheUpdateTxResult mvccSet(
+        IgniteInternalTx tx,
+        UUID affNodeId,
+        CacheObject val,
+        long ttl0,
+        AffinityTopologyVersion topVer,
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer,
+        GridCacheOperation op,
+        boolean needHistory,
+        boolean noCreate) throws IgniteCheckedException, GridCacheEntryRemovedException {
+        assert tx != null;
+
+        final boolean valid = valid(tx.topologyVersion());
+
+        final GridCacheVersion newVer;
+
+        WALPointer logPtr = null;
+
+        ensureFreeSpace();
+
+        lockEntry();
+
+        MvccUpdateResult res;
+
+        try {
+            checkObsolete();
+
+            newVer = tx.writeVersion();
+
+            assert newVer != null : "Failed to get write version for tx: " + tx;
+
+            // Determine new ttl and expire time.
+            long expireTime, ttl = ttl0;
+
+            if (ttl == -1L) {
+                ttl = ttlExtras();
+                expireTime = expireTimeExtras();
+            }
+            else
+                expireTime = CU.toExpireTime(ttl);
+
+            assert ttl >= 0 : ttl;
+            assert expireTime >= 0 : expireTime;
+
+            // Detach value before index update.
+            val = cctx.kernalContext().cacheObjects().prepareForCache(val, cctx);
+
+            assert val != null;
+
+            res = cctx.offheap().mvccUpdate(
+                this, val, newVer, expireTime, mvccVer, tx.local(), needHistory, noCreate);
+
+            assert res != null;
+
+            // VERSION_FOUND is possible only on primary node when inserting the same key, or on backup when
+            // updating the key which just has been rebalanced.
+            assert res.resultType() != ResultType.VERSION_FOUND || op == CREATE && tx.local() || !tx.local();
+
+            // PREV_NOT_NULL on CREATE is possible only on primary.
+            assert res.resultType() != ResultType.PREV_NOT_NULL || op != CREATE || tx.local();
+
+            if (res.resultType() == ResultType.VERSION_MISMATCH)
+                throw new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE);
+            else if (noCreate && res.resultType() == ResultType.PREV_NULL)
+                return new GridCacheUpdateTxResult(false);
+            else if (res.resultType() == ResultType.LOCKED) {
+                unlockEntry();
+
+                MvccVersion lockVer = res.resultVersion();
+
+                GridFutureAdapter<GridCacheUpdateTxResult> resFut = new GridFutureAdapter<>();
+
+                IgniteInternalFuture<?> lockFut = cctx.kernalContext().coordinators().waitFor(cctx, lockVer);
+
+                lockFut.listen(new MvccUpdateLockListener(tx, this, affNodeId, topVer, val, ttl0, updateCntr, mvccVer,
+                    op, needHistory, noCreate, resFut));
+
+                return new GridCacheUpdateTxResult(false, resFut);
+            }
+            else if (op == CREATE && tx.local() && (res.resultType() == ResultType.PREV_NOT_NULL ||
+                res.resultType() == ResultType.VERSION_FOUND))
+                throw new IgniteSQLException("Duplicate key during INSERT [key=" + key + ']', DUPLICATE_KEY);
+
+            if (cctx.deferredDelete() && deletedUnlocked() && !detached())
+                deletedUnlocked(false);
+
+            assert tx.local() && updateCntr == null || !tx.local() && updateCntr != null && updateCntr > 0;
+
+            if (tx.local())
+                updateCntr = nextMvccPartitionCounter();
+
+            if (res.resultType() == ResultType.PREV_NULL)
+                tx.txCounters(true).accumulateSizeDelta(cctx.cacheId(), partition(), 1);
+
+            if (cctx.group().persistenceEnabled() && cctx.group().walEnabled())
+                logPtr = cctx.shared().wal().log(new DataRecord(new DataEntry(
+                        cctx.cacheId(),
+                        key,
+                        val,
+                        res.resultType() == ResultType.PREV_NULL ? CREATE : UPDATE,
+                        tx.nearXidVersion(),
+                        newVer,
+                        expireTime,
+                        key.partition(),
+                        updateCntr)));
+
+            update(val, expireTime, ttl, newVer, true);
+
+            mvccDrReplicate(tx.local() ? DR_PRIMARY : DR_BACKUP, val, newVer, topVer, mvccVer);
+
+            recordNodeId(affNodeId, topVer);
+        }
+        finally {
+            if (lockedByCurrentThread()) {
+                unlockEntry();
+
+                cctx.evicts().touch(this, AffinityTopologyVersion.NONE);
+            }
+        }
+
+        onUpdateFinished(updateCntr);
+
+        GridCacheUpdateTxResult updRes = valid ? new GridCacheUpdateTxResult(true, updateCntr, logPtr) :
+            new GridCacheUpdateTxResult(false, logPtr);
+
+        updRes.mvccHistory(res.history());
+
+        return updRes;
+    }
+
+    /** {@inheritDoc} */
+    @Override public final GridCacheUpdateTxResult mvccRemove(
+        IgniteInternalTx tx,
+        UUID affNodeId,
+        AffinityTopologyVersion topVer,
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer,
+        boolean needHistory) throws IgniteCheckedException, GridCacheEntryRemovedException {
+        assert tx != null;
+        assert mvccVer != null;
+
+        final boolean valid = valid(tx.topologyVersion());
+
+        final GridCacheVersion newVer;
+
+        WALPointer logPtr = null;
+
+        lockEntry();
+
+        MvccUpdateResult res;
+
+        try {
+            checkObsolete();
+
+            newVer = tx.writeVersion();
+
+            assert newVer != null : "Failed to get write version for tx: " + tx;
+
+            res = cctx.offheap().mvccRemove(this, mvccVer, tx.local(), needHistory);
+
+            assert res != null;
+
+            if (res.resultType() == ResultType.VERSION_MISMATCH)
+                throw new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE);
+            else if (res.resultType() == ResultType.PREV_NULL)
+                return new GridCacheUpdateTxResult(false);
+            else if (res.resultType() == ResultType.LOCKED) {
+                unlockEntry();
+
+                MvccVersion lockVer = res.resultVersion();
+
+                GridFutureAdapter<GridCacheUpdateTxResult> resFut = new GridFutureAdapter<>();
+
+                IgniteInternalFuture<?> lockFut = cctx.kernalContext().coordinators().waitFor(cctx, lockVer);
+
+                lockFut.listen(new MvccRemoveLockListener(tx, this, affNodeId, topVer, updateCntr, mvccVer, needHistory,
+                    resFut));
+
+                return new GridCacheUpdateTxResult(false, resFut);
+            }
+
+            if (cctx.deferredDelete() && deletedUnlocked() && !detached())
+                deletedUnlocked(false);
+
+            assert tx.local() && updateCntr == null || !tx.local() && updateCntr != null && updateCntr > 0;
+
+            if (tx.local())
+                updateCntr = nextMvccPartitionCounter();
+
+            if (res.resultType() == ResultType.PREV_NOT_NULL)
+                tx.txCounters(true).accumulateSizeDelta(cctx.cacheId(), partition(), -1);
+
+            if (cctx.group().persistenceEnabled() && cctx.group().walEnabled())
+                logPtr = logTxUpdate(tx, null, 0, updateCntr);
+
+            update(null, 0, 0, newVer, true);
+
+            mvccDrReplicate(tx.local() ? DR_PRIMARY : DR_BACKUP, null, newVer, topVer, mvccVer);
+
+            recordNodeId(affNodeId, topVer);
+        }
+        finally {
+            if (lockedByCurrentThread()) {
+                unlockEntry();
+
+                cctx.evicts().touch(this, AffinityTopologyVersion.NONE);
+            }
+        }
+
+        onUpdateFinished(updateCntr);
+
+        GridCacheUpdateTxResult updRes = valid ? new GridCacheUpdateTxResult(true, updateCntr, logPtr) :
+            new GridCacheUpdateTxResult(false, logPtr);
+
+        updRes.mvccHistory(res.history());
+
+        return updRes;
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccLock(GridDhtTxLocalAdapter tx, MvccSnapshot mvccVer)
+        throws GridCacheEntryRemovedException, IgniteCheckedException {
+        assert tx != null;
+        assert mvccVer != null;
+
+        final boolean valid = valid(tx.topologyVersion());
+
+        final GridCacheVersion newVer;
+
+        WALPointer logPtr = null;
+
+        lockEntry();
+
+        try {
+            checkObsolete();
+
+            newVer = tx.writeVersion();
+
+            assert newVer != null : "Failed to get write version for tx: " + tx;
+
+            assert tx.local();
+
+            MvccUpdateResult res = cctx.offheap().mvccLock(this, mvccVer);
+
+            assert res != null;
+
+            if (res.resultType() == ResultType.VERSION_MISMATCH)
+                throw new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE);
+            else if (res.resultType() == ResultType.LOCKED) {
+                unlockEntry();
+
+                MvccVersion lockVer = res.resultVersion();
+
+                GridFutureAdapter<GridCacheUpdateTxResult> resFut = new GridFutureAdapter<>();
+
+                IgniteInternalFuture<?> lockFut = cctx.kernalContext().coordinators().waitFor(cctx, lockVer);
+
+                lockFut.listen(new MvccAcquireLockListener(tx, this, mvccVer, resFut));
+
+                return new GridCacheUpdateTxResult(false, resFut);
+            }
+        }
+        finally {
+            if (lockedByCurrentThread()) {
+                unlockEntry();
+
+                cctx.evicts().touch(this, AffinityTopologyVersion.NONE);
+            }
+        }
+
+        onUpdateFinished(0L);
+
+        return new GridCacheUpdateTxResult(valid, logPtr);
+    }
+
+    /**
+     * Enlist for DR if needed.
+     *
+     * @param drType DR type.
+     * @param val Value.
+     * @param ver Version.
+     * @param topVer Topology version.
+     * @param mvccVer MVCC snapshot.
+     * @throws IgniteCheckedException In case of exception.
+     */
+    private void mvccDrReplicate(GridDrType drType, CacheObject val, GridCacheVersion ver,
+        AffinityTopologyVersion topVer,
+        MvccSnapshot mvccVer) throws IgniteCheckedException {
+
+        if (cctx.isDrEnabled() && drType != DR_NONE && !isInternal())
+            cctx.dr().mvccReplicate(key, val, rawTtl(), rawExpireTime(), ver.conflictVersion(), drType, topVer, mvccVer);
+    }
+
+    /** {@inheritDoc} */
     @Override public final GridCacheUpdateTxResult innerSet(
         @Nullable IgniteInternalTx tx,
         UUID evtNodeId,
@@ -930,15 +1352,16 @@
         @Nullable UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr
+        @Nullable Long updateCntr,
+        @Nullable MvccSnapshot mvccVer
     ) throws IgniteCheckedException, GridCacheEntryRemovedException {
         CacheObject old;
 
-        boolean valid = valid(tx != null ? tx.topologyVersion() : topVer);
+        final boolean valid = valid(tx != null ? tx.topologyVersion() : topVer);
 
         // Lock should be held by now.
         if (!cctx.isAll(this, filter))
-            return new GridCacheUpdateTxResult(false, null, null);
+            return new GridCacheUpdateTxResult(false);
 
         final GridCacheVersion newVer;
 
@@ -952,6 +1375,9 @@
 
         ensureFreeSpace();
 
+        GridLongList mvccWaitTxs = null;
+
+        lockListenerReadLock();
         lockEntry();
 
         try {
@@ -962,7 +1388,7 @@
 
                 // It is possible that 'get' could load more recent value.
                 if (!((GridNearCacheEntry)this).recordDhtVersion(dhtVer))
-                    return new GridCacheUpdateTxResult(false, null, logPtr);
+                    return new GridCacheUpdateTxResult(false, logPtr);
             }
 
             assert tx == null || (!tx.local() && tx.onePhaseCommit()) || tx.ownsLock(this) :
@@ -998,7 +1424,7 @@
                 key0 = e.key();
 
                 if (interceptorVal == null)
-                    return new GridCacheUpdateTxResult(false, (CacheObject)cctx.unwrapTemporary(old), logPtr);
+                    return new GridCacheUpdateTxResult(false, logPtr);
                 else if (interceptorVal != val0)
                     val0 = cctx.unwrapTemporary(interceptorVal);
 
@@ -1030,7 +1456,18 @@
 
             assert val != null;
 
-            storeValue(val, expireTime, newVer);
+            if (cctx.mvccEnabled()) {
+                assert mvccVer != null;
+
+                mvccWaitTxs = cctx.offheap().mvccUpdateNative(tx.local(),
+                    this,
+                    val,
+                    newVer,
+                    expireTime,
+                    mvccVer);
+            }
+            else
+                storeValue(val, expireTime, newVer);
 
             if (cctx.deferredDelete() && deletedUnlocked() && !isInternal() && !detached())
                 deletedUnlocked(false);
@@ -1089,11 +1526,10 @@
                     null,
                     topVer);
             }
-
-            cctx.dataStructures().onEntryUpdated(key, false, keepBinary);
         }
         finally {
             unlockEntry();
+            unlockListenerReadLock();
         }
 
         onUpdateFinished(updateCntr0);
@@ -1109,8 +1545,8 @@
         if (intercept)
             cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, val, val0, keepBinary, updateCntr0));
 
-        return valid ? new GridCacheUpdateTxResult(true, retval ? old : null, updateCntr0, logPtr) :
-            new GridCacheUpdateTxResult(false, null, logPtr);
+        return valid ? new GridCacheUpdateTxResult(true, updateCntr0, logPtr, mvccWaitTxs) :
+            new GridCacheUpdateTxResult(false, logPtr);
     }
 
     /**
@@ -1139,7 +1575,8 @@
         @Nullable UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr
+        @Nullable Long updateCntr,
+        @Nullable MvccSnapshot mvccVer
     ) throws IgniteCheckedException, GridCacheEntryRemovedException {
         assert cctx.transactional();
 
@@ -1147,11 +1584,11 @@
 
         GridCacheVersion newVer;
 
-        boolean valid = valid(tx != null ? tx.topologyVersion() : topVer);
+        final boolean valid = valid(tx != null ? tx.topologyVersion() : topVer);
 
         // Lock should be held by now.
         if (!cctx.isAll(this, filter))
-            return new GridCacheUpdateTxResult(false, null, null);
+            return new GridCacheUpdateTxResult(false);
 
         GridCacheVersion obsoleteVer = null;
 
@@ -1169,6 +1606,9 @@
 
         boolean marked = false;
 
+        GridLongList mvccWaitTxs = null;
+
+        lockListenerReadLock();
         lockEntry();
 
         try {
@@ -1179,7 +1619,7 @@
 
                 // It is possible that 'get' could load more recent value.
                 if (!((GridNearCacheEntry)this).recordDhtVersion(dhtVer))
-                    return new GridCacheUpdateTxResult(false, null, logPtr);
+                    return new GridCacheUpdateTxResult(false, logPtr);
             }
 
             assert tx == null || (!tx.local() && tx.onePhaseCommit()) || tx.ownsLock(this) :
@@ -1207,11 +1647,17 @@
                 if (cctx.cancelRemove(interceptRes)) {
                     CacheObject ret = cctx.toCacheObject(cctx.unwrapTemporary(interceptRes.get2()));
 
-                    return new GridCacheUpdateTxResult(false, ret, logPtr);
+                    return new GridCacheUpdateTxResult(false, logPtr);
                 }
             }
 
-            removeValue();
+            if (cctx.mvccEnabled()) {
+                assert mvccVer != null;
+
+                mvccWaitTxs = cctx.offheap().mvccRemoveNative(tx.local(), this, mvccVer);
+            }
+            else
+                removeValue();
 
             update(null, 0, 0, newVer, true);
 
@@ -1292,8 +1738,6 @@
                     topVer);
             }
 
-            cctx.dataStructures().onEntryUpdated(key, true, keepBinary);
-
             deferred = cctx.deferredDelete() && !detached() && !isInternal();
 
             if (intercept)
@@ -1317,6 +1761,7 @@
         }
         finally {
             unlockEntry();
+            unlockListenerReadLock();
         }
 
         if (deferred)
@@ -1333,18 +1778,10 @@
         if (intercept)
             cctx.config().getInterceptor().onAfterRemove(entry0);
 
-        if (valid) {
-            CacheObject ret;
-
-            if (interceptRes != null)
-                ret = cctx.toCacheObject(cctx.unwrapTemporary(interceptRes.get2()));
-            else
-                ret = old;
-
-            return new GridCacheUpdateTxResult(true, ret, updateCntr0, logPtr);
-        }
+        if (valid)
+            return new GridCacheUpdateTxResult(true, updateCntr0, logPtr, mvccWaitTxs);
         else
-            return new GridCacheUpdateTxResult(false, null, logPtr);
+            return new GridCacheUpdateTxResult(false, logPtr);
     }
 
     /**
@@ -1387,6 +1824,7 @@
 
         EntryProcessorResult<Object> invokeRes = null;
 
+        lockListenerReadLock();
         lockEntry();
 
         try {
@@ -1493,6 +1931,8 @@
 
                 CacheInvokeEntry<Object, Object> entry = new CacheInvokeEntry<>(key, old, version(), keepBinary, this);
 
+                IgniteThread.onEntryProcessorEntered(false);
+
                 try {
                     Object computed = entryProcessor.process(entry, invokeArgs);
 
@@ -1517,6 +1957,9 @@
 
                     invokeRes = CacheInvokeResult.fromError(e);
                 }
+                finally {
+                    IgniteThread.onEntryProcessorLeft();
+                }
 
                 if (!entry.modified()) {
                     if (expiryPlc != null && !readFromStore && hasValueUnlocked())
@@ -1607,6 +2050,8 @@
 
                 update(updated, expireTime, ttl, ver, true);
 
+                logUpdate(op, updated, ver, expireTime, 0);
+
                 if (evt) {
                     CacheObject evtOld = null;
 
@@ -1637,6 +2082,8 @@
 
                 update(null, CU.TTL_ETERNAL, CU.EXPIRE_TIME_ETERNAL, ver, true);
 
+                logUpdate(op, null, ver, CU.EXPIRE_TIME_ETERNAL, 0);
+
                 if (evt) {
                     CacheObject evtOld = null;
 
@@ -1682,8 +2129,6 @@
                 onUpdateFinished(updateCntr);
             }
 
-            cctx.dataStructures().onEntryUpdated(key, op == GridCacheOperation.DELETE, keepBinary);
-
             if (intercept) {
                 if (op == GridCacheOperation.UPDATE)
                     cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, updated, updated0, keepBinary, 0L));
@@ -1693,6 +2138,7 @@
         }
         finally {
             unlockEntry();
+            unlockListenerReadLock();
         }
 
         return new GridTuple3<>(res,
@@ -1742,6 +2188,7 @@
         if (!primary && !isNear())
             ensureFreeSpace();
 
+        lockListenerReadLock();
         lockEntry();
 
         try {
@@ -1823,6 +2270,8 @@
                             CacheInvokeEntry<Object, Object> entry =
                                 new CacheInvokeEntry<>(key, prevVal, version(), keepBinary, this);
 
+                            IgniteThread.onEntryProcessorEntered(true);
+
                             try {
                                 entryProcessor.process(entry, invokeArgs);
 
@@ -1832,6 +2281,9 @@
                             catch (Exception ignore) {
                                 evtVal = prevVal;
                             }
+                            finally {
+                                IgniteThread.onEntryProcessorLeft();
+                            }
                         }
                         else
                             evtVal = (CacheObject)writeObj;
@@ -1969,8 +2421,6 @@
                     topVer);
             }
 
-            cctx.dataStructures().onEntryUpdated(key, c.op == GridCacheOperation.DELETE, keepBinary);
-
             if (intercept) {
                 if (c.op == GridCacheOperation.UPDATE) {
                     cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(
@@ -1998,6 +2448,7 @@
         }
         finally {
             unlockEntry();
+            unlockListenerReadLock();
         }
 
         onUpdateFinished(c.updateRes.updateCounter());
@@ -2163,7 +2614,10 @@
                     ", val=" + val + ']');
             }
 
-            removeValue();
+            if (cctx.mvccEnabled())
+                cctx.offheap().mvccRemoveAll(this);
+            else
+                removeValue();
         }
         finally {
             unlockEntry();
@@ -2395,7 +2849,10 @@
             ver = newVer;
             flags &= ~IS_EVICT_DISABLED;
 
-            removeValue();
+            if (cctx.mvccEnabled())
+                cctx.offheap().mvccRemoveAll(this);
+            else
+                removeValue();
 
             onInvalidate();
 
@@ -2660,7 +3117,10 @@
             long delta = expireTime - U.currentTimeMillis();
 
             if (delta <= 0) {
-                removeValue();
+                if (cctx.mvccEnabled())
+                    cctx.offheap().mvccRemoveAll(this);
+                else
+                    removeValue();
 
                 return true;
             }
@@ -2725,6 +3185,10 @@
     @Override public boolean initialValue(
         CacheObject val,
         GridCacheVersion ver,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte mvccTxState,
+        byte newMvccTxState,
         long ttl,
         long expireTime,
         boolean preload,
@@ -2739,6 +3203,7 @@
 
         GridCacheVersion oldVer = null;
 
+        lockListenerReadLock();
         lockEntry();
 
         try {
@@ -2799,11 +3264,63 @@
                         }
                     }
 
-                    storeValue(val, expTime, ver);
+                    if (cctx.mvccEnabled()) {
+                        if (preload && mvccVer != null) {
+                            cctx.offheap().mvccInitialValueIfAbsent(this,
+                                val,
+                                ver,
+                                expTime,
+                                mvccVer,
+                                newMvccVer,
+                                mvccTxState,
+                                newMvccTxState);
+                        }
+                        else
+                            cctx.offheap().mvccInitialValue(this, val, ver, expTime, mvccVer, newMvccVer);
+                    }
+                    else
+                        storeValue(val, expTime, ver);
                 }
             }
-            else // Optimization to access storage only once.
-                update = storeValue(val, expTime, ver, p);
+            else {
+                if (cctx.mvccEnabled()) {
+                    // cannot identify whether the entry is exist on the fly
+                    unswap(false);
+
+                    if (update = p.apply(null)) {
+                        // If entry is already unswapped and we are modifying it, we must run deletion callbacks for old value.
+                        long oldExpTime = expireTimeUnlocked();
+                        long delta = (oldExpTime == 0 ? 0 : oldExpTime - U.currentTimeMillis());
+
+                        if (delta < 0) {
+                            if (onExpired(this.val, null)) {
+                                if (cctx.deferredDelete()) {
+                                    deferred = true;
+                                    oldVer = this.ver;
+                                }
+                                else if (val == null)
+                                    obsolete = true;
+                            }
+                        }
+
+                        if (preload && mvccVer != null) {
+                            cctx.offheap().mvccInitialValueIfAbsent(this,
+                                val,
+                                ver,
+                                expTime,
+                                mvccVer,
+                                newMvccVer,
+                                mvccTxState,
+                                newMvccTxState);
+                        }
+                        else
+                            cctx.offheap().mvccInitialValue(this, val, ver, expTime, mvccVer, newMvccVer);
+                    }
+                }
+                else
+                    // Optimization to access storage only once.
+                    update = storeValue(val, expTime, ver, p);
+            }
 
             if (update) {
                 update(val, expTime, ttl, ver, true);
@@ -2852,8 +3369,6 @@
                         updateCntr,
                         null,
                         topVer);
-
-                    cctx.dataStructures().onEntryUpdated(key, false, true);
                 }
 
                 onUpdateFinished(updateCntr);
@@ -2870,6 +3385,7 @@
         }
         finally {
             unlockEntry();
+            unlockListenerReadLock();
 
             // It is necessary to execute these callbacks outside of lock to avoid deadlocks.
 
@@ -2904,6 +3420,14 @@
         return 0;
     }
 
+    /**
+     * @return Next mvcc update counter.
+     */
+    protected long nextMvccPartitionCounter() {
+        return 0;
+    }
+
+
     /** {@inheritDoc} */
     @Override public GridCacheVersionedEntryEx versionedEntry(final boolean keepBinary)
         throws IgniteCheckedException, GridCacheEntryRemovedException {
@@ -2991,7 +3515,10 @@
                     // Detach value before index update.
                     val = cctx.kernalContext().cacheObjects().prepareForCache(val, cctx);
 
-                    if (val != null) {
+                if (val != null) {
+                    if (cctx.mvccEnabled())
+                        cctx.offheap().mvccInitialValue(this, val, newVer, expTime);
+                    else
                         storeValue(val, expTime, newVer);
 
                         if (deletedUnlocked())
@@ -3446,7 +3973,10 @@
         cctx.shared().database().checkpointReadLock();
 
         try {
-            removeValue();
+            if (cctx.mvccEnabled())
+                cctx.offheap().mvccRemoveAll(this);
+            else
+                removeValue();
         }
         finally {
             cctx.shared().database().checkpointReadUnlock();
@@ -3621,8 +4151,7 @@
     }
 
     /**
-     * Stores value in offheap.
-     *
+     * Stores value inoffheap.*
      * @param val Value.
      * @param expireTime Expire time.
      * @param ver New entry version.
@@ -4329,12 +4858,37 @@
         lock.unlock();
     }
 
+    /**
+     * This method would obtain read lock for continuous query listener setup. This
+     * is to prevent race condition between entry update and continuous query setup.
+     * You should make sure you obtain this read lock first before locking the entry
+     * in order to ensure that the entry update is completed and existing continuous
+     * query notified before the next cache listener update
+     */
+    private void lockListenerReadLock() {
+        listenerLock.lock();
+    }
+
+    /**
+     * unlock the listener read lock
+     *
+     * @see #lockListenerReadLock()
+     */
+    private void unlockListenerReadLock() {
+        listenerLock.unlock();
+    }
+
     /** {@inheritDoc} */
     @Override public boolean lockedByCurrentThread() {
         return lock.isHeldByCurrentThread();
     }
 
     /** {@inheritDoc} */
+    @Override public void touch(AffinityTopologyVersion topVer) {
+        context().evicts().touch(this, topVer);
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean equals(Object o) {
         // Identity comparison left on purpose.
         return o == this;
@@ -4357,6 +4911,456 @@
         }
     }
 
+    /** */
+    private static class MvccRemoveLockListener implements IgniteInClosure<IgniteInternalFuture> {
+        /** */
+        private static final long serialVersionUID = -1578749008606139541L;
+
+        /** */
+        private final IgniteInternalTx tx;
+
+        /** */
+        private final AffinityTopologyVersion topVer;
+
+        /** */
+        private final UUID affNodeId;
+
+        /** */
+        private final MvccSnapshot mvccVer;
+
+        /** */
+        private final Long updateCntr;
+
+        /** */
+        private final boolean needHistory;
+
+        /** */
+        private final GridFutureAdapter<GridCacheUpdateTxResult> resFut;
+
+        /** */
+        private GridCacheMapEntry entry;
+
+        /** */
+        MvccRemoveLockListener(IgniteInternalTx tx,
+            GridCacheMapEntry entry,
+            UUID affNodeId,
+            AffinityTopologyVersion topVer,
+            Long updateCntr,
+            MvccSnapshot mvccVer,
+            boolean needHistory,
+            GridFutureAdapter<GridCacheUpdateTxResult> resFut) {
+            this.tx = tx;
+            this.entry = entry;
+            this.topVer = topVer;
+            this.affNodeId = affNodeId;
+            this.mvccVer = mvccVer;
+            this.updateCntr = updateCntr;
+            this.needHistory = needHistory;
+            this.resFut = resFut;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void apply(IgniteInternalFuture lockFut) {
+            WALPointer logPtr = null;
+            long updateCntr0;
+            boolean valid;
+
+            GridCacheContext cctx = entry.context();
+            GridCacheVersion newVer = tx.writeVersion();
+
+            MvccUpdateResult res;
+
+            try {
+                lockFut.get();
+
+                while (true) {
+                    entry.lockEntry();
+
+                    if (entry.obsoleteVersionExtras() == null)
+                        break;
+
+                    entry.unlockEntry();
+
+                    entry = (GridCacheMapEntry)cctx.cache().entryEx(entry.key());
+                }
+
+                valid = entry.valid(tx.topologyVersion());
+
+                cctx.shared().database().checkpointReadLock();
+
+                try {
+                    res = cctx.offheap().mvccRemove(entry, mvccVer, tx.local(), needHistory);
+                } finally {
+                    cctx.shared().database().checkpointReadUnlock();
+                }
+
+                assert res != null;
+
+                if (res.resultType() == ResultType.VERSION_MISMATCH) {
+                    resFut.onDone(new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE));
+
+                    return;
+                }
+                else if (res.resultType() == ResultType.PREV_NULL) {
+                    resFut.onDone(new GridCacheUpdateTxResult(false));
+
+                    return;
+                }
+                else if (res.resultType() == ResultType.LOCKED) {
+                    entry.unlockEntry();
+
+                    IgniteInternalFuture<?> lockFuture = cctx.kernalContext().coordinators().waitFor(cctx, res.resultVersion());
+
+                    lockFuture.listen(this);
+
+                    return;
+                }
+
+                if (cctx.deferredDelete() && entry.deletedUnlocked() && !entry.detached())
+                    entry.deletedUnlocked(false);
+
+                assert tx.local() && updateCntr == null || !tx.local() && updateCntr != null && updateCntr > 0;
+
+                updateCntr0 = tx.local() ? entry.nextMvccPartitionCounter() : updateCntr;
+
+                if (updateCntr != null && updateCntr != 0)
+                    updateCntr0 = updateCntr;
+
+                if (res.resultType() == ResultType.PREV_NOT_NULL)
+                    tx.txCounters(true).accumulateSizeDelta(cctx.cacheId(), entry.partition(), -1);
+
+                if (cctx.group().persistenceEnabled() && cctx.group().walEnabled())
+                    logPtr = cctx.shared().wal().log(new DataRecord(new DataEntry(
+                            cctx.cacheId(),
+                            entry.key(),
+                            null,
+                            DELETE,
+                            tx.nearXidVersion(),
+                            tx.writeVersion(),
+                            0,
+                            entry.key().partition(),
+                            updateCntr0)));
+
+                entry.update(null, 0, 0, newVer, true);
+
+                entry.mvccDrReplicate(tx.local() ? DR_PRIMARY : DR_BACKUP, null, newVer, topVer, mvccVer);
+
+                entry.recordNodeId(affNodeId, topVer);
+            }
+            catch (IgniteCheckedException e) {
+                resFut.onDone(e);
+
+                return;
+            }
+            finally {
+                if (entry.lockedByCurrentThread()) {
+                    entry.unlockEntry();
+
+                    cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
+                }
+            }
+
+            entry.onUpdateFinished(updateCntr0);
+
+            GridCacheUpdateTxResult updRes = valid ? new GridCacheUpdateTxResult(true, updateCntr0, logPtr)
+                : new GridCacheUpdateTxResult(false, logPtr);
+
+            updRes.mvccHistory(res.history());
+
+            resFut.onDone(updRes);
+        }
+    }
+
+    /** */
+    private static class MvccAcquireLockListener implements IgniteInClosure<IgniteInternalFuture> {
+        /** */
+        private static final long serialVersionUID = -1578749008606139541L;
+
+        /** */
+        private final IgniteInternalTx tx;
+
+        /** */
+        private final MvccSnapshot mvccVer;
+
+        /** */
+        private final GridFutureAdapter<GridCacheUpdateTxResult> resFut;
+
+        /** */
+        private GridCacheMapEntry entry;
+
+        /** */
+        MvccAcquireLockListener(IgniteInternalTx tx,
+            GridCacheMapEntry entry,
+            MvccSnapshot mvccVer,
+            GridFutureAdapter<GridCacheUpdateTxResult> resFut) {
+            this.tx = tx;
+            this.entry = entry;
+            this.mvccVer = mvccVer;
+            this.resFut = resFut;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("unchecked")
+        @Override public void apply(IgniteInternalFuture lockFut) {
+            WALPointer logPtr = null;
+            boolean valid;
+
+            GridCacheContext cctx = entry.context();
+
+            try {
+                lockFut.get();
+
+                while (true) {
+                    entry.lockEntry();
+
+                    if (entry.obsoleteVersionExtras() == null)
+                        break;
+
+                    entry.unlockEntry();
+
+                    entry = (GridCacheMapEntry)cctx.cache().entryEx(entry.key());
+                }
+
+                valid = entry.valid(tx.topologyVersion());
+
+                cctx.shared().database().checkpointReadLock();
+
+                MvccUpdateResult res;
+
+                try {
+                    res = cctx.offheap().mvccLock(entry, mvccVer);
+                }
+                finally {
+                    cctx.shared().database().checkpointReadUnlock();
+                }
+
+                assert res != null;
+
+                if (res.resultType() == ResultType.VERSION_MISMATCH) {
+                    resFut.onDone(new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE));
+
+                    return;
+                }
+                else if (res.resultType() == ResultType.LOCKED) {
+                    entry.unlockEntry();
+
+                    cctx.kernalContext().coordinators().waitFor(cctx, res.resultVersion()).listen(this);
+
+                    return;
+                }
+            }
+            catch (IgniteCheckedException e) {
+                resFut.onDone(e);
+
+                return;
+            }
+            finally {
+                if (entry.lockedByCurrentThread()) {
+                    entry.unlockEntry();
+
+                    cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
+                }
+            }
+
+            entry.onUpdateFinished(0L);
+
+            resFut.onDone(new GridCacheUpdateTxResult(valid, logPtr));
+        }
+    }
+
+    /** */
+    private static class MvccUpdateLockListener implements IgniteInClosure<IgniteInternalFuture> {
+        /** */
+        private static final long serialVersionUID = 8452738214760268397L;
+
+        /** */
+        private final IgniteInternalTx tx;
+
+        /** */
+        private final UUID affNodeId;
+
+        /** */
+        private final AffinityTopologyVersion topVer;
+
+        /** */
+        private final CacheObject val;
+
+        /** */
+        private final long ttl;
+
+        /** */
+        private final Long updateCntr;
+
+        /** */
+        private final MvccSnapshot mvccVer;
+
+        /** */
+        private final GridFutureAdapter<GridCacheUpdateTxResult> resFut;
+
+        /** */
+        private GridCacheMapEntry entry;
+
+        /** */
+        private GridCacheOperation op;
+
+        /** */
+        private final boolean needHistory;
+
+        /** */
+        private final boolean noCreate;
+
+        /** */
+        MvccUpdateLockListener(IgniteInternalTx tx,
+            GridCacheMapEntry entry,
+            UUID affNodeId,
+            AffinityTopologyVersion topVer,
+            CacheObject val,
+            long ttl,
+            Long updateCntr,
+            MvccSnapshot mvccVer,
+            GridCacheOperation op,
+            boolean needHistory,
+            boolean noCreate,
+            GridFutureAdapter<GridCacheUpdateTxResult> resFut) {
+            this.tx = tx;
+            this.entry = entry;
+            this.affNodeId = affNodeId;
+            this.topVer = topVer;
+            this.val = val;
+            this.ttl = ttl;
+            this.updateCntr = updateCntr;
+            this.mvccVer = mvccVer;
+            this.op = op;
+            this.needHistory = needHistory;
+            this.noCreate = noCreate;
+            this.resFut = resFut;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void apply(IgniteInternalFuture lockFut) {
+            WALPointer logPtr = null;
+            long updateCntr0;
+            boolean valid;
+
+            GridCacheContext cctx = entry.context();
+            GridCacheVersion newVer = tx.writeVersion();
+
+            MvccUpdateResult res;
+
+            try {
+                lockFut.get();
+
+                entry.ensureFreeSpace();
+
+                while (true) {
+                    entry.lockEntry();
+
+                    if (entry.obsoleteVersionExtras() == null)
+                        break;
+
+                    entry.unlockEntry();
+
+                    entry = (GridCacheMapEntry)cctx.cache().entryEx(entry.key());
+                }
+
+                valid = entry.valid(tx.topologyVersion());
+
+                // Determine new ttl and expire time.
+                long expireTime, ttl = this.ttl;
+
+                if (ttl == -1L) {
+                    ttl = entry.ttlExtras();
+                    expireTime = entry.expireTimeExtras();
+                }
+                else
+                    expireTime = CU.toExpireTime(ttl);
+
+                assert ttl >= 0 : ttl;
+                assert expireTime >= 0 : expireTime;
+
+                cctx.shared().database().checkpointReadLock();
+
+                try {
+                    res = cctx.offheap().mvccUpdate(
+                        entry, val, newVer, expireTime, mvccVer, tx.local(), needHistory, noCreate);
+                } finally {
+                    cctx.shared().database().checkpointReadUnlock();
+                }
+
+                assert res != null;
+
+                if (res.resultType() == ResultType.VERSION_MISMATCH) {
+                    resFut.onDone(new IgniteSQLException("Mvcc version mismatch.", CONCURRENT_UPDATE));
+
+                    return;
+                }
+                else if (res.resultType() == ResultType.LOCKED) {
+                    entry.unlockEntry();
+
+                    cctx.kernalContext().coordinators().waitFor(cctx, res.resultVersion()).listen(this);
+
+                    return;
+                }
+                else if (op == CREATE && tx.local() && (res.resultType() == ResultType.PREV_NOT_NULL ||
+                    res.resultType() == ResultType.VERSION_FOUND)) {
+                    resFut.onDone(new IgniteSQLException("Duplicate key during INSERT [key=" + entry.key() + ']',
+                        DUPLICATE_KEY));
+
+                    return;
+                }
+
+                if (cctx.deferredDelete() && entry.deletedUnlocked() && !entry.detached())
+                    entry.deletedUnlocked(false);
+
+                assert tx.local() && updateCntr == null || !tx.local() && updateCntr != null && updateCntr > 0;
+
+                updateCntr0 = tx.local() ? entry.nextMvccPartitionCounter() : updateCntr;
+
+                if (res.resultType() == ResultType.PREV_NULL)
+                    tx.txCounters(true).accumulateSizeDelta(cctx.cacheId(), entry.partition(), 1);
+
+                if (cctx.group().persistenceEnabled() && cctx.group().walEnabled())
+                    logPtr = cctx.shared().wal().log(new DataRecord(new DataEntry(
+                            cctx.cacheId(),
+                            entry.key(),
+                            val,
+                            res.resultType() == ResultType.PREV_NULL ? CREATE : UPDATE,
+                            tx.nearXidVersion(),
+                            newVer,
+                            expireTime,
+                            entry.key().partition(),
+                            updateCntr0)));
+
+                entry.update(val, expireTime, ttl, newVer, true);
+
+                entry.mvccDrReplicate(tx.local() ? DR_PRIMARY : DR_BACKUP, val, newVer, topVer, mvccVer);
+
+                entry.recordNodeId(affNodeId, topVer);
+            }
+            catch (IgniteCheckedException e) {
+                resFut.onDone(e);
+
+                return;
+            }
+            finally {
+                if (entry.lockedByCurrentThread()) {
+                    entry.unlockEntry();
+
+                    cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
+                }
+            }
+
+            entry.onUpdateFinished(updateCntr0);
+
+            GridCacheUpdateTxResult updRes = valid ? new GridCacheUpdateTxResult(true, updateCntr0, logPtr)
+                : new GridCacheUpdateTxResult(false, logPtr);
+
+            updRes.mvccHistory(res.history());
+
+            resFut.onDone(updRes);
+        }
+    }
+
     /**
      *
      */
@@ -5405,6 +6409,8 @@
         private IgniteBiTuple<Object, Exception> runEntryProcessor(CacheInvokeEntry<Object, Object> invokeEntry) {
             EntryProcessor<Object, Object, ?> entryProcessor = (EntryProcessor<Object, Object, ?>)writeObj;
 
+            IgniteThread.onEntryProcessorEntered(true);
+
             try {
                 Object computed = entryProcessor.process(invokeEntry, invokeArgs);
 
@@ -5422,10 +6428,16 @@
                 return null;
             }
             catch (Exception e) {
+                if (e instanceof UnregisteredClassException || e instanceof UnregisteredBinaryTypeException)
+                    throw (IgniteException) e;
+
                 writeObj = invokeEntry.valObj;
 
                 return new IgniteBiTuple<>(null, e);
             }
+            finally {
+                IgniteThread.onEntryProcessorLeft();
+            }
         }
 
         /** {@inheritDoc} */
@@ -5433,4 +6445,83 @@
             return S.toString(AtomicCacheUpdateClosure.class, this);
         }
     }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccUpdateRowsWithPreloadInfo(
+        IgniteInternalTx tx,
+        UUID affNodeId,
+        AffinityTopologyVersion topVer,
+        Long updateCntr,
+        List<GridCacheEntryInfo> entries,
+        GridCacheOperation op,
+        MvccSnapshot mvccVer)
+        throws IgniteCheckedException, GridCacheEntryRemovedException {
+        assert updateCntr != null && updateCntr > 0 && !tx.local();
+
+        WALPointer logPtr = null;
+
+        ensureFreeSpace();
+
+        lockEntry();
+
+        try {
+            checkObsolete();
+
+            CacheObject val = null;
+
+            for (int i = 0; i < entries.size(); i++) {
+                GridCacheMvccEntryInfo info = (GridCacheMvccEntryInfo)entries.get(i);
+
+                if (val == null && op != DELETE && MvccUtils.compare(info.mvccVersion(),
+                    mvccVer.coordinatorVersion(),
+                    mvccVer.counter(),
+                    mvccVer.operationCounter()) == 0)
+                    val = info.value();
+
+                cctx.offheap().mvccUpdateRowWithPreloadInfo(this,
+                    info.value(),
+                    info.version(),
+                    info.expireTime(),
+                    info.mvccVersion(),
+                    info.newMvccVersion(),
+                    info.mvccTxState(),
+                    info.newMvccTxState());
+            }
+
+            if (cctx.deferredDelete() && deletedUnlocked() && !detached())
+                deletedUnlocked(false);
+
+            long expireTime = CU.EXPIRE_TIME_ETERNAL;
+            long ttl = CU.TTL_ETERNAL;
+
+            GridCacheVersion ver = tx.writeVersion();
+
+            if (cctx.group().persistenceEnabled() && cctx.group().walEnabled())
+                logPtr = cctx.shared().wal().log(new DataRecord(new DataEntry(
+                    cctx.cacheId(),
+                    key,
+                    val,
+                    op,
+                    tx.nearXidVersion(),
+                    ver,
+                    CU.EXPIRE_TIME_ETERNAL,
+                    key.partition(),
+                    updateCntr)));
+
+            update(val, expireTime, ttl, ver, true);
+
+            mvccDrReplicate(DR_BACKUP, val, ver, topVer, mvccVer);
+
+            recordNodeId(affNodeId, topVer);
+        }
+        finally {
+            if (lockedByCurrentThread()) {
+                unlockEntry();
+
+                cctx.evicts().touch(this, AffinityTopologyVersion.NONE);
+            }
+        }
+
+        return new GridCacheUpdateTxResult(true, logPtr);
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccEntryInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccEntryInfo.java
new file mode 100644
index 0000000..a80ddc0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccEntryInfo.java
@@ -0,0 +1,257 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_BIT_OFF;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_MASK;
+
+/**
+ *
+ */
+public class GridCacheMvccEntryInfo extends GridCacheEntryInfo implements MvccVersionAware, MvccUpdateVersionAware {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long mvccCrdVer;
+
+    /** */
+    private long mvccCntr;
+
+    /** */
+    private int mvccOpCntr;
+
+    /** */
+    private long newMvccCrdVer;
+
+    /** */
+    private long newMvccCntr;
+
+    /** */
+    private int newMvccOpCntr;
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCoordinatorVersion() {
+        return newMvccCrdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCounter() {
+        return newMvccCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int newMvccOperationCounter() {
+        return newMvccOpCntr & ~MVCC_HINTS_MASK;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte newMvccTxState() {
+        return (byte)(newMvccOpCntr >>> MVCC_HINTS_BIT_OFF);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return mvccCrdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return mvccCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return mvccOpCntr & ~MVCC_HINTS_MASK;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte mvccTxState() {
+        return (byte)(mvccOpCntr >>> MVCC_HINTS_BIT_OFF);
+    }
+
+    /**
+     * @param mvccTxState Mvcc version Tx state hint.
+     */
+    public void mvccTxState(byte mvccTxState) {
+        mvccOpCntr = (mvccOpCntr & ~MVCC_HINTS_MASK) | ((int)mvccTxState << MVCC_HINTS_BIT_OFF);
+    }
+
+    /**
+     * @param newMvccTxState New mvcc version Tx state hint.
+     */
+    public void newMvccTxState(byte newMvccTxState) {
+        newMvccOpCntr = (newMvccOpCntr & ~MVCC_HINTS_MASK) | ((int)newMvccTxState << MVCC_HINTS_BIT_OFF);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void newMvccVersion(long crd, long cntr, int opCntr) {
+        newMvccCrdVer = crd;
+        newMvccCntr = cntr;
+        newMvccOpCntr = opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mvccVersion(long crd, long cntr, int opCntr) {
+        mvccCrdVer = crd;
+        mvccCntr = cntr;
+        mvccOpCntr = opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 6:
+                if (!writer.writeLong("mvccCntr", mvccCntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeLong("mvccCrdVer", mvccCrdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 8:
+                if (!writer.writeInt("mvccOpCntr", mvccOpCntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 9:
+                if (!writer.writeLong("newMvccCntr", newMvccCntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeLong("newMvccCrdVer", newMvccCrdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 11:
+                if (!writer.writeInt("newMvccOpCntr", newMvccOpCntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 6:
+                mvccCntr = reader.readLong("mvccCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                mvccCrdVer = reader.readLong("mvccCrdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 8:
+                mvccOpCntr = reader.readInt("mvccOpCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 9:
+                newMvccCntr = reader.readLong("newMvccCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 10:
+                newMvccCrdVer = reader.readLong("newMvccCrdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 11:
+                newMvccOpCntr = reader.readInt("newMvccOpCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridCacheMvccEntryInfo.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 12;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 143;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridCacheMvccEntryInfo.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManager.java
index fade833..690b15a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManager.java
@@ -360,7 +360,7 @@
             try {
                 entry.removeExplicitNodeLocks(leftNodeId);
 
-                entry.context().evicts().touch(entry, topVer);
+                entry.touch(topVer);
             }
             catch (GridCacheEntryRemovedException ignore) {
                 if (log.isDebugEnabled())
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheOperation.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheOperation.java
index 555f825..377f95f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheOperation.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheOperation.java
@@ -17,7 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache;
 
-import org.jetbrains.annotations.*;
+import org.jetbrains.annotations.Nullable;
 
 /**
  * Cache value operations.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
index 4ec7774..ca2d1c8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java
@@ -116,6 +116,7 @@
 import org.apache.ignite.lang.IgniteProductVersion;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.thread.IgniteThread;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
@@ -309,7 +310,7 @@
      * @param cache Discovery data cache.
      */
     private void processEventInactive(DiscoveryEvent evt, DiscoCache cache) {
-        // Clean local join caches context.
+        // Clear local join caches context.
         cctx.cache().localJoinCachesContext();
 
         if (log.isDebugEnabled())
@@ -330,6 +331,22 @@
         cctx.io().addCacheHandler(0, GridDhtPartitionsSingleMessage.class,
             new MessageHandler<GridDhtPartitionsSingleMessage>() {
                 @Override public void onMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
+                    GridDhtPartitionExchangeId exchangeId = msg.exchangeId();
+
+                    if (exchangeId != null) {
+                        GridDhtPartitionsExchangeFuture fut = exchangeFuture(exchangeId);
+
+                        boolean fastReplied = fut.fastReplyOnSingleMessage(node, msg);
+
+                        if (fastReplied) {
+                            if (log.isInfoEnabled())
+                                log.info("Fast replied to single message " +
+                                    "[exchId=" + exchangeId + ", nodeId=" + node.id() + "]");
+
+                            return;
+                        }
+                    }
+
                     if (!crdInitFut.isDone() && !msg.restoreState()) {
                         GridDhtPartitionExchangeId exchId = msg.exchangeId();
 
@@ -877,7 +894,7 @@
      * @param ver Topology version.
      * @return Future or {@code null} is future is already completed.
      */
-    @Nullable public IgniteInternalFuture<?> affinityReadyFuture(AffinityTopologyVersion ver) {
+    @Nullable public IgniteInternalFuture<AffinityTopologyVersion> affinityReadyFuture(AffinityTopologyVersion ver) {
         GridDhtPartitionsExchangeFuture lastInitializedFut0 = lastInitializedFut;
 
         if (lastInitializedFut0 != null && lastInitializedFut0.initialVersion().compareTo(ver) == 0) {
@@ -1363,6 +1380,15 @@
     }
 
     /**
+     * Gets exchange future by exchange id.
+     *
+     * @param exchId Exchange id.
+     */
+    private GridDhtPartitionsExchangeFuture exchangeFuture(@NotNull GridDhtPartitionExchangeId exchId) {
+        return exchangeFuture(exchId, null, null, null, null);
+    }
+
+    /**
      * @param exchId Exchange ID.
      * @param discoEvt Discovery event.
      * @param cache Discovery data cache.
@@ -1370,11 +1396,13 @@
      * @param affChangeMsg Affinity change message.
      * @return Exchange future.
      */
-    private GridDhtPartitionsExchangeFuture exchangeFuture(GridDhtPartitionExchangeId exchId,
+    private GridDhtPartitionsExchangeFuture exchangeFuture(
+        @NotNull GridDhtPartitionExchangeId exchId,
         @Nullable DiscoveryEvent discoEvt,
         @Nullable DiscoCache cache,
         @Nullable ExchangeActions exchActions,
-        @Nullable CacheAffinityChangeMessage affChangeMsg) {
+        @Nullable CacheAffinityChangeMessage affChangeMsg
+    ) {
         GridDhtPartitionsExchangeFuture fut;
 
         GridDhtPartitionsExchangeFuture old = exchFuts.addx(
@@ -1571,7 +1599,7 @@
                     scheduleResendPartitions();
             }
             else {
-                GridDhtPartitionsExchangeFuture exchFut = exchangeFuture(msg.exchangeId(), null, null, null, null);
+                GridDhtPartitionsExchangeFuture exchFut = exchangeFuture(msg.exchangeId());
 
                 if (log.isDebugEnabled())
                     log.debug("Notifying exchange future about single message: " + exchFut);
@@ -1845,9 +1873,15 @@
         IgniteTxManager tm = cctx.tm();
 
         if (tm != null) {
-            U.warn(diagnosticLog, "Pending transactions:");
+            boolean first = true;
 
             for (IgniteInternalTx tx : tm.activeTransactions()) {
+                if (first) {
+                    U.warn(diagnosticLog, "Pending transactions:");
+
+                    first = false;
+                }
+
                 if (exchTopVer != null) {
                     U.warn(diagnosticLog, ">>> [txVer=" + tx.topologyVersionSnapshot() +
                         ", exchWait=" + tm.needWaitTransaction(tx, exchTopVer) +
@@ -1861,31 +1895,66 @@
         GridCacheMvccManager mvcc = cctx.mvcc();
 
         if (mvcc != null) {
-            U.warn(diagnosticLog, "Pending explicit locks:");
+            boolean first = true;
 
-            for (GridCacheExplicitLockSpan lockSpan : mvcc.activeExplicitLocks())
+            for (GridCacheExplicitLockSpan lockSpan : mvcc.activeExplicitLocks()) {
+                if (first) {
+                    U.warn(diagnosticLog, "Pending explicit locks:");
+
+                    first = false;
+                }
+
                 U.warn(diagnosticLog, ">>> " + lockSpan);
+            }
 
-            U.warn(diagnosticLog, "Pending cache futures:");
+            first = true;
 
-            for (GridCacheFuture<?> fut : mvcc.activeFutures())
+            for (GridCacheFuture<?> fut : mvcc.activeFutures()) {
+                if (first) {
+                    U.warn(diagnosticLog, "Pending cache futures:");
+
+                    first = false;
+                }
+
                 dumpDiagnosticInfo(fut, diagCtx);
+            }
 
-            U.warn(diagnosticLog, "Pending atomic cache futures:");
+            first = true;
 
-            for (GridCacheFuture<?> fut : mvcc.atomicFutures())
+            for (GridCacheFuture<?> fut : mvcc.atomicFutures()) {
+                if (first) {
+                    U.warn(diagnosticLog, "Pending atomic cache futures:");
+
+                    first = false;
+                }
+
                 dumpDiagnosticInfo(fut, diagCtx);
+            }
 
-            U.warn(diagnosticLog, "Pending data streamer futures:");
+            first = true;
 
-            for (IgniteInternalFuture<?> fut : mvcc.dataStreamerFutures())
+            for (IgniteInternalFuture<?> fut : mvcc.dataStreamerFutures()) {
+                if (first) {
+                    U.warn(diagnosticLog, "Pending data streamer futures:");
+
+                    first = false;
+                }
+
                 dumpDiagnosticInfo(fut, diagCtx);
+            }
 
             if (tm != null) {
-                U.warn(diagnosticLog, "Pending transaction deadlock detection futures:");
+                first = true;
 
-                for (IgniteInternalFuture<?> fut : tm.deadlockDetectionFutures())
+                for (IgniteInternalFuture<?> fut : tm.deadlockDetectionFutures()) {
+                    if (first) {
+                        U.warn(diagnosticLog, "Pending transaction deadlock detection futures:");
+
+                        first = false;
+                    }
+
                     dumpDiagnosticInfo(fut, diagCtx);
+                }
             }
         }
 
@@ -1907,6 +1976,8 @@
                     affDumpCnt++;
             }
         }
+
+        cctx.kernalContext().coordinators().dumpDebugInfo(diagnosticLog, diagCtx);
     }
 
     /**
@@ -1969,7 +2040,7 @@
                         ", mergedFut=" + fut.initialVersion() +
                         ", evt=" + IgniteUtils.gridEventName(fut.firstEvent().type()) +
                         ", evtNode=" + fut.firstEvent().eventNode().id()+
-                        ", evtNodeClient=" + CU.clientNode(fut.firstEvent().eventNode())+ ']');
+                        ", evtNodeClient=" + fut.firstEvent().eventNode().isClient() + ']');
                 }
 
                 DiscoveryEvent evt = fut.firstEvent();
@@ -2042,12 +2113,21 @@
 
                     ClusterNode node = evt.eventNode();
 
+                    if ((evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) &&
+                        node.equals(cctx.coordinators().currentCoordinator())) {
+                        if (log.isInfoEnabled())
+                            log.info("Stop merge, need exchange for mvcc coordinator failure: " + node);
+
+                        break;
+                    }
+
                     if (!curFut.context().supportsMergeExchanges(node)) {
                         if (log.isInfoEnabled())
                             log.info("Stop merge, node does not support merge: " + node);
 
                         break;
                     }
+
                     if (evt.type() == EVT_NODE_JOINED && cctx.cache().hasCachesReceivedFromJoin(node)) {
                         if (log.isInfoEnabled())
                             log.info("Stop merge, received caches from node: " + node);
@@ -2060,7 +2140,7 @@
                             ", mergedFut=" + fut.initialVersion() +
                             ", evt=" + IgniteUtils.gridEventName(fut.firstEvent().type()) +
                             ", evtNode=" + fut.firstEvent().eventNode().id() +
-                            ", evtNodeClient=" + CU.clientNode(fut.firstEvent().eventNode())+ ']');
+                            ", evtNodeClient=" + fut.firstEvent().eventNode().isClient() + ']');
                     }
 
                     addDiscoEvtForTest(fut.firstEvent());
@@ -2342,7 +2422,7 @@
                         // because only current exchange future can have multiple discovery events (exchange merge).
                         ClusterNode triggeredBy = ((GridDhtPartitionsExchangeFuture) task).firstEvent().eventNode();
 
-                        if (!CU.clientNode(triggeredBy))
+                        if (!triggeredBy.isClient())
                             return true;
                     }
                 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
index 538d367..f000da7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java
@@ -81,6 +81,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionsEvictManager;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicCache;
 import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedCache;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.StopCachesOnClientReconnectExchangeTask;
@@ -96,7 +97,6 @@
 import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
 import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList;
-import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
@@ -164,6 +164,7 @@
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK;
 import static org.apache.ignite.IgniteSystemProperties.getBoolean;
 import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
 import static org.apache.ignite.cache.CacheMode.LOCAL;
 import static org.apache.ignite.cache.CacheMode.PARTITIONED;
 import static org.apache.ignite.cache.CacheMode.REPLICATED;
@@ -276,6 +277,7 @@
         throws IgniteCheckedException {
         CU.initializeConfigDefaults(log, cfg, cacheObjCtx);
 
+        ctx.coordinators().preProcessCacheConfiguration(cfg);
         ctx.igfsHelper().preProcessCacheConfiguration(cfg);
     }
 
@@ -509,23 +511,21 @@
 
         if (delay != 0) {
             if (cc.getCacheMode() != PARTITIONED)
-                U.warn(log, "Rebalance delay is supported only for partitioned caches (will ignore): " + (cc.getName()),
-                    "Will ignore rebalance delay for cache: " + U.maskName(cc.getName()));
+                U.warn(log, "Rebalance delay is supported only for partitioned caches (will ignore): " + (cc.getName()));
             else if (cc.getRebalanceMode() == SYNC) {
                 if (delay < 0) {
                     U.warn(log, "Ignoring SYNC rebalance mode with manual rebalance start (node will not wait for " +
-                            "rebalancing to be finished): " + U.maskName(cc.getName()),
-                        "Node will not wait for rebalance in SYNC mode: " + U.maskName(cc.getName()));
+                        "rebalancing to be finished): " + U.maskName(cc.getName()));
                 }
                 else {
                     U.warn(log, "Using SYNC rebalance mode with rebalance delay (node will wait until rebalancing is " +
-                            "initiated for " + delay + "ms) for cache: " + U.maskName(cc.getName()),
-                        "Node will wait until rebalancing is initiated for " + delay + "ms for cache: " + U.maskName(cc.getName()));
+                        "initiated for " + delay + "ms) for cache: " + U.maskName(cc.getName()));
                 }
             }
         }
 
         ctx.igfsHelper().validateCacheConfiguration(cc);
+        ctx.coordinators().validateCacheConfiguration(cc);
 
         if (cc.getAtomicityMode() == ATOMIC)
             assertParameter(cc.getTransactionManagerLookupClassName() == null,
@@ -543,6 +543,25 @@
             throw new IgniteCheckedException("Using cache group names reserved for datastructures is not allowed for " +
                 "other cache types [cacheName=" + cc.getName() + ", groupName=" + cc.getGroupName() +
                 ", cacheType=" + cacheType + "]");
+
+        // Make sure we do not use sql schema for system views.
+        if (ctx.query().moduleEnabled()) {
+            String schema = QueryUtils.normalizeSchemaName(cc.getName(), cc.getSqlSchema());
+
+            if (F.eq(schema, QueryUtils.SCHEMA_SYS)) {
+                if (cc.getSqlSchema() == null) {
+                    // Conflict on cache name.
+                    throw new IgniteCheckedException("SQL schema name derived from cache name is reserved (" +
+                        "please set explicit SQL schema name through CacheConfiguration.setSqlSchema() or choose " +
+                        "another cache name) [cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + "]");
+                }
+                else {
+                    // Conflict on schema name.
+                    throw new IgniteCheckedException("SQL schema name is reserved (please choose another one) [" +
+                        "cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + ']');
+                }
+            }
+        }
     }
 
     /**
@@ -665,8 +684,7 @@
         if (!F.isEmpty(ctx.config().getCacheConfiguration())) {
             if (depMode != CONTINUOUS && depMode != SHARED)
                 U.warn(log, "Deployment mode for cache is not CONTINUOUS or SHARED " +
-                        "(it is recommended that you change deployment mode and restart): " + depMode,
-                    "Deployment mode for cache is not CONTINUOUS or SHARED.");
+                    "(it is recommended that you change deployment mode and restart): " + depMode);
         }
 
         initializeInternalCacheNames();
@@ -792,14 +810,42 @@
         if (CU.isPersistenceEnabled(ctx.config()) && ctx.cache().context().pageStore() != null) {
             Map<String, StoredCacheData> storedCaches = ctx.cache().context().pageStore().readCacheConfigurations();
 
-            if (!F.isEmpty(storedCaches))
+            if (!F.isEmpty(storedCaches)) {
                 for (StoredCacheData storedCacheData : storedCaches.values()) {
                     String cacheName = storedCacheData.config().getName();
 
                     //Ignore stored caches if it already added by static config(static config has higher priority).
                     if (!caches.containsKey(cacheName))
                         addStoredCache(caches, storedCacheData, cacheName, cacheType(cacheName), false);
+                    else {
+                        CacheConfiguration cfg = caches.get(cacheName).cacheData().config();
+                        CacheConfiguration cfgFromStore = storedCacheData.config();
+
+                        validateCacheConfigurationOnRestore(cfg, cfgFromStore);
+                    }
                 }
+            }
+        }
+    }
+
+    /**
+     * Validates cache configuration against stored cache configuration when persistence is enabled.
+     *
+     * @param cfg Configured cache configuration.
+     * @param cfgFromStore Stored cache configuration
+     * @throws IgniteCheckedException If validation failed.
+     */
+    private void validateCacheConfigurationOnRestore(CacheConfiguration cfg, CacheConfiguration cfgFromStore)
+        throws IgniteCheckedException {
+        assert cfg != null && cfgFromStore != null;
+
+        if ((cfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT ||
+            cfgFromStore.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT)
+            && cfg.getAtomicityMode() != cfgFromStore.getAtomicityMode()) {
+            throw new IgniteCheckedException("Cannot start cache. Statically configured atomicity mode differs from " +
+                "previously stored configuration. Please check your configuration: [cacheName=" + cfg.getName() +
+                ", configuredAtomicityMode=" + cfg.getAtomicityMode() +
+                ", storedAtomicityMode=" + cfgFromStore.getAtomicityMode() + "]");
         }
     }
 
@@ -1208,7 +1254,8 @@
                 ", memoryPolicyName=" + memPlcName +
                 ", mode=" + cfg.getCacheMode() +
                 ", atomicity=" + cfg.getAtomicityMode() +
-                ", backups=" + cfg.getBackups() + ']');
+                ", backups=" + cfg.getBackups() +
+                ", mvcc=" + cacheCtx.mvccEnabled() +']');
         }
     }
 
@@ -1426,6 +1473,9 @@
 
         pluginMgr.validate();
 
+        if (cfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT)
+            sharedCtx.coordinators().ensureStarted();
+
         sharedCtx.jta().registerCache(cfg);
 
         // Skip suggestions for internal caches.
@@ -1496,7 +1546,8 @@
         switch (cfg.getCacheMode()) {
             case LOCAL: {
                 switch (cfg.getAtomicityMode()) {
-                    case TRANSACTIONAL: {
+                    case TRANSACTIONAL:
+                    case TRANSACTIONAL_SNAPSHOT: {
                         cache = new GridLocalCache(cacheCtx);
 
                         break;
@@ -1518,7 +1569,8 @@
             case REPLICATED: {
                 if (nearEnabled) {
                     switch (cfg.getAtomicityMode()) {
-                        case TRANSACTIONAL: {
+                        case TRANSACTIONAL:
+                        case TRANSACTIONAL_SNAPSHOT: {
                             cache = new GridNearTransactionalCache(cacheCtx);
 
                             break;
@@ -1536,7 +1588,8 @@
                 }
                 else {
                     switch (cfg.getAtomicityMode()) {
-                        case TRANSACTIONAL: {
+                        case TRANSACTIONAL:
+                        case TRANSACTIONAL_SNAPSHOT: {
                             cache = cacheCtx.affinityNode() ?
                                 new GridDhtColocatedCache(cacheCtx) :
                                 new GridDhtColocatedCache(cacheCtx, new GridNoStorageCacheMap());
@@ -1626,7 +1679,8 @@
             GridDhtCacheAdapter dht = null;
 
             switch (cfg.getAtomicityMode()) {
-                case TRANSACTIONAL: {
+                case TRANSACTIONAL:
+                case TRANSACTIONAL_SNAPSHOT: {
                     assert cache instanceof GridNearTransactionalCache;
 
                     GridNearTransactionalCache near = (GridNearTransactionalCache)cache;
@@ -2166,50 +2220,31 @@
     Set<Integer> closeCaches(Set<String> cachesToClose, boolean retClientCaches) {
         Set<Integer> ids = null;
 
-        boolean locked = false;
+        for (String cacheName : cachesToClose) {
+            blockGateway(cacheName, false, false);
 
-        try {
-            for (String cacheName : cachesToClose) {
-                blockGateway(cacheName, false, false);
+            GridCacheContext ctx = sharedCtx.cacheContext(CU.cacheId(cacheName));
 
-                GridCacheContext ctx = sharedCtx.cacheContext(CU.cacheId(cacheName));
+            if (ctx == null)
+                continue;
 
-                if (ctx == null)
-                    continue;
+            if (retClientCaches && !ctx.affinityNode()) {
+                if (ids == null)
+                    ids = U.newHashSet(cachesToClose.size());
 
-                if (retClientCaches && !ctx.affinityNode()) {
-                    if (ids == null)
-                        ids = U.newHashSet(cachesToClose.size());
-
-                    ids.add(ctx.cacheId());
-                }
-
-                if (!ctx.affinityNode() && !locked) {
-                    // Do not close client cache while requests processing is in progress.
-                    sharedCtx.io().writeLock();
-
-                    locked = true;
-                }
-
-                if (!ctx.affinityNode() && ctx.transactional())
-                    sharedCtx.tm().rollbackTransactionsForCache(ctx.cacheId());
-
-                closeCache(ctx, false);
+                ids.add(ctx.cacheId());
             }
 
-            return ids;
+            closeCache(ctx);
         }
-        finally {
-            if (locked)
-                sharedCtx.io().writeUnlock();
-        }
+
+        return ids;
     }
 
     /**
      * @param cctx Cache context.
-     * @param destroy Destroy flag.
      */
-    private void closeCache(GridCacheContext cctx, boolean destroy) {
+    private void closeCache(GridCacheContext cctx) {
         if (cctx.affinityNode()) {
             GridCacheAdapter<?, ?> cache = caches.get(cctx.name());
 
@@ -2218,21 +2253,32 @@
             jCacheProxies.put(cctx.name(), new IgniteCacheProxyImpl(cache.context(), cache, false));
         }
         else {
-            jCacheProxies.remove(cctx.name());
-
             cctx.gate().onStopped();
 
-            sharedCtx.database().checkpointReadLock();
+            // Do not close client cache while requests processing is in progress.
+            sharedCtx.io().writeLock();
 
             try {
-                prepareCacheStop(cctx.name(), destroy);
+                if (!cctx.affinityNode() && cctx.transactional())
+                    sharedCtx.tm().rollbackTransactionsForCache(cctx.cacheId());
+
+                jCacheProxies.remove(cctx.name());
+
+                sharedCtx.database().checkpointReadLock();
+
+                try {
+                    prepareCacheStop(cctx.name(), false);
+                }
+                finally {
+                    sharedCtx.database().checkpointReadUnlock();
+                }
+
+                if (!cctx.group().hasCaches())
+                    stopCacheGroup(cctx.group().groupId());
             }
             finally {
-                sharedCtx.database().checkpointReadUnlock();
+                sharedCtx.io().writeUnlock();
             }
-
-            if (!cctx.group().hasCaches())
-                stopCacheGroup(cctx.group().groupId());
         }
     }
 
@@ -2467,6 +2513,7 @@
         GridCacheIoManager ioMgr = new GridCacheIoManager();
         CacheAffinitySharedManager topMgr = new CacheAffinitySharedManager();
         GridCacheSharedTtlCleanupManager ttl = new GridCacheSharedTtlCleanupManager();
+        PartitionsEvictManager evict = new PartitionsEvictManager();
 
         CacheJtaManagerAdapter jta = JTA.createOptional();
 
@@ -2485,6 +2532,7 @@
             topMgr,
             ioMgr,
             ttl,
+            evict,
             jta,
             storeSesLsnrs
         );
@@ -3631,7 +3679,7 @@
      * @return Validation result or {@code null} in case of success.
      */
     @Nullable private IgniteNodeValidationResult validateHashIdResolvers(ClusterNode node) {
-        if (!CU.clientNode(node)) {
+        if (!node.isClient()) {
             for (DynamicCacheDescriptor desc : cacheDescriptors().values()) {
                 CacheConfiguration cfg = desc.cacheConfiguration();
 
@@ -4454,7 +4502,7 @@
         if (ccfg != null) {
             cloneCheckSerializable(ccfg);
 
-            if (desc != null || MetaStorage.METASTORAGE_CACHE_NAME.equals(cacheName)) {
+            if (desc != null) {
                 if (failIfExists) {
                     throw new CacheExistsException("Failed to start cache " +
                         "(a cache with the same name is already started): " + cacheName);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java
index fb0a556..a9ce448 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java
@@ -445,30 +445,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public boolean isMongoDataCache() {
-        CacheOperationContext prev = gate.enter(opCtx);
-
-        try {
-            return delegate.isMongoDataCache();
-        }
-        finally {
-            gate.leave(prev);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isMongoMetaCache() {
-        CacheOperationContext prev = gate.enter(opCtx);
-
-        try {
-            return delegate.isMongoMetaCache();
-        }
-        finally {
-            gate.leave(prev);
-        }
-    }
-
-    /** {@inheritDoc} */
     @Override public Map<K, V> getAll(@Nullable Collection<? extends K> keys) throws IgniteCheckedException {
         CacheOperationContext prev = gate.enter(opCtx);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java
index bc85931..2ae0f7c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheReturn.java
@@ -31,6 +31,7 @@
 import org.apache.ignite.binary.BinaryObject;
 import org.apache.ignite.internal.GridDirectCollection;
 import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
 import org.apache.ignite.internal.UnregisteredClassException;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -243,9 +244,13 @@
                 v = resMap;
             }
 
-            // This exception means that we should register class and call EntryProcessor again.
-            if (err != null && err instanceof UnregisteredClassException)
-                throw (UnregisteredClassException) err;
+            // These exceptions mean that we should register class and call EntryProcessor again.
+            if (err != null) {
+                if (err instanceof UnregisteredClassException)
+                    throw (UnregisteredClassException) err;
+                else if (err instanceof UnregisteredBinaryTypeException)
+                    throw (UnregisteredBinaryTypeException) err;
+            }
 
             CacheInvokeResult res0 = err == null ? CacheInvokeResult.fromResult(res) : CacheInvokeResult.fromError(err);
 
@@ -264,7 +269,10 @@
                 invokeResCol = new ArrayList<>();
 
             CacheInvokeDirectResult res0 = err == null ?
-                new CacheInvokeDirectResult(key, cctx.toCacheObject(res)) : new CacheInvokeDirectResult(key, err);
+                cctx.transactional() ?
+                    new CacheInvokeDirectResult(key, cctx.toCacheObject(res)) :
+                    CacheInvokeDirectResult.lazyResult(key, res) :
+                new CacheInvokeDirectResult(key, err);
 
             invokeResCol.add(res0);
         }
@@ -303,6 +311,18 @@
     }
 
     /**
+     * Converts entry processor invokation results to cache object instances.
+     *
+     * @param ctx Cache context.
+     */
+    public void marshalResult(GridCacheContext ctx) {
+        if (invokeRes && invokeResCol != null) {
+            for (CacheInvokeDirectResult directRes : invokeResCol)
+                directRes.marshalResult(ctx);
+        }
+    }
+
+    /**
      * @param ctx Cache context.
      * @throws IgniteCheckedException If failed.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java
index b195508..bfe0001 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheSharedContext.java
@@ -37,7 +37,6 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.managers.communication.GridIoManager;
 import org.apache.ignite.internal.managers.deployment.GridDeploymentManager;
-import org.apache.ignite.internal.managers.discovery.DiscoCache;
 import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager;
 import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
 import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager;
@@ -45,8 +44,10 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionsEvictManager;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
 import org.apache.ignite.internal.processors.cache.jta.CacheJtaManagerAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager;
 import org.apache.ignite.internal.processors.cache.store.CacheStoreManager;
@@ -126,6 +127,9 @@
     /** Ttl cleanup manager. */
     private GridCacheSharedTtlCleanupManager ttlMgr;
 
+    /** */
+    private PartitionsEvictManager evictMgr;
+
     /** Cache contexts map. */
     private ConcurrentHashMap<Integer, GridCacheContext<K, V>> ctxMap;
 
@@ -199,13 +203,30 @@
         CacheAffinitySharedManager<K, V> affMgr,
         GridCacheIoManager ioMgr,
         GridCacheSharedTtlCleanupManager ttlMgr,
+        PartitionsEvictManager evictMgr,
         CacheJtaManagerAdapter jtaMgr,
         Collection<CacheStoreSessionListener> storeSesLsnrs
     ) {
         this.kernalCtx = kernalCtx;
 
-        setManagers(mgrs, txMgr, jtaMgr, verMgr, mvccMgr, pageStoreMgr, walMgr, walStateMgr, dbMgr, snpMgr, depMgr,
-            exchMgr, affMgr, ioMgr, ttlMgr);
+        setManagers(
+            mgrs,
+            txMgr,
+            jtaMgr,
+            verMgr,
+            mvccMgr,
+            pageStoreMgr,
+            walMgr,
+            walStateMgr,
+            dbMgr,
+            snpMgr,
+            depMgr,
+            exchMgr,
+            affMgr,
+            ioMgr,
+            ttlMgr,
+            evictMgr
+        );
 
         this.storeSesLsnrs = storeSesLsnrs;
 
@@ -246,23 +267,8 @@
      * @throws IgniteCheckedException If failed.
      */
     public void activate() throws IgniteCheckedException {
-        if (!kernalCtx.clientNode())
-            dbMgr.lock();
-
-        boolean success = false;
-
-        try {
-            for (IgniteChangeGlobalStateSupport mgr : stateAwareMgrs)
-                mgr.onActivate(kernalCtx);
-
-            success = true;
-        }
-        finally {
-            if (!success) {
-                if (!kernalCtx.clientNode())
-                    dbMgr.unLock();
-            }
-        }
+        for (IgniteChangeGlobalStateSupport mgr : stateAwareMgrs)
+            mgr.onActivate(kernalCtx);
     }
 
     /**
@@ -364,7 +370,9 @@
     void onReconnected(boolean active) throws IgniteCheckedException {
         List<GridCacheSharedManager<K, V>> mgrs = new LinkedList<>();
 
-        setManagers(mgrs, txMgr,
+        setManagers(
+            mgrs,
+            txMgr,
             jtaMgr,
             verMgr,
             mvccMgr,
@@ -377,7 +385,9 @@
             new GridCachePartitionExchangeManager<K, V>(),
             affMgr,
             ioMgr,
-            ttlMgr);
+            ttlMgr,
+            evictMgr
+        );
 
         this.mgrs = mgrs;
 
@@ -419,7 +429,8 @@
      * @param ttlMgr Ttl cleanup manager.
      */
     @SuppressWarnings("unchecked")
-    private void setManagers(List<GridCacheSharedManager<K, V>> mgrs,
+    private void setManagers(
+        List<GridCacheSharedManager<K, V>> mgrs,
         IgniteTxManager txMgr,
         CacheJtaManagerAdapter jtaMgr,
         GridCacheVersionManager verMgr,
@@ -433,7 +444,9 @@
         GridCachePartitionExchangeManager<K, V> exchMgr,
         CacheAffinitySharedManager affMgr,
         GridCacheIoManager ioMgr,
-        GridCacheSharedTtlCleanupManager ttlMgr) {
+        GridCacheSharedTtlCleanupManager ttlMgr,
+        PartitionsEvictManager evictMgr
+    ) {
         this.mvccMgr = add(mgrs, mvccMgr);
         this.verMgr = add(mgrs, verMgr);
         this.txMgr = add(mgrs, txMgr);
@@ -448,6 +461,7 @@
         this.affMgr = add(mgrs, affMgr);
         this.ioMgr = add(mgrs, ioMgr);
         this.ttlMgr = add(mgrs, ttlMgr);
+        this.evictMgr = add(mgrs, evictMgr);
     }
 
     /**
@@ -780,6 +794,20 @@
     }
 
     /**
+     * @return Cache mvcc coordinator manager.
+     */
+    public MvccProcessor coordinators() {
+        return kernalCtx.coordinators();
+    }
+
+    /**
+     * @return Partition evict manager.
+     */
+    public PartitionsEvictManager evict() {
+        return evictMgr;
+    }
+
+    /**
      * @return Node ID.
      */
     public UUID localNodeId() {
@@ -834,7 +862,7 @@
     /**
      * Captures all ongoing operations that we need to wait before we can proceed to the next topology version.
      * This method must be called only after
-     * {@link GridDhtPartitionTopology#updateTopologyVersion(GridDhtTopologyFuture, DiscoCache, long, boolean)}
+     * {@link GridDhtPartitionTopology#updateTopologyVersion}
      * method is called so that all new updates will wait to switch to the new version.
      * This method will capture:
      * <ul>
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java
index 8277c2a..2166ce5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheTtlManager.java
@@ -71,7 +71,7 @@
                 }
 
                 if (touch)
-                    entry.context().evicts().touch(entry, null);
+                    entry.touch(null);
             }
         };
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUpdateTxResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUpdateTxResult.java
index 92af83b..b646cf9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUpdateTxResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUpdateTxResult.java
@@ -17,8 +17,12 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import java.util.List;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
-import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -29,26 +33,38 @@
     /** Success flag.*/
     private final boolean success;
 
-    /** Old value. */
-    @GridToStringInclude
-    private final CacheObject oldVal;
-
-    /** Partition idx. */
+    /** Partition update counter. */
     private long updateCntr;
 
     /** */
+    private GridLongList mvccWaitTxs;
+
+    /** */
+    private  GridFutureAdapter<GridCacheUpdateTxResult> fut;
+
+    /** */
     private WALPointer logPtr;
 
+    /** */
+    private List<MvccLinkAwareSearchRow> mvccHistory;
+
     /**
      * Constructor.
      *
      * @param success Success flag.
-     * @param oldVal Old value (if any),
+     */
+    GridCacheUpdateTxResult(boolean success) {
+        this.success = success;
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param success Success flag.
      * @param logPtr Logger WAL pointer for the update.
      */
-    GridCacheUpdateTxResult(boolean success, @Nullable CacheObject oldVal, WALPointer logPtr) {
+    GridCacheUpdateTxResult(boolean success, WALPointer logPtr) {
         this.success = success;
-        this.oldVal = oldVal;
         this.logPtr = logPtr;
     }
 
@@ -56,20 +72,45 @@
      * Constructor.
      *
      * @param success Success flag.
-     * @param oldVal Old value (if any).
+     * @param fut Update future.
+     */
+    GridCacheUpdateTxResult(boolean success, GridFutureAdapter<GridCacheUpdateTxResult> fut) {
+        this.success = success;
+        this.fut = fut;
+    }
+
+    /**
+     * Constructor.
+     *
+     * @param success Success flag.
+     * @param updateCntr Update counter.
      * @param logPtr Logger WAL pointer for the update.
      */
-    GridCacheUpdateTxResult(boolean success, @Nullable CacheObject oldVal, long updateCntr, WALPointer logPtr) {
+    GridCacheUpdateTxResult(boolean success, long updateCntr, WALPointer logPtr) {
         this.success = success;
-        this.oldVal = oldVal;
         this.updateCntr = updateCntr;
         this.logPtr = logPtr;
     }
 
     /**
-     * @return Partition idx.
+     * Constructor.
+     *
+     * @param success Success flag.
+     * @param updateCntr Update counter.
+     * @param logPtr Logger WAL pointer for the update.
+     * @param mvccWaitTxs List of transactions to wait for completion.
      */
-    public long updatePartitionCounter() {
+    GridCacheUpdateTxResult(boolean success, long updateCntr, WALPointer logPtr, GridLongList mvccWaitTxs) {
+        this.success = success;
+        this.updateCntr = updateCntr;
+        this.logPtr = logPtr;
+        this.mvccWaitTxs = mvccWaitTxs;
+    }
+
+    /**
+     * @return Partition update counter.
+     */
+    public long updateCounter() {
         return updateCntr;
     }
 
@@ -88,10 +129,33 @@
     }
 
     /**
-     * @return Old value.
+     * @return Update future.
      */
-    @Nullable public CacheObject oldValue() {
-        return oldVal;
+    @Nullable public IgniteInternalFuture<GridCacheUpdateTxResult> updateFuture() {
+        return fut;
+    }
+
+    /**
+     * @return List of transactions to wait for completion.
+     */
+    @Nullable public GridLongList mvccWaitTransactions() {
+        return mvccWaitTxs;
+    }
+
+    /**
+     *
+     * @return Mvcc history rows.
+     */
+    @Nullable public List<MvccLinkAwareSearchRow> mvccHistory() {
+        return mvccHistory;
+    }
+
+    /**
+     *
+     * @param mvccHistory Mvcc history rows.
+     */
+    public void mvccHistory(List<MvccLinkAwareSearchRow> mvccHistory) {
+        this.mvccHistory = mvccHistory;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtilityKey.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtilityKey.java
index 8110170..affa6ce 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtilityKey.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtilityKey.java
@@ -41,5 +41,5 @@
     protected abstract boolean equalsx(K key);
 
     /** {@inheritDoc} */
-    public abstract int hashCode();
+    @Override public abstract int hashCode();
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
index 6b1e42d..9ef470c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java
@@ -32,6 +32,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.LongAdder;
 import javax.cache.Cache;
 import javax.cache.CacheException;
 import javax.cache.configuration.Factory;
@@ -67,10 +68,13 @@
 import org.apache.ignite.internal.managers.discovery.IgniteClusterNode;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException;
+import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog;
+import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
@@ -99,7 +103,6 @@
 import org.apache.ignite.lifecycle.LifecycleAware;
 import org.apache.ignite.plugin.CachePluginConfiguration;
 import org.apache.ignite.plugin.security.SecurityException;
-import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode;
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
@@ -173,6 +176,14 @@
     /** System cache name. */
     public static final String UTILITY_CACHE_NAME = "ignite-sys-cache";
 
+    /** Reserved cache names */
+    public static final String[] RESERVED_NAMES = new String[] {
+        SYS_CACHE_HADOOP_MR,
+        UTILITY_CACHE_NAME,
+        MetaStorage.METASTORAGE_CACHE_NAME,
+        TxLog.TX_LOG_CACHE_NAME,
+    };
+
     /** */
     public static final String CONTINUOUS_QRY_LOG_CATEGORY = "org.apache.ignite.continuous.query";
 
@@ -319,6 +330,18 @@
         }
     };
 
+    /** Query mapped filter. */
+    public static final IgnitePredicate<GridDistributedTxMapping> FILTER_QUERY_MAPPING = new P1<GridDistributedTxMapping>() {
+
+        @Override public boolean apply(GridDistributedTxMapping m) {
+            return m.queryUpdate();
+        }
+
+        @Override public String toString() {
+            return "FILTER_QUERY_MAPPING";
+        }
+    };
+
     /** Transaction entry to key. */
     private static final IgniteClosure tx2key = new C1<IgniteTxEntry, Object>() {
         @Override public Object apply(IgniteTxEntry e) {
@@ -589,6 +612,30 @@
     }
 
     /**
+     * @return Long reducer.
+     */
+    public static IgniteReducer<Long, Long> longReducer() {
+        return new IgniteReducer<Long, Long>() {
+            private final LongAdder res = new LongAdder();
+
+            @Override public boolean collect(Long l) {
+                if(l != null)
+                    res.add(l);
+
+                return true;
+            }
+
+            @Override public Long reduce() {
+                return res.sum();
+            }
+
+            @Override public String toString() {
+                return "Long reducer: " + res;
+            }
+        };
+    }
+
+    /**
      * Gets reducer that aggregates maps into one.
      *
      * @param size Predicted size of the resulting map to avoid resizings.
@@ -1102,30 +1149,6 @@
             return 1;
     }
 
-//    /**
-//     * @param cfg Grid configuration.
-//     * @param cacheName Cache name.
-//     * @return {@code True} in this is Mongo data or meta cache.
-//     */
-//    public static boolean isMongoCache(GridConfiguration cfg, @Nullable String cacheName) {
-//        GridMongoConfiguration mongoCfg = cfg.getMongoConfiguration();
-//
-//        if (mongoCfg != null) {
-//            if (F.eq(cacheName, mongoCfg.getDefaultDataCacheName()) || F.eq(cacheName, mongoCfg.getMetaCacheName()))
-//                return true;
-//
-//            // Mongo config probably has not been validated yet => possible NPE, so we check for null.
-//            if (mongoCfg.getDataCacheNames() != null) {
-//                for (String mongoCacheName : mongoCfg.getDataCacheNames().values()) {
-//                    if (F.eq(cacheName, mongoCacheName))
-//                        return true;
-//                }
-//            }
-//        }
-//
-//        return false;
-//    }
-
     /**
      * @param cfg Grid configuration.
      * @param cacheName Cache name.
@@ -1353,35 +1376,11 @@
 
     /**
      * @param node Node.
-     * @return {@code True} if given node is client node (has flag {@link IgniteConfiguration#isClientMode()} set).
-     */
-    public static boolean clientNode(ClusterNode node) {
-        if (node instanceof IgniteClusterNode)
-            return ((IgniteClusterNode)node).isCacheClient();
-        else
-            return clientNodeDirect(node);
-    }
-
-    /**
-     * @param node Node.
-     * @return {@code True} if given node is client node (has flag {@link IgniteConfiguration#isClientMode()} set).
-     */
-    @SuppressWarnings("ConstantConditions")
-    public static boolean clientNodeDirect(ClusterNode node) {
-        Boolean clientModeAttr = node.attribute(IgniteNodeAttributes.ATTR_CLIENT_MODE);
-
-        assert clientModeAttr != null : node;
-
-        return clientModeAttr != null && clientModeAttr;
-    }
-
-    /**
-     * @param node Node.
      * @param filter Node filter.
      * @return {@code True} if node is not client node and pass given filter.
      */
     public static boolean affinityNode(ClusterNode node, IgnitePredicate<ClusterNode> filter) {
-        return !node.isDaemon() && !clientNode(node) && filter.apply(node);
+        return !node.isDaemon() && !node.isClient() && filter.apply(node);
     }
 
     /**
@@ -1591,6 +1590,17 @@
     }
 
     /**
+     * @param name Cache name.
+     * @throws IllegalArgumentException In case the name is not valid.
+     */
+    public static void validateNewCacheName(String name) throws IllegalArgumentException {
+        validateCacheName(name);
+
+        A.ensure(!isReservedCacheName(name), "Cache name cannot be \"" + name +
+            "\" because it is reserved for internal purposes.");
+    }
+
+    /**
      * @param cacheNames Cache names to validate.
      * @throws IllegalArgumentException In case the name is not valid.
      */
@@ -1606,7 +1616,20 @@
     public static void validateConfigurationCacheNames(Collection<CacheConfiguration> ccfgs)
         throws IllegalArgumentException {
         for (CacheConfiguration ccfg : ccfgs)
-            validateCacheName(ccfg.getName());
+            validateNewCacheName(ccfg.getName());
+    }
+
+    /**
+     * @param name Cache name.
+     * @return {@code True} if it is a reserved cache name.
+     */
+    public static boolean isReservedCacheName(String name) {
+        for (String reserved : RESERVED_NAMES) {
+            if (reserved.equals(name))
+                return true;
+        }
+
+        return false;
     }
 
     /**
@@ -1696,7 +1719,7 @@
      * <p>
      * Useful only when store with readThrough is used. In situation when
      * get() on backup node returns successful result, it's expected that
-     * localPeek() will be successful as well. But it doesn't true when
+     * localPeek() will be successful as well. But it isn't true when
      * primary node loaded value from local store, in this case backups
      * will remain non-initialized.
      * <br>
@@ -1762,7 +1785,7 @@
                     }
                     finally {
                         if (entry != null)
-                            cctx.evicts().touch(entry, topVer);
+                            entry.touch(topVer);
 
                         cctx.shared().database().checkpointReadUnlock();
                     }
@@ -1878,6 +1901,44 @@
     }
 
     /**
+     * @param sctx Shared context.
+     * @param cacheIds Cache ids.
+     * @return First partitioned cache or {@code null} in case no partitioned cache ids are in list.
+     */
+    public static GridCacheContext<?, ?> firstPartitioned(GridCacheSharedContext<?, ?> sctx, int[] cacheIds) {
+        for (int i = 0; i < cacheIds.length; i++) {
+            GridCacheContext<?, ?> cctx = sctx.cacheContext(cacheIds[i]);
+
+            if (cctx == null)
+                throw new CacheException("Failed to find cache.");
+
+            if (!cctx.isLocal() && !cctx.isReplicated())
+                return cctx;
+        }
+
+        return null;
+    }
+
+    /**
+     * @param sctx Shared context.
+     * @param cacheIds Cache ids.
+     * @return First partitioned cache or {@code null} in case no partitioned cache ids are in list.
+     */
+    public static GridCacheContext<?, ?> firstPartitioned(GridCacheSharedContext<?, ?> sctx, Iterable<Integer> cacheIds) {
+        for (Integer i : cacheIds) {
+            GridCacheContext<?, ?> cctx = sctx.cacheContext(i);
+
+            if (cctx == null)
+                throw new CacheException("Failed to find cache.");
+
+            if (!cctx.isLocal() && !cctx.isReplicated())
+                return cctx;
+        }
+
+        return null;
+    }
+
+    /**
      * @param cacheName Name of cache or cache template.
      * @return {@code true} if cache name ends with asterisk (*), and therefire is a template name.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java
index fa25412..fdf42fe 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManager.java
@@ -17,25 +17,33 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import java.util.List;
 import java.util.Map;
 import javax.cache.Cache;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
 import org.apache.ignite.internal.processors.cache.persistence.RootPage;
 import org.apache.ignite.internal.processors.cache.persistence.RowStore;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
 import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner;
 import org.apache.ignite.internal.util.GridAtomicLong;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.IgniteTree;
 import org.apache.ignite.internal.util.lang.GridCloseableIterator;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.lang.GridIterator;
 import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
+import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.Nullable;
 
 /**
@@ -171,6 +179,202 @@
     /**
      * @param cctx Cache context.
      * @param key Key.
+     * @return Cached row, if available, null otherwise.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public CacheDataRow mvccRead(GridCacheContext cctx, KeyCacheObject key, MvccSnapshot ver)
+        throws IgniteCheckedException;
+
+    /**
+     * For testing only.
+     *
+     * @param cctx Cache context.
+     * @param key Key.
+     * @return All stored versions for given key.
+     * @throws IgniteCheckedException If failed.
+     */
+    public List<IgniteBiTuple<Object, MvccVersion>> mvccAllVersions(GridCacheContext cctx, KeyCacheObject key)
+        throws IgniteCheckedException;
+
+    /**
+     * Returns iterator over the all row versions for the given key.
+     *
+     * @param cctx Cache context.
+     * @param key Key.
+     * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+     * @return Iterator over all versions.
+     * @throws IgniteCheckedException If failed.
+     */
+    GridCursor<CacheDataRow> mvccAllVersionsCursor(GridCacheContext cctx, KeyCacheObject key, Object x)
+        throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Version.
+     * @param expireTime Expire time.
+     * @return {@code True} if value was inserted.
+     * @throws IgniteCheckedException If failed.
+     */
+    default boolean mvccInitialValue(
+        GridCacheMapEntry entry,
+        @Nullable CacheObject val,
+        GridCacheVersion ver,
+        long expireTime
+    ) throws IgniteCheckedException {
+        return mvccInitialValue(entry, val, ver, expireTime, null, null);
+    }
+
+    /**
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Version.
+     * @param expireTime Expire time.
+     * @param mvccVer MVCC version.
+     * @param newMvccVer New MVCC version.
+     * @return {@code True} if value was inserted.
+     * @throws IgniteCheckedException If failed.
+     */
+    public boolean mvccInitialValue(
+        GridCacheMapEntry entry,
+        @Nullable CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer
+    ) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Version.
+     * @param expireTime Expire time.
+     * @param mvccVer MVCC version.
+     * @param newMvccVer New MVCC version.
+     * @param txState Tx state hint for the mvcc version.
+     * @param newTxState Tx state hint for the new mvcc version.
+     * @return {@code True} if value was inserted.
+     * @throws IgniteCheckedException If failed.
+     */
+    public boolean mvccInitialValueIfAbsent(
+        GridCacheMapEntry entry,
+        @Nullable CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte txState,
+        byte newTxState
+    ) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Cache version.
+     * @param expireTime Expire time.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param primary {@code True} if on primary node.
+     * @param needHistory Flag to collect history.
+     * @param noCreate Flag indicating that row should not be created if absent.
+     * @return Update result.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public MvccUpdateResult mvccUpdate(
+        GridCacheMapEntry entry,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccSnapshot mvccSnapshot,
+        boolean primary,
+        boolean needHistory,
+        boolean noCreate) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param primary {@code True} if on primary node.
+     * @param needHistory Flag to collect history.
+     * @return Update result.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public MvccUpdateResult mvccRemove(
+        GridCacheMapEntry entry,
+        MvccSnapshot mvccSnapshot,
+        boolean primary,
+        boolean needHistory) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param mvccSnapshot MVCC snapshot.
+     * @return Update result.
+     * @throws IgniteCheckedException If failed.
+     */
+    @Nullable public MvccUpdateResult mvccLock(
+        GridCacheMapEntry entry,
+        MvccSnapshot mvccSnapshot
+    ) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Version.
+     * @param mvccVer MVCC version.
+     * @param newMvccVer New MVCC version.
+     * @return {@code True} if value was inserted.
+     * @throws IgniteCheckedException If failed.
+     */
+    public boolean mvccUpdateRowWithPreloadInfo(
+        GridCacheMapEntry entry,
+        @Nullable CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte mvccTxState,
+        byte newMvccTxState
+    ) throws IgniteCheckedException;
+
+    /**
+     * @param primary {@code True} if on primary node.
+     * @param entry Entry.
+     * @param val Value.
+     * @param ver Cache version.
+     * @param expireTime Expire time.
+     * @param mvccSnapshot MVCC snapshot.
+     * @return Transactions to wait for before finishing current transaction.
+     * @throws IgniteCheckedException If failed.
+     */
+    GridLongList mvccUpdateNative(
+            boolean primary,
+            GridCacheMapEntry entry,
+            CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
+
+    /**
+     * @param primary {@code True} if on primary node.
+     * @param entry Entry.
+     * @param mvccSnapshot MVCC snapshot.
+     * @return Transactions to wait for before finishing current transaction.
+     * @throws IgniteCheckedException If failed.
+     */
+    GridLongList mvccRemoveNative(
+            boolean primary,
+            GridCacheMapEntry entry,
+            MvccSnapshot mvccSnapshot
+    ) throws IgniteCheckedException;
+
+    /**
+     * @param entry Entry.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void mvccRemoveAll(GridCacheMapEntry entry)
+        throws IgniteCheckedException;
+
+    /**
+     * @param cctx Cache context.
+     * @param key Key.
      * @param val Value.
      * @param ver Version.
      * @param expireTime Expire time.
@@ -209,6 +413,7 @@
     public int onUndeploy(ClassLoader ldr);
 
     /**
+     *
      * @param cacheId Cache ID.
      * @param primary Primary entries flag.
      * @param backup Backup entries flag.
@@ -219,7 +424,8 @@
     public GridIterator<CacheDataRow> cacheIterator(int cacheId,
         boolean primary,
         boolean backup,
-        final AffinityTopologyVersion topVer)
+        AffinityTopologyVersion topVer,
+        @Nullable MvccSnapshot mvccSnapshot)
         throws IgniteCheckedException;
 
     /**
@@ -228,7 +434,20 @@
      * @return Partition data iterator.
      * @throws IgniteCheckedException If failed.
      */
-    public GridIterator<CacheDataRow> cachePartitionIterator(int cacheId, final int part) throws IgniteCheckedException;
+    public default GridIterator<CacheDataRow> cachePartitionIterator(int cacheId, final int part)
+        throws IgniteCheckedException {
+        return cachePartitionIterator(cacheId, part, null);
+    }
+
+    /**
+     * @param cacheId Cache ID.
+     * @param part Partition.
+     * @param mvccSnapshot MVCC snapshot.
+     * @return Partition data iterator.
+     * @throws IgniteCheckedException If failed.
+     */
+    public GridIterator<CacheDataRow> cachePartitionIterator(int cacheId, final int part,
+        @Nullable MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
 
     /**
      * @param part Partition number.
@@ -251,6 +470,7 @@
      * @return Partition data iterator.
      * @throws IgniteCheckedException If failed.
      */
+    // TODO: MVCC>
     public IgniteRebalanceIterator rebalanceIterator(IgniteDhtDemandedPartitionsMap parts, AffinityTopologyVersion topVer)
         throws IgniteCheckedException;
 
@@ -263,6 +483,7 @@
      * @return Entries iterator.
      * @throws IgniteCheckedException If failed.
      */
+    // TODO: MVCC>
     public <K, V> GridCloseableIterator<Cache.Entry<K, V>> cacheEntriesIterator(
         GridCacheContext cctx,
         final boolean primary,
@@ -276,6 +497,7 @@
      * @return Iterator.
      * @throws IgniteCheckedException If failed.
      */
+    // TODO: MVCC>
     public GridCloseableIterator<KeyCacheObject> cacheKeysIterator(int cacheId, final int part)
         throws IgniteCheckedException;
 
@@ -287,6 +509,7 @@
      * @return Entries count.
      * @throws IgniteCheckedException If failed.
      */
+    // TODO: MVCC>
     public long cacheEntriesCount(int cacheId, boolean primary, boolean backup, AffinityTopologyVersion topVer)
         throws IgniteCheckedException;
 
@@ -397,12 +620,20 @@
         long fullSize();
 
         /**
+         * Updates size metric for particular cache.
+         *
+         * @param cacheId Cache ID.
+         * @param delta Size delta.
+         */
+        void updateSize(int cacheId, long delta);
+
+        /**
          * @return Update counter.
          */
         long updateCounter();
 
         /**
-         *
+         * @param val Update counter.
          */
         void updateCounter(long val);
 
@@ -412,6 +643,16 @@
         public long nextUpdateCounter();
 
         /**
+         * @return Next mvcc update counter.
+         */
+        long nextMvccUpdateCounter();
+
+        /**
+         * @return Current mvcc update counter value.
+         */
+        long mvccUpdateCounter();
+
+        /**
          * @return Initial update counter.
          */
         public long initialUpdateCounter();
@@ -436,6 +677,24 @@
 
         /**
          * @param cctx Cache context.
+         * @param cleanupRows Rows to cleanup.
+         * @throws IgniteCheckedException If failed.
+         * @return Cleaned rows count.
+         */
+        public int cleanup(GridCacheContext cctx, @Nullable List<MvccLinkAwareSearchRow> cleanupRows)
+            throws IgniteCheckedException;
+
+        /**
+         *
+         * @param cctx Cache context.
+         * @param row Row.
+         * @throws IgniteCheckedException
+         */
+        public void updateTxState(GridCacheContext cctx, CacheSearchRow row)
+            throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
          * @param key Key.
          * @param val Value.
          * @param ver Version.
@@ -454,6 +713,163 @@
         /**
          * @param cctx Cache context.
          * @param key Key.
+         * @param val Value.
+         * @param ver Version.
+         * @param mvccVer MVCC version.
+         * @param newMvccVer New MVCC version.
+         * @return {@code True} if new value was inserted.
+         * @throws IgniteCheckedException If failed.
+         */
+        boolean mvccInitialValue(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @param val Value.
+         * @param ver Version.
+         * @param mvccVer MVCC version.
+         * @param newMvccVer New MVCC version.
+         * @param txState Tx state hint for the mvcc version.
+         * @param newTxState Tx state hint for the new mvcc version.
+         * @return {@code True} if new value was inserted.
+         * @throws IgniteCheckedException If failed.
+         */
+        boolean mvccInitialValueIfAbsent(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte txState,
+            byte newTxState) throws IgniteCheckedException;
+
+        /**
+         *
+         * @param cctx Grid cache context.
+         * @param key Key.
+         * @param val Value.
+         * @param ver Version.
+         * @param expireTime Expiration time.
+         * @param mvccVer Mvcc version.
+         * @param newMvccVer New mvcc version.
+         * @return {@code true} on success.
+         * @throws IgniteCheckedException, if failed.
+         */
+        boolean mvccUpdateRowWithPreloadInfo(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte mvccTxState,
+            byte newMvccTxState) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @param val Value.
+         * @param ver Version.
+         * @param expireTime Expire time.
+         * @param mvccSnapshot MVCC snapshot.
+         * @param primary {@code True} if update is executed on primary node.
+         * @param needHistory Flag to collect history.
+         * @param noCreate Flag indicating that row should not be created if absent.
+         * @return Update result.
+         * @throws IgniteCheckedException If failed.
+         */
+        MvccUpdateResult mvccUpdate(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccSnapshot mvccSnapshot,
+            boolean primary,
+            boolean needHistory,
+            boolean noCreate) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @param mvccSnapshot MVCC snapshot.
+         * @param primary {@code True} if update is executed on primary node.
+         * @param needHistory Flag to collect history.
+         * @return List of transactions to wait for.
+         * @throws IgniteCheckedException If failed.
+         */
+        MvccUpdateResult mvccRemove(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot mvccSnapshot,
+            boolean primary,
+            boolean needHistory) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @param mvccSnapshot MVCC snapshot.
+         * @return List of transactions to wait for.
+         * @throws IgniteCheckedException If failed.
+         */
+        MvccUpdateResult mvccLock(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param primary {@code True} if update is executed on primary node.
+         * @param key Key.
+         * @param val Value.
+         * @param ver Version.
+         * @param expireTime Expire time.
+         * @param mvccSnapshot MVCC snapshot.
+         * @return Update result.
+         * @throws IgniteCheckedException If failed.
+         */
+        @Nullable GridLongList mvccUpdateNative(
+                GridCacheContext cctx,
+                boolean primary,
+                KeyCacheObject key,
+                CacheObject val,
+                GridCacheVersion ver,
+                long expireTime,
+                MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param primary {@code True} if update is executed on primary node.
+         * @param key Key.
+         * @param mvccSnapshot MVCC snapshot.
+         * @return List of transactions to wait for.
+         * @throws IgniteCheckedException If failed.
+         */
+        @Nullable GridLongList mvccRemoveNative(GridCacheContext cctx,
+                                      boolean primary,
+                                      KeyCacheObject key,
+                                      MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @throws IgniteCheckedException If failed.
+         */
+        void mvccRemoveAll(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
          * @param c Closure.
          * @throws IgniteCheckedException If failed.
          */
@@ -476,12 +892,58 @@
         public CacheDataRow find(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException;
 
         /**
+         * Returns iterator over the all row versions for the given key.
+         *
+         * @param cctx Cache context.
+         * @param key Key.
+         * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+         * @return Iterator over all versions.
+         * @throws IgniteCheckedException If failed.
+         */
+        GridCursor<CacheDataRow> mvccAllVersionsCursor(GridCacheContext cctx, KeyCacheObject key, Object x)
+            throws IgniteCheckedException;
+
+        /**
+         * @param cctx Cache context.
+         * @param key Key.
+         * @return Data row.
+         * @throws IgniteCheckedException If failed.
+         */
+        public CacheDataRow mvccFind(GridCacheContext cctx, KeyCacheObject key, MvccSnapshot snapshot)
+            throws IgniteCheckedException;
+
+        /**
+         * For testing only.
+         *
+         * @param cctx Cache context.
+         * @param key Key.
+         * @return All stored versions for given key.
+         * @throws IgniteCheckedException If failed.
+         */
+        List<IgniteBiTuple<Object, MvccVersion>> mvccFindAllVersions(GridCacheContext cctx, KeyCacheObject key)
+            throws IgniteCheckedException;
+
+        /**
          * @return Data cursor.
          * @throws IgniteCheckedException If failed.
          */
         public GridCursor<? extends CacheDataRow> cursor() throws IgniteCheckedException;
 
         /**
+         * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+         * @return Data cursor.
+         * @throws IgniteCheckedException If failed.
+         */
+        public GridCursor<? extends CacheDataRow> cursor(Object x) throws IgniteCheckedException;
+
+        /**
+         * @param mvccSnapshot MVCC snapshot.
+         * @return Data cursor.
+         * @throws IgniteCheckedException If failed.
+         */
+        public GridCursor<? extends CacheDataRow> cursor(MvccSnapshot mvccSnapshot) throws IgniteCheckedException;
+
+        /**
          * @param cacheId Cache ID.
          * @return Data cursor.
          * @throws IgniteCheckedException If failed.
@@ -490,6 +952,15 @@
 
         /**
          * @param cacheId Cache ID.
+         * @param mvccSnapshot Mvcc snapshot.
+         * @return Data cursor.
+         * @throws IgniteCheckedException If failed.
+         */
+        public GridCursor<? extends CacheDataRow> cursor(int cacheId, MvccSnapshot mvccSnapshot)
+            throws IgniteCheckedException;
+
+        /**
+         * @param cacheId Cache ID.
          * @param lower Lower bound.
          * @param upper Upper bound.
          * @return Data cursor.
@@ -510,6 +981,18 @@
             KeyCacheObject upper, Object x) throws IgniteCheckedException;
 
         /**
+         * @param cacheId Cache ID.
+         * @param lower Lower bound.
+         * @param upper Upper bound.
+         * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+         * @param snapshot Mvcc snapshot.
+         * @return Data cursor.
+         * @throws IgniteCheckedException If failed.
+         */
+        public GridCursor<? extends CacheDataRow> cursor(int cacheId, KeyCacheObject lower,
+            KeyCacheObject upper, Object x, MvccSnapshot snapshot) throws IgniteCheckedException;
+
+        /**
          * Destroys the tree associated with the store.
          *
          * @throws IgniteCheckedException If failed.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
index 5482b3a..9919240 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheOffheapManagerImpl.java
@@ -17,10 +17,12 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -36,6 +38,9 @@
 import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.pagemem.FullPageId;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccMarkUpdatedRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateNewTxStateHintRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateTxStateHintRecord;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
@@ -43,28 +48,48 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteHistoricalIterator;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteRebalanceIteratorImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
 import org.apache.ignite.internal.processors.cache.persistence.RootPage;
 import org.apache.ignite.internal.processors.cache.persistence.RowStore;
 import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager;
 import org.apache.ignite.internal.processors.cache.tree.CacheDataRowStore;
 import org.apache.ignite.internal.processors.cache.tree.CacheDataTree;
 import org.apache.ignite.internal.processors.cache.tree.DataRow;
 import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree;
 import org.apache.ignite.internal.processors.cache.tree.PendingRow;
+import org.apache.ignite.internal.processors.cache.tree.RowLinkIO;
 import org.apache.ignite.internal.processors.cache.tree.SearchRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateDataRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateDataRowNative;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.ResultType;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccFirstRowTreeClosure;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccMaxSearchRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccMinSearchRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccSnapshotSearchRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccTreeClosure;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner;
 import org.apache.ignite.internal.util.GridAtomicLong;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
 import org.apache.ignite.internal.util.GridEmptyCloseableIterator;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 import org.apache.ignite.internal.util.GridStripedLock;
+import org.apache.ignite.internal.util.IgniteTree;
 import org.apache.ignite.internal.util.lang.GridCloseableIterator;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.lang.GridIterator;
@@ -72,14 +97,32 @@
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgnitePredicate;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX;
 import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION;
 import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.INITIAL_VERSION;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.compare;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.compareNewVersion;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.isVisible;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.state;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.unexpectedStateException;
+import static org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager.EMPTY_CURSOR;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_BIT_OFF;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_MASK;
+import static org.apache.ignite.internal.util.IgniteTree.OperationType.NOOP;
+import static org.apache.ignite.internal.util.IgniteTree.OperationType.PUT;
 
 /**
  *
@@ -158,7 +201,7 @@
     }
 
     /** {@inheritDoc} */
-    public void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException {
+    @Override public void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException {
         initPendingTree(cctx);
     }
 
@@ -252,7 +295,7 @@
      * @param part Partition.
      * @return Data store for given entry.
      */
-    public CacheDataStore dataStore(GridDhtLocalPartition part) {
+    @Override public CacheDataStore dataStore(GridDhtLocalPartition part) {
         if (grp.isLocal())
             return locCacheDataStore;
         else {
@@ -397,6 +440,170 @@
     }
 
     /** {@inheritDoc} */
+    @Override public boolean mvccInitialValue(
+        GridCacheMapEntry entry,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer) throws IgniteCheckedException {
+        return dataStore(entry.localPartition()).mvccInitialValue(
+            entry.context(),
+            entry.key(),
+            val,
+            ver,
+            expireTime,
+            mvccVer,
+            newMvccVer);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean mvccInitialValueIfAbsent(
+        GridCacheMapEntry entry,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte txState,
+        byte newTxState) throws IgniteCheckedException {
+        return dataStore(entry.localPartition()).mvccInitialValueIfAbsent(
+            entry.context(),
+            entry.key(),
+            val,
+            ver,
+            expireTime,
+            mvccVer,
+            newMvccVer,
+            txState,
+            newTxState);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean mvccUpdateRowWithPreloadInfo(
+        GridCacheMapEntry entry,
+        @Nullable CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte mvccTxState,
+        byte newMvccTxState
+    ) throws IgniteCheckedException {
+        assert entry.lockedByCurrentThread();
+
+        return dataStore(entry.localPartition()).mvccUpdateRowWithPreloadInfo(
+            entry.context(),
+            entry.key(),
+            val,
+            ver,
+            expireTime,
+            mvccVer,
+            newMvccVer,
+            mvccTxState,
+            newMvccTxState
+        );
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccUpdateResult mvccUpdate(
+        GridCacheMapEntry entry,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccSnapshot mvccSnapshot,
+        boolean primary,
+        boolean needHistory,
+        boolean noCreate) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return null;
+
+        assert entry.lockedByCurrentThread();
+
+        return dataStore(entry.localPartition()).mvccUpdate(entry.context(),
+            entry.key(),
+            val,
+            ver,
+            expireTime,
+            mvccSnapshot,
+            primary,
+            needHistory,
+            noCreate);
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccUpdateResult mvccRemove(
+        GridCacheMapEntry entry,
+        MvccSnapshot mvccSnapshot,
+        boolean primary,
+        boolean needHistory) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return null;
+
+        assert entry.lockedByCurrentThread();
+
+        return dataStore(entry.localPartition()).mvccRemove(entry.context(),
+            entry.key(),
+            mvccSnapshot,
+            primary,
+            needHistory);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridLongList mvccUpdateNative(
+        boolean primary,
+        GridCacheMapEntry entry,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return null;
+
+        return dataStore(entry.localPartition()).mvccUpdateNative(entry.context(),
+            primary,
+            entry.key(),
+            val,
+            ver,
+            expireTime,
+            mvccSnapshot);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridLongList mvccRemoveNative(
+        boolean primary,
+        GridCacheMapEntry entry,
+        MvccSnapshot mvccSnapshot
+    ) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return null;
+
+        return dataStore(entry.localPartition()).mvccRemoveNative(entry.context(),
+            primary,
+            entry.key(),
+            mvccSnapshot);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mvccRemoveAll(GridCacheMapEntry entry) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return;
+
+        dataStore(entry.localPartition()).mvccRemoveAll(entry.context(), entry.key());
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public MvccUpdateResult mvccLock(GridCacheMapEntry entry,
+        MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+        if (entry.detached() || entry.isNear())
+            return null;
+
+        assert entry.lockedByCurrentThread();
+
+        return dataStore(entry.localPartition()).mvccLock(entry.context(), entry.key(), mvccSnapshot);
+    }
+
+    /** {@inheritDoc} */
     @Override public void remove(
         GridCacheContext cctx,
         KeyCacheObject key,
@@ -418,16 +625,11 @@
     }
 
     /** {@inheritDoc} */
-    @Nullable @Override public CacheDataRow read(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException {
-        CacheDataRow row;
+    @Nullable @Override public CacheDataRow read(GridCacheContext cctx, KeyCacheObject key)
+        throws IgniteCheckedException {
+        CacheDataStore dataStore = dataStore(cctx, key);
 
-        if (cctx.isLocal())
-            row = locCacheDataStore.find(cctx, key);
-        else {
-            GridDhtLocalPartition part = cctx.topology().localPartition(cctx.affinity().partition(key), null, false);
-
-            row = part != null ? dataStore(part).find(cctx, key) : null;
-        }
+        CacheDataRow row = dataStore != null ? dataStore.find(cctx, key) : null;
 
         assert row == null || row.value() != null : row;
 
@@ -435,6 +637,51 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public CacheDataRow mvccRead(GridCacheContext cctx, KeyCacheObject key, MvccSnapshot ver)
+        throws IgniteCheckedException {
+        assert ver != null;
+
+        CacheDataStore dataStore = dataStore(cctx, key);
+
+        CacheDataRow row = dataStore != null ? dataStore.mvccFind(cctx, key, ver) : null;
+
+        assert row == null || row.value() != null : row;
+
+        return row;
+    }
+
+    /** {@inheritDoc} */
+    @Override public List<IgniteBiTuple<Object, MvccVersion>> mvccAllVersions(GridCacheContext cctx, KeyCacheObject key)
+        throws IgniteCheckedException {
+        CacheDataStore dataStore = dataStore(cctx, key);
+
+        return dataStore != null ? dataStore.mvccFindAllVersions(cctx, key) :
+            Collections.emptyList();
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCursor<CacheDataRow> mvccAllVersionsCursor(GridCacheContext cctx,
+        KeyCacheObject key, Object x) throws IgniteCheckedException {
+        CacheDataStore dataStore = dataStore(cctx, key);
+
+        return dataStore != null ? dataStore.mvccAllVersionsCursor(cctx, key, x) : EMPTY_CURSOR;
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param key Key.
+     * @return Data store.
+     */
+    @Nullable private CacheDataStore dataStore(GridCacheContext cctx, KeyCacheObject key) {
+        if (grp.isLocal())
+            return locCacheDataStore;
+
+        GridDhtLocalPartition part = grp.topology().localPartition(cctx.affinity().partition(key), null, false);
+
+        return part != null ? dataStore(part) : null;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean containsKey(GridCacheMapEntry entry) {
         try {
             return read(entry) != null;
@@ -470,7 +717,7 @@
     @Override public void clearCache(GridCacheContext cctx, boolean readers) {
         GridCacheVersion obsoleteVer = null;
 
-        try (GridCloseableIterator<CacheDataRow> it = grp.isLocal() ? iterator(cctx.cacheId(), cacheDataStores().iterator()) :
+        try (GridCloseableIterator<CacheDataRow> it = grp.isLocal() ? iterator(cctx.cacheId(), cacheDataStores().iterator(), null) :
             evictionSafeIterator(cctx.cacheId(), cacheDataStores().iterator())) {
             while (it.hasNext()) {
                 cctx.shared().database().checkpointReadLock();
@@ -529,7 +776,7 @@
         final boolean backup,
         final AffinityTopologyVersion topVer,
         final boolean keepBinary) throws IgniteCheckedException {
-        final Iterator<CacheDataRow> it = cacheIterator(cctx.cacheId(), primary, backup, topVer);
+        final Iterator<CacheDataRow> it = cacheIterator(cctx.cacheId(), primary, backup, topVer, null);
 
         return new GridCloseableIteratorAdapter<Cache.Entry<K, V>>() {
             /** */
@@ -570,7 +817,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public GridCloseableIterator<KeyCacheObject> cacheKeysIterator(int cacheId, final int part) throws IgniteCheckedException {
+    @Override public GridCloseableIterator<KeyCacheObject> cacheKeysIterator(int cacheId, final int part)
+        throws IgniteCheckedException {
         CacheDataStore data = partitionData(part);
 
         if (data == null)
@@ -611,19 +859,21 @@
         int cacheId,
         boolean primary,
         boolean backups,
-        final AffinityTopologyVersion topVer)
+        final AffinityTopologyVersion topVer,
+        @Nullable MvccSnapshot mvccSnapshot)
         throws IgniteCheckedException {
-        return iterator(cacheId, cacheData(primary, backups, topVer));
+        return iterator(cacheId, cacheData(primary, backups, topVer), mvccSnapshot);
     }
 
     /** {@inheritDoc} */
-    @Override public GridIterator<CacheDataRow> cachePartitionIterator(int cacheId, int part) throws IgniteCheckedException {
+    @Override public GridIterator<CacheDataRow> cachePartitionIterator(int cacheId, int part,
+        @Nullable MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
         CacheDataStore data = partitionData(part);
 
         if (data == null)
             return new GridEmptyCloseableIterator<>();
 
-        return iterator(cacheId, singletonIterator(data));
+        return iterator(cacheId, singletonIterator(data), mvccSnapshot);
     }
 
     /** {@inheritDoc} */
@@ -633,15 +883,20 @@
         if (data == null)
             return new GridEmptyCloseableIterator<>();
 
-        return iterator(CU.UNDEFINED_CACHE_ID, singletonIterator(data));
+        return iterator(CU.UNDEFINED_CACHE_ID, singletonIterator(data), null);
     }
 
     /**
+     *
      * @param cacheId Cache ID.
      * @param dataIt Data store iterator.
+     * @param mvccSnapshot Mvcc snapshot.
      * @return Rows iterator
      */
-    private GridCloseableIterator<CacheDataRow> iterator(final int cacheId, final Iterator<CacheDataStore> dataIt) {
+    private GridCloseableIterator<CacheDataRow> iterator(final int cacheId,
+        final Iterator<CacheDataStore> dataIt,
+        final MvccSnapshot mvccSnapshot)
+    {
         return new GridCloseableIteratorAdapter<CacheDataRow>() {
             /** */
             private GridCursor<? extends CacheDataRow> cur;
@@ -670,7 +925,13 @@
                             CacheDataStore ds = dataIt.next();
 
                             curPart = ds.partId();
-                            cur = cacheId == CU.UNDEFINED_CACHE_ID ? ds.cursor() : ds.cursor(cacheId);
+
+                            if (mvccSnapshot == null)
+                                cur = cacheId == CU.UNDEFINED_CACHE_ID ? ds.cursor() : ds.cursor(cacheId);
+                            else {
+                                cur = cacheId == CU.UNDEFINED_CACHE_ID ?
+                                    ds.cursor(mvccSnapshot) : ds.cursor(cacheId, mvccSnapshot);
+                            }
                         }
                         else
                             break;
@@ -1142,6 +1403,13 @@
         /** Update counter. */
         protected final AtomicLong cntr = new AtomicLong();
 
+        /**
+         * Mvcc update counter. This counter is used for an mvcc-style entries updates where this counter is
+         * incremented on each entry write (which happens before commit), but main update counter is updated
+         * on commit phase only.
+         */
+        protected final AtomicLong mvccUpdCntr = new AtomicLong();
+
         /** Partition size. */
         private final AtomicLong storageSize = new AtomicLong();
 
@@ -1151,6 +1419,15 @@
         /** Initial update counter. */
         protected long initCntr;
 
+        /** Mvcc remove handler. */
+        private final PageHandler<MvccVersion, Boolean> mvccUpdateMarker = new MvccMarkUpdatedHandler();
+
+        /** Mvcc update tx state hint handler. */
+        private final PageHandler<Void, Boolean> mvccUpdateTxStateHint = new MvccUpdateTxStateHintHandler();
+
+        /** */
+        private final PageHandler<MvccDataRow, Boolean> mvccApplyChanges = new MvccApplyChangesHandler();
+
         /**
          * @param partId Partition number.
          * @param name Name.
@@ -1173,36 +1450,14 @@
          * @param cacheId Cache ID.
          */
         void incrementSize(int cacheId) {
-            storageSize.incrementAndGet();
-
-            if (grp.sharedGroup()) {
-                AtomicLong size = cacheSizes.get(cacheId);
-
-                if (size == null) {
-                    AtomicLong old = cacheSizes.putIfAbsent(cacheId, size = new AtomicLong());
-
-                    if (old != null)
-                        size = old;
-                }
-
-                size.incrementAndGet();
-            }
+            updateSize(cacheId, 1);
         }
 
         /**
          * @param cacheId Cache ID.
          */
         void decrementSize(int cacheId) {
-            storageSize.decrementAndGet();
-
-            if (grp.sharedGroup()) {
-                AtomicLong size = cacheSizes.get(cacheId);
-
-                if (size == null)
-                    return;
-
-                size.decrementAndGet();
-            }
+            updateSize(cacheId, -1);
         }
 
         /** {@inheritDoc} */
@@ -1240,13 +1495,34 @@
         }
 
         /** {@inheritDoc} */
+        @Override public void updateSize(int cacheId, long delta) {
+            storageSize.addAndGet(delta);
+
+            if (grp.sharedGroup()) {
+                AtomicLong size = cacheSizes.get(cacheId);
+
+                if (size == null) {
+                    AtomicLong old = cacheSizes.putIfAbsent(cacheId, size = new AtomicLong());
+
+                    if (old != null)
+                        size = old;
+                }
+
+                size.addAndGet(delta);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public long nextUpdateCounter() {
+            return cntr.incrementAndGet();
+        }
+
+        /** {@inheritDoc} */
         @Override public long updateCounter() {
             return cntr.get();
         }
 
-        /**
-         * @param val Update index value.
-         */
+        /** {@inheritDoc} */
         @Override public void updateCounter(long val) {
             while (true) {
                 long val0 = cntr.get();
@@ -1260,6 +1536,16 @@
         }
 
         /** {@inheritDoc} */
+        @Override public long nextMvccUpdateCounter() {
+            return mvccUpdCntr.incrementAndGet();
+        }
+
+        /** {@inheritDoc} */
+        @Override public long mvccUpdateCounter() {
+            return mvccUpdCntr.get();
+        }
+
+        /** {@inheritDoc} */
         @Override public String name() {
             return name;
         }
@@ -1273,7 +1559,7 @@
          */
         private boolean canUpdateOldRow(GridCacheContext cctx, @Nullable CacheDataRow oldRow, DataRow dataRow)
             throws IgniteCheckedException {
-            if (oldRow == null || cctx.queries().enabled())
+            if (oldRow == null || cctx.queries().enabled() || grp.mvccEnabled())
                 return false;
 
             if (oldRow.expireTime() != dataRow.expireTime())
@@ -1282,12 +1568,12 @@
             // Use grp.sharedGroup() flag since it is possible cacheId is not yet set here.
             boolean sizeWithCacheId = grp.sharedGroup();
 
-            int oldLen = DataPageIO.getRowSize(oldRow, sizeWithCacheId);
+            int oldLen = oldRow.size();
 
             if (oldLen > updateValSizeThreshold)
                 return false;
 
-            int newLen = DataPageIO.getRowSize(dataRow, sizeWithCacheId);
+            int newLen = dataRow.size();
 
             return oldLen == newLen;
         }
@@ -1295,15 +1581,26 @@
         /** {@inheritDoc} */
         @Override public void invoke(GridCacheContext cctx, KeyCacheObject key, OffheapInvokeClosure c)
             throws IgniteCheckedException {
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+            invoke0(cctx, new SearchRow(cacheId, key), c);
+        }
+
+        /**
+         * @param cctx Cache context.
+         * @param row Search row.
+         * @param c Closure.
+         * @throws IgniteCheckedException If failed.
+         */
+        private void invoke0(GridCacheContext cctx, CacheSearchRow row, OffheapInvokeClosure c)
+            throws IgniteCheckedException {
             if (!busyLock.enterBusy())
                 throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
 
             try {
-                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
-
                 assert cctx.shared().database().checkpointLockIsHeldByThread();
 
-                dataTree.invoke(new SearchRow(cacheId, key), CacheDataRowAdapter.RowData.NO_KEY, c);
+                dataTree.invoke(row, CacheDataRowAdapter.RowData.NO_KEY, c);
 
                 switch (c.operationType()) {
                     case PUT: {
@@ -1319,7 +1616,7 @@
                     case REMOVE: {
                         CacheDataRow oldRow = c.oldRow();
 
-                        finishRemove(cctx, key, oldRow);
+                        finishRemove(cctx, row.key(), oldRow);
 
                         break;
                     }
@@ -1346,7 +1643,7 @@
             @Nullable CacheDataRow oldRow) throws IgniteCheckedException {
             int cacheId = grp.storeCacheIdInDataPage() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
 
-            DataRow dataRow = new DataRow(key, val, ver, partId, expireTime, cacheId);
+            DataRow dataRow = makeDataRow(key, val, ver, expireTime, cacheId);
 
             if (canUpdateOldRow(cctx, oldRow, dataRow) && rowStore.updateRow(oldRow.link(), dataRow))
                 dataRow.link(oldRow.link());
@@ -1367,10 +1664,680 @@
             return dataRow;
         }
 
+        /**
+         * @param key Cache key.
+         * @param val Cache value.
+         * @param ver Version.
+         * @param expireTime Expired time.
+         * @param cacheId Cache id.
+         * @return Made data row.
+         */
+        @NotNull private DataRow makeDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime,
+            int cacheId) {
+            if (key.partition() == -1)
+                key.partition(partId);
+
+            return new DataRow(key, val, ver, partId, expireTime, cacheId);
+        }
+
         /** {@inheritDoc} */
-        @Override public void update(
+        @Override public boolean mvccInitialValue(
             GridCacheContext cctx,
             KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer)
+            throws IgniteCheckedException
+        {
+            assert mvccVer != null || newMvccVer == null : newMvccVer;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                // null is passed for loaded from store.
+                if (mvccVer == null) {
+                    mvccVer = INITIAL_VERSION;
+
+                    // Clean all versions of row
+                    mvccRemoveAll(cctx, key);
+                }
+
+                if (val != null) {
+                    val.valueBytes(coCtx);
+
+                    MvccDataRow updateRow = new MvccDataRow(
+                        key,
+                        val,
+                        ver,
+                        partId,
+                        expireTime,
+                        cctx.cacheId(),
+                        mvccVer,
+                        newMvccVer);
+
+                    assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                    if (!grp.storeCacheIdInDataPage() && updateRow.cacheId() != CU.UNDEFINED_CACHE_ID) {
+                        updateRow.cacheId(CU.UNDEFINED_CACHE_ID);
+
+                        rowStore.addRow(updateRow);
+
+                        updateRow.cacheId(cctx.cacheId());
+                    }
+                    else
+                        rowStore.addRow(updateRow);
+
+                    dataTree.putx(updateRow);
+
+                    incrementSize(cctx.cacheId());
+
+                    if (cctx.queries().enabled())
+                        cctx.queries().store(updateRow, null, true);
+
+                    return true;
+                }
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean mvccInitialValueIfAbsent(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte txState,
+            byte newTxState)
+            throws IgniteCheckedException
+        {
+            assert mvccVer != null;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                if (val != null)
+                    val.valueBytes(coCtx);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                MvccPutIfAbsentClosure clo = new MvccPutIfAbsentClosure(key,
+                    val,
+                    ver,
+                    partId,
+                    expireTime,
+                    cctx.cacheId(),
+                    mvccVer,
+                    newMvccVer,
+                    txState,
+                    newTxState);
+
+                dataTree.invoke(clo, CacheDataRowAdapter.RowData.LINK_ONLY, clo);
+
+                if (clo.operationType() == PUT)
+                    finishUpdate(cctx, clo, null);
+
+                return clo.operationType() == PUT;
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean mvccUpdateRowWithPreloadInfo(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte mvccTxState,
+            byte newMvccTxState) throws IgniteCheckedException {
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                if (val != null)
+                    val.valueBytes(coCtx);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                MvccUpdateRowWithPreloadInfoClosure clo = new MvccUpdateRowWithPreloadInfoClosure(cctx,
+                    key,
+                    val,
+                    ver,
+                    expireTime,
+                    mvccVer,
+                    newMvccVer,
+                    mvccTxState,
+                    newMvccTxState);
+
+                invoke0(cctx, clo, clo);
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccUpdate(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccSnapshot mvccSnapshot,
+            boolean primary,
+            boolean needHistory,
+            boolean noCreate) throws IgniteCheckedException {
+            assert mvccSnapshot != null;
+            assert primary || !needHistory;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+                val.valueBytes(coCtx);
+
+                MvccUpdateDataRow updateRow = new MvccUpdateDataRow(
+                    cctx,
+                    key,
+                    val,
+                    ver,
+                    partId,
+                    expireTime,
+                    mvccSnapshot,
+                    null,
+                    primary,
+                    false,
+                    needHistory,
+                    // we follow fast update visit flow here if row cannot be created by current operation
+                    noCreate);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                dataTree.visit(new MvccMaxSearchRow(cacheId, key), new MvccMinSearchRow(cacheId, key), updateRow);
+
+                ResultType res = updateRow.resultType();
+
+                if (res == ResultType.LOCKED // cannot update locked
+                    || res == ResultType.VERSION_MISMATCH) // cannot update on write conflict
+                    return updateRow;
+                else if (res == ResultType.VERSION_FOUND) {
+                    // Do nothing, except cleaning up not needed versions
+                    cleanup(cctx, updateRow.cleanupRows());
+
+                    return updateRow;
+                }
+                else if (res == ResultType.PREV_NULL && noCreate) {
+                    cleanup(cctx, updateRow.cleanupRows());
+
+                    return updateRow;
+                }
+
+                CacheDataRow oldRow = null;
+
+                if (res == ResultType.PREV_NOT_NULL) {
+                    oldRow = updateRow.oldRow();
+
+                    assert oldRow != null && oldRow.link() != 0 : oldRow;
+
+                    oldRow.key(key);
+
+                    rowStore.updateDataRow(oldRow.link(), mvccUpdateMarker, mvccSnapshot);
+                }
+                else
+                    assert res == ResultType.PREV_NULL;
+
+                if (!grp.storeCacheIdInDataPage() && updateRow.cacheId() != CU.UNDEFINED_CACHE_ID) {
+                    updateRow.cacheId(CU.UNDEFINED_CACHE_ID);
+
+                    rowStore.addRow(updateRow);
+
+                    updateRow.cacheId(cctx.cacheId());
+                }
+                else
+                    rowStore.addRow(updateRow);
+
+                if (needHistory) {
+                    assert updateRow.link() != 0;
+
+                    updateRow.history().add(new MvccLinkAwareSearchRow(cacheId,
+                        key,
+                        updateRow.mvccCoordinatorVersion(),
+                        updateRow.mvccCounter(),
+                        updateRow.mvccOperationCounter(),
+                        updateRow.link()));
+                }
+
+                boolean old = dataTree.putx(updateRow);
+
+                assert !old;
+
+                GridCacheQueryManager qryMgr = cctx.queries();
+
+                if (qryMgr.enabled())
+                    qryMgr.store(updateRow, null, true);
+
+                updatePendingEntries(cctx, updateRow, oldRow);
+
+                cleanup(cctx, updateRow.cleanupRows());
+
+                return updateRow;
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccRemove(GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot mvccSnapshot,
+            boolean primary,
+            boolean needHistory) throws IgniteCheckedException {
+            assert mvccSnapshot != null;
+            assert primary || mvccSnapshot.activeTransactions().size() == 0 : mvccSnapshot;
+            assert primary || !needHistory;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                MvccUpdateDataRow updateRow = new MvccUpdateDataRow(
+                    cctx,
+                    key,
+                    null,
+                    null,
+                    partId,
+                    0,
+                    mvccSnapshot,
+                    null,
+                    primary,
+                    false,
+                    needHistory,
+                    true);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                dataTree.visit(new MvccMaxSearchRow(cacheId, key), new MvccMinSearchRow(cacheId, key), updateRow);
+
+                ResultType res = updateRow.resultType();
+
+                if (res == ResultType.LOCKED // cannot update locked
+                    || res == ResultType.VERSION_MISMATCH) // cannot update on write conflict
+                    return updateRow;
+                else if (res == ResultType.VERSION_FOUND) {
+                    // Do nothing, except cleaning up not needed versions
+                    cleanup(cctx, updateRow.cleanupRows());
+
+                    return updateRow;
+                }
+                else if (res == ResultType.PREV_NOT_NULL) {
+                    CacheDataRow oldRow = updateRow.oldRow();
+
+                    assert oldRow != null && oldRow.link() != 0 : oldRow;
+
+                    rowStore.updateDataRow(oldRow.link(), mvccUpdateMarker, mvccSnapshot);
+
+                    clearPendingEntries(cctx, oldRow);
+                }
+
+                cleanup(cctx, updateRow.cleanupRows());
+
+                return updateRow;
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccLock(GridCacheContext cctx, KeyCacheObject key,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            assert mvccSnapshot != null;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                MvccUpdateDataRow updateRow = new MvccUpdateDataRow(
+                    cctx,
+                    key,
+                    null,
+                    null,
+                    partId,
+                    0,
+                    mvccSnapshot,
+                    null,
+                    true,
+                    true,
+                    false,
+                    false);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                dataTree.visit(new MvccMaxSearchRow(cacheId, key), new MvccMinSearchRow(cacheId, key), updateRow);
+
+                ResultType res = updateRow.resultType();
+
+                // cannot update locked, cannot update on write conflict
+                if (res == ResultType.LOCKED || res == ResultType.VERSION_MISMATCH)
+                    return updateRow;
+
+                // Do nothing, except cleaning up not needed versions
+                cleanup(cctx, updateRow.cleanupRows());
+
+                return updateRow;
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridLongList mvccUpdateNative(
+            GridCacheContext cctx,
+            boolean primary,
+            KeyCacheObject key,
+            CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            assert mvccSnapshot != null;
+            assert primary || mvccSnapshot.activeTransactions().size() == 0 : mvccSnapshot;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+                val.valueBytes(coCtx);
+
+                MvccUpdateDataRowNative updateRow = new MvccUpdateDataRowNative(
+                    key,
+                    val,
+                    ver,
+                    expireTime,
+                    mvccSnapshot,
+                    null,
+                    partId,
+                    cctx);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                dataTree.iterate(new MvccMaxSearchRow(cacheId, key), new MvccMinSearchRow(cacheId, key), updateRow);
+
+                ResultType res = updateRow.resultType();
+
+                if (res == ResultType.VERSION_FOUND) {
+                    // Do nothing, except cleaning up not needed versions
+                    cleanup(cctx, updateRow.cleanupRows());
+
+                    return null;
+                }
+
+                CacheDataRow oldRow = null;
+
+                if (res == ResultType.PREV_NOT_NULL) {
+                    oldRow = updateRow.oldRow();
+
+                    assert oldRow != null && oldRow.link() != 0 : oldRow;
+
+                    oldRow.key(key);
+
+                    rowStore.updateDataRow(oldRow.link(), mvccUpdateMarker, mvccSnapshot);
+                }
+                else
+                    assert res == ResultType.PREV_NULL;
+
+                if (!grp.storeCacheIdInDataPage() && updateRow.cacheId() != CU.UNDEFINED_CACHE_ID) {
+                    updateRow.cacheId(CU.UNDEFINED_CACHE_ID);
+
+                    rowStore.addRow(updateRow);
+
+                    updateRow.cacheId(cctx.cacheId());
+                }
+                else
+                    rowStore.addRow(updateRow);
+
+                boolean old = dataTree.putx(updateRow);
+
+                assert !old;
+
+                incrementSize(cctx.cacheId());
+
+                GridCacheQueryManager qryMgr = cctx.queries();
+
+                if (qryMgr.enabled())
+                    qryMgr.store(updateRow, null, true);
+
+                updatePendingEntries(cctx, updateRow, oldRow);
+
+                cleanup(cctx, updateRow.cleanupRows());
+
+                return updateRow.activeTransactions();
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridLongList mvccRemoveNative(GridCacheContext cctx,
+            boolean primary,
+            KeyCacheObject key,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            assert mvccSnapshot != null;
+            assert primary || mvccSnapshot.activeTransactions().size() == 0 : mvccSnapshot;
+
+            if (!busyLock.enterBusy())
+                throw new NodeStoppingException("Operation has been cancelled (node is stopping).");
+
+            try {
+                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+                CacheObjectContext coCtx = cctx.cacheObjectContext();
+
+                // Make sure value bytes initialized.
+                key.valueBytes(coCtx);
+
+                MvccUpdateDataRowNative updateRow = new MvccUpdateDataRowNative(
+                    key,
+                    null,
+                    null,
+                    0,
+                    mvccSnapshot,
+                    null,
+                    partId,
+                    cctx);
+
+                assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                dataTree.iterate(new MvccMaxSearchRow(cacheId, key) , new MvccMinSearchRow(cacheId, key), updateRow);
+
+                ResultType res = updateRow.resultType();
+
+                if (res == ResultType.VERSION_FOUND) {
+                    assert !primary : updateRow;
+
+                    // Do nothing, except cleaning up not needed versions
+                    cleanup(cctx, updateRow.cleanupRows());
+
+                    return null;
+                }
+                else if (res == ResultType.PREV_NOT_NULL) {
+                    CacheDataRow oldRow = updateRow.oldRow();
+
+                    assert oldRow != null && oldRow.link() != 0 : oldRow;
+
+                    rowStore.updateDataRow(oldRow.link(), mvccUpdateMarker, mvccSnapshot);
+
+                    clearPendingEntries(cctx, oldRow);
+                }
+
+                cleanup(cctx, updateRow.cleanupRows());
+
+                return updateRow.activeTransactions();
+            }
+            finally {
+                busyLock.leaveBusy();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void mvccRemoveAll(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException {
+            key.valueBytes(cctx.cacheObjectContext());
+
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+            boolean cleanup = cctx.queries().enabled() || hasPendingEntries;
+
+            assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+            GridCursor<CacheDataRow> cur = dataTree.find(
+                new MvccMaxSearchRow(cacheId, key),
+                new MvccMinSearchRow(cacheId, key),
+                cleanup ? CacheDataRowAdapter.RowData.NO_KEY : CacheDataRowAdapter.RowData.LINK_ONLY
+            );
+
+            boolean first = true;
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                row.key(key);
+
+                assert row.link() != 0 : row;
+
+                boolean rmvd = dataTree.removex(row);
+
+                assert rmvd : row;
+
+                if (cleanup) {
+                    if (cctx.queries().enabled())
+                        cctx.queries().remove(key, row);
+
+                    if (first)
+                        clearPendingEntries(cctx, row);
+                }
+
+                rowStore.removeRow(row.link());
+
+                if (first)
+                    first = false;
+            }
+
+            // first == true means there were no row versions
+            if (!first)
+                decrementSize(cctx.cacheId());
+        }
+
+        /** {@inheritDoc} */
+        @Override public int cleanup(GridCacheContext cctx, @Nullable List<MvccLinkAwareSearchRow> cleanupRows)
+            throws IgniteCheckedException {
+            int res = 0;
+
+            if (cleanupRows != null) {
+                GridCacheQueryManager qryMgr = cctx.queries();
+
+                for (int i = 0; i < cleanupRows.size(); i++) {
+                    MvccLinkAwareSearchRow cleanupRow = cleanupRows.get(i);
+
+                    assert cleanupRow.link() != 0 : cleanupRow;
+
+                    assert cctx.shared().database().checkpointLockIsHeldByThread();
+
+                    CacheDataRow oldRow = dataTree.remove(cleanupRow);
+
+                    if (oldRow != null) { // oldRow == null means it was cleaned by another cleanup process.
+                        assert oldRow.mvccCounter() == cleanupRow.mvccCounter();
+
+                        if (qryMgr.enabled())
+                            qryMgr.remove(oldRow.key(), oldRow);
+
+                        clearPendingEntries(cctx, oldRow);
+
+                        rowStore.removeRow(cleanupRow.link());
+
+                        res++;
+                    }
+                }
+            }
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void updateTxState(GridCacheContext cctx, CacheSearchRow row)
+            throws IgniteCheckedException {
+            assert grp.mvccEnabled();
+            assert mvccVersionIsValid(row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter()) : row;
+
+            // Need an extra lookup because the row may be already cleaned by another thread.
+            CacheDataRow row0 = dataTree.findOne(row, CacheDataRowAdapter.RowData.LINK_ONLY);
+
+            if (row0 != null)
+                rowStore.updateDataRow(row0.link(), mvccUpdateTxStateHint, null);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void update(GridCacheContext cctx,
+            KeyCacheObject key,
             CacheObject val,
             GridCacheVersion ver,
             long expireTime,
@@ -1386,10 +2353,7 @@
 
                 assert oldRow == null || oldRow.cacheId() == cacheId : oldRow;
 
-                if (key.partition() == -1)
-                    key.partition(partId);
-
-                DataRow dataRow = new DataRow(key, val, ver, partId, expireTime, cacheId);
+                DataRow dataRow = makeDataRow(key, val, ver, expireTime, cacheId);
 
                 CacheObjectContext coCtx = cctx.cacheObjectContext();
 
@@ -1443,23 +2407,41 @@
 
             KeyCacheObject key = newRow.key();
 
-            long expireTime = newRow.expireTime();
-
             GridCacheQueryManager qryMgr = cctx.queries();
 
-            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
-
             if (qryMgr.enabled())
                 qryMgr.store(newRow, oldRow, true);
 
+            updatePendingEntries(cctx, newRow, oldRow);
+
+            if (oldRow != null) {
+                assert oldRow.link() != 0 : oldRow;
+
+                if (newRow.link() != oldRow.link())
+                    rowStore.removeRow(oldRow.link());
+            }
+
+            updateIgfsMetrics(cctx, key, (oldRow != null ? oldRow.value() : null), newRow.value());
+        }
+
+        /**
+         * @param cctx Cache context.
+         * @param newRow New row.
+         * @param oldRow Old row.
+         * @throws IgniteCheckedException If failed.
+         */
+        private void updatePendingEntries(GridCacheContext cctx, CacheDataRow newRow, @Nullable CacheDataRow oldRow)
+            throws IgniteCheckedException
+        {
+            long expireTime = newRow.expireTime();
+
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
             if (oldRow != null) {
                 assert oldRow.link() != 0 : oldRow;
 
                 if (pendingTree() != null && oldRow.expireTime() != 0)
                     pendingTree().removex(new PendingRow(cacheId, oldRow.expireTime(), oldRow.link()));
-
-                if (newRow.link() != oldRow.link())
-                    rowStore.removeRow(oldRow.link());
             }
 
             if (pendingTree() != null && expireTime != 0) {
@@ -1467,8 +2449,6 @@
 
                 hasPendingEntries = true;
             }
-
-            updateIgfsMetrics(cctx, key, (oldRow != null ? oldRow.value() : null), newRow.value());
         }
 
         /** {@inheritDoc} */
@@ -1498,14 +2478,7 @@
          */
         private void finishRemove(GridCacheContext cctx, KeyCacheObject key, @Nullable CacheDataRow oldRow) throws IgniteCheckedException {
             if (oldRow != null) {
-                int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
-
-                assert oldRow.link() != 0 : oldRow;
-                assert cacheId == CU.UNDEFINED_CACHE_ID || oldRow.cacheId() == cacheId :
-                    "Incorrect cache ID [expected=" + cacheId + ", actual=" + oldRow.cacheId() + "].";
-
-                if (pendingTree() != null && oldRow.expireTime() != 0)
-                    pendingTree().removex(new PendingRow(cacheId, oldRow.expireTime(), oldRow.link()));
+                clearPendingEntries(cctx, oldRow);
 
                 decrementSize(cctx.cacheId());
             }
@@ -1521,21 +2494,133 @@
             updateIgfsMetrics(cctx, key, (oldRow != null ? oldRow.value() : null), null);
         }
 
+        /**
+         * @param cctx Cache context.
+         * @param oldRow Old row.
+         * @throws IgniteCheckedException
+         */
+        private void clearPendingEntries(GridCacheContext cctx, CacheDataRow oldRow)
+            throws IgniteCheckedException {
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+            assert oldRow.link() != 0 : oldRow;
+            assert cacheId == CU.UNDEFINED_CACHE_ID || oldRow.cacheId() == cacheId :
+                "Incorrect cache ID [expected=" + cacheId + ", actual=" + oldRow.cacheId() + "].";
+
+            if (pendingTree() != null && oldRow.expireTime() != 0)
+                pendingTree().removex(new PendingRow(cacheId, oldRow.expireTime(), oldRow.link()));
+        }
+
         /** {@inheritDoc} */
         @Override public CacheDataRow find(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException {
             key.valueBytes(cctx.cacheObjectContext());
 
             int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
 
-            CacheDataRow row = dataTree.findOne(new SearchRow(cacheId, key), CacheDataRowAdapter.RowData.NO_KEY);
+            CacheDataRow row;
 
+            if (grp.mvccEnabled()) {
+                MvccFirstRowTreeClosure clo = new MvccFirstRowTreeClosure(cctx);
+
+                dataTree.iterate(
+                    new MvccMaxSearchRow(cacheId, key),
+                    new MvccMinSearchRow(cacheId, key),
+                    clo
+                );
+
+                row = clo.row();
+            }
+            else
+                row = dataTree.findOne(new SearchRow(cacheId, key), CacheDataRowAdapter.RowData.NO_KEY);
+
+            afterRowFound(row, key);
+
+            return row;
+        }
+
+        /** {@inheritDoc} */
+        @Override public List<IgniteBiTuple<Object, MvccVersion>> mvccFindAllVersions(
+            GridCacheContext cctx,
+            KeyCacheObject key)
+            throws IgniteCheckedException
+        {
+            assert grp.mvccEnabled();
+
+            // Note: this method is intended for testing only.
+
+            key.valueBytes(cctx.cacheObjectContext());
+
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+            GridCursor<CacheDataRow> cur = dataTree.find(
+                new MvccMaxSearchRow(cacheId, key),
+                new MvccMinSearchRow(cacheId, key)
+            );
+
+            List<IgniteBiTuple<Object, MvccVersion>> res = new ArrayList<>();
+
+            long crd = MVCC_CRD_COUNTER_NA;
+            long cntr = MVCC_COUNTER_NA;
+            int opCntr = MVCC_OP_COUNTER_NA;
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                if (compareNewVersion(row, crd, cntr, opCntr) != 0) // deleted row
+                    res.add(F.t(null, row.newMvccVersion()));
+
+                res.add(F.t(row.value(), row.mvccVersion()));
+
+                crd = row.mvccCoordinatorVersion();
+                cntr = row.mvccCounter();
+                opCntr = row.mvccOperationCounter();
+            }
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridCursor<CacheDataRow> mvccAllVersionsCursor(GridCacheContext cctx, KeyCacheObject key, Object x)
+            throws IgniteCheckedException {
+            int cacheId = cctx.cacheId();
+
+            return dataTree.find(new MvccMaxSearchRow(cacheId, key), new MvccMinSearchRow(cacheId, key), x);
+        }
+
+        /** {@inheritDoc} */
+        @Override public CacheDataRow mvccFind(GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot snapshot) throws IgniteCheckedException {
+            key.valueBytes(cctx.cacheObjectContext());
+
+            int cacheId = grp.sharedGroup() ? cctx.cacheId() : CU.UNDEFINED_CACHE_ID;
+
+            MvccSnapshotSearchRow clo = new MvccSnapshotSearchRow(cctx, key, snapshot);
+
+            dataTree.iterate(
+                clo,
+                new MvccMinSearchRow(cacheId, key),
+                clo
+            );
+
+            CacheDataRow row = clo.row();
+
+            afterRowFound(row, key);
+
+            return row;
+        }
+
+        /**
+         * @param row Row.
+         * @param key Key.
+         * @throws IgniteCheckedException If failed.
+         */
+        private void afterRowFound(@Nullable CacheDataRow row, KeyCacheObject key) throws IgniteCheckedException {
             if (row != null) {
                 row.key(key);
 
                 grp.dataRegion().evictionTracker().touchPage(row.link());
             }
-
-            return row;
         }
 
         /** {@inheritDoc} */
@@ -1544,11 +2629,36 @@
         }
 
         /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(Object x) throws IgniteCheckedException {
+            return dataTree.find(null, null, x);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(MvccSnapshot mvccSnapshot)
+            throws IgniteCheckedException {
+
+            if (mvccSnapshot != null) {
+                assert grp.mvccEnabled();
+
+                return dataTree.find(null, null,
+                    new MvccFirstVisibleRowTreeClosure(grp.singleCacheContext(), mvccSnapshot), null);
+            }
+
+            return dataTree.find(null, null);
+        }
+
+        /** {@inheritDoc} */
         @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId) throws IgniteCheckedException {
             return cursor(cacheId, null, null);
         }
 
         /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            return cursor(cacheId, null, null, null, mvccSnapshot);
+        }
+
+        /** {@inheritDoc} */
         @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId, KeyCacheObject lower,
             KeyCacheObject upper) throws IgniteCheckedException {
             return cursor(cacheId, lower, upper, null);
@@ -1557,6 +2667,12 @@
         /** {@inheritDoc} */
         @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId, KeyCacheObject lower,
             KeyCacheObject upper, Object x) throws IgniteCheckedException {
+            return cursor(cacheId, lower, upper, null, null);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId, KeyCacheObject lower,
+            KeyCacheObject upper, Object x, MvccSnapshot snapshot) throws IgniteCheckedException {
             SearchRow lowerRow;
             SearchRow upperRow;
 
@@ -1571,6 +2687,15 @@
                 upperRow = upper != null ? new SearchRow(CU.UNDEFINED_CACHE_ID, upper) : null;
             }
 
+            if (snapshot != null) {
+                assert grp.mvccEnabled();
+
+                GridCacheContext cctx =
+                    grp.sharedGroup() ? grp.shared().cacheContext(cacheId) : grp.singleCacheContext();
+
+                return dataTree.find(lowerRow, upperRow, new MvccFirstVisibleRowTreeClosure(cctx, snapshot), x);
+            }
+
             return dataTree.find(lowerRow, upperRow, x);
         }
 
@@ -1584,7 +2709,7 @@
                         rowStore.removeRow(row.link());
                     }
                     catch (IgniteCheckedException e) {
-                        U.error(log, "Fail remove row [link=" + row.link() + "]");
+                        U.error(log, "Failed to remove row [link=" + row.link() + "]");
 
                         IgniteCheckedException ex = exception.get();
 
@@ -1597,7 +2722,7 @@
             });
 
             if (exception.get() != null)
-                throw new IgniteCheckedException("Fail destroy store", exception.get());
+                throw new IgniteCheckedException("Failed to destroy store", exception.get());
         }
 
         /** {@inheritDoc} */
@@ -1645,13 +2770,6 @@
             return rowStore;
         }
 
-        /**
-         * @return Next update index.
-         */
-        @Override public long nextUpdateCounter() {
-            return cntr.incrementAndGet();
-        }
-
         /** {@inheritDoc} */
         @Override public long initialUpdateCounter() {
             return initCntr;
@@ -1739,5 +2857,337 @@
             else
                 return 0;
         }
+
+        /** */
+        private final class MvccFirstVisibleRowTreeClosure implements MvccTreeClosure {
+            /** */
+            private final GridCacheContext cctx;
+
+            /** */
+            private final MvccSnapshot snapshot;
+
+            /**
+             *
+             * @param cctx Cache context.
+             * @param snapshot MVCC snapshot.
+             */
+            MvccFirstVisibleRowTreeClosure(GridCacheContext cctx, MvccSnapshot snapshot) {
+                this.cctx = cctx;
+                this.snapshot = snapshot;
+            }
+
+            /** {@inheritDoc} */
+            @Override public boolean apply(BPlusTree<CacheSearchRow, CacheDataRow> tree, BPlusIO<CacheSearchRow> io,
+                long pageAddr, int idx) throws IgniteCheckedException {
+                RowLinkIO rowIo = (RowLinkIO)io;
+
+                long rowCrdVer = rowIo.getMvccCoordinatorVersion(pageAddr, idx);
+                long rowCntr = rowIo.getMvccCounter(pageAddr, idx);
+                int rowOpCntr = rowIo.getMvccOperationCounter(pageAddr, idx);
+
+                assert mvccVersionIsValid(rowCrdVer, rowCntr, rowOpCntr);
+
+                return isVisible(cctx, snapshot, rowCrdVer, rowCntr, rowOpCntr, rowIo.getLink(pageAddr, idx));
+            }
+        }
+
+        /**
+         * Put row if it doesn't exist yet.
+         */
+        private class MvccPutIfAbsentClosure extends MvccDataRow implements IgniteTree.InvokeClosure<CacheDataRow> {
+            /** */
+            private IgniteTree.OperationType op;
+
+            /**
+             * @param key Key.
+             * @param val Value.
+             * @param ver Version.
+             * @param part Partition.
+             * @param expireTime Expire time.
+             * @param cacheId Cache ID.
+             * @param mvccVer Mvcc version.
+             * @param newMvccVer New mvcc version.
+             * @param txState Tx state hint for mvcc version.
+             * @param newTxState Tx state hint for new mvcc version.
+             */
+            MvccPutIfAbsentClosure(KeyCacheObject key,
+                CacheObject val,
+                GridCacheVersion ver,
+                int part,
+                long expireTime,
+                int cacheId,
+                MvccVersion mvccVer,
+                MvccVersion newMvccVer,
+                byte txState,
+                byte newTxState) {
+                super(key, val, ver, part, expireTime, cacheId, mvccVer, newMvccVer);
+
+                mvccTxState(txState);
+                newMvccTxState(newTxState);
+            }
+
+            /** {@inheritDoc} */
+            @Override public void call(@Nullable CacheDataRow old) throws IgniteCheckedException {
+                if (old == null) {
+                    op = PUT;
+
+                    int cacheId = cacheId();
+
+                    if (!grp.storeCacheIdInDataPage() && cacheId != CU.UNDEFINED_CACHE_ID)
+                        cacheId(CU.UNDEFINED_CACHE_ID);
+
+                    rowStore().addRow(this);
+
+                    cacheId(cacheId);
+                }
+                else
+                    op = NOOP;
+            }
+
+            /** {@inheritDoc} */
+            @Override public MvccDataRow newRow() {
+                return this;
+            }
+
+            /** {@inheritDoc} */
+            @Override public IgniteTree.OperationType operationType() {
+                return op;
+            }
+        }
+
+        /**
+         *
+         */
+        private class MvccUpdateRowWithPreloadInfoClosure extends MvccDataRow implements OffheapInvokeClosure {
+            /** */
+            private CacheDataRow oldRow;
+            /** */
+            private IgniteTree.OperationType op;
+
+            /**
+             * @param cctx Cache context.
+             * @param key Key.
+             * @param val Value.
+             * @param ver Version.
+             * @param expireTime Expire time.
+             * @param mvccVer Mvcc created version.
+             * @param newMvccVer Mvcc updated version.
+             * @param mvccTxState Mvcc Tx state hint.
+             * @param newMvccTxState New Mvcc Tx state hint.
+             */
+            MvccUpdateRowWithPreloadInfoClosure(GridCacheContext cctx,
+                KeyCacheObject key,
+                @Nullable CacheObject val,
+                GridCacheVersion ver,
+                long expireTime,
+                MvccVersion mvccVer,
+                MvccVersion newMvccVer,
+                byte mvccTxState,
+                byte newMvccTxState) {
+                super(key,
+                    val,
+                    ver,
+                    CacheDataStoreImpl.this.partId(),
+                    expireTime,
+                    cctx.cacheId(),
+                    mvccVer,
+                    newMvccVer);
+
+                mvccTxState(mvccTxState);
+                newMvccTxState(newMvccTxState);
+            }
+
+            /** {@inheritDoc} */
+            @Nullable @Override public CacheDataRow oldRow() {
+                return oldRow;
+            }
+
+            /** {@inheritDoc} */
+            @Override public void call(@Nullable CacheDataRow oldRow) throws IgniteCheckedException {
+                this.oldRow = oldRow;
+
+                if (oldRow == null) {
+                    op = PUT;
+
+                    int cacheId = cacheId();
+
+                    if (!grp.storeCacheIdInDataPage() && cacheId != CU.UNDEFINED_CACHE_ID)
+                        cacheId(CU.UNDEFINED_CACHE_ID);
+
+                    rowStore().addRow(this);
+
+                    cacheId(cacheId);
+                }
+                else {
+                    op = NOOP;
+
+                    if (oldRow.mvccTxState() != mvccTxState() ||
+                        oldRow.newMvccCoordinatorVersion() != newMvccCoordinatorVersion() ||
+                        oldRow.newMvccCounter() != newMvccCounter() ||
+                        oldRow.newMvccOperationCounter() != newMvccOperationCounter() ||
+                        oldRow.newMvccTxState() != newMvccTxState()) {
+
+                        rowStore().updateDataRow(oldRow.link(), mvccApplyChanges, this);
+                    }
+                }
+            }
+
+            /** {@inheritDoc} */
+            @Override public CacheDataRow newRow() {
+                return op == PUT ? this : null;
+            }
+
+            /** {@inheritDoc} */
+            @Override public IgniteTree.OperationType operationType() {
+                return op == null ? NOOP : op;
+            }
+        }
+    }
+
+    /**
+     * Mvcc remove handler.
+     */
+    private final class MvccMarkUpdatedHandler extends PageHandler<MvccVersion, Boolean> {
+        /** {@inheritDoc} */
+        @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc,
+            MvccVersion newVer, int itemId) throws IgniteCheckedException {
+            assert grp.mvccEnabled();
+
+            DataPageIO iox = (DataPageIO)io;
+
+            int offset = iox.getPayloadOffset(pageAddr, itemId, grp.dataRegion().pageMemory().pageSize(), MVCC_INFO_SIZE);
+
+            long newCrd = iox.newMvccCoordinator(pageAddr, offset);
+            long newCntr = iox.newMvccCounter(pageAddr, offset);
+            int newOpCntr = iox.newMvccOperationCounter(pageAddr, offset);
+
+            assert newCrd == MVCC_CRD_COUNTER_NA || state(grp, newCrd, newCntr, newOpCntr) == TxState.ABORTED;
+
+            iox.updateNewVersion(pageAddr, offset, newVer, TxState.NA);
+
+            if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                ctx.wal().log(new DataPageMvccMarkUpdatedRecord(cacheId, pageId, itemId,
+                    newVer.coordinatorVersion(), newVer.counter(), newVer.operationCounter()));
+
+            return Boolean.TRUE;
+        }
+    }
+
+    /**
+     * Mvcc update operation counter hints handler.
+     */
+    private final class MvccUpdateTxStateHintHandler extends PageHandler<Void, Boolean> {
+        /** {@inheritDoc} */
+        @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io,
+            Boolean walPlc, Void ignore,
+            int itemId) throws IgniteCheckedException {
+
+            DataPageIO iox = (DataPageIO)io;
+
+            int pageSize = grp.dataRegion().pageMemory().pageSize();
+            int offset = iox.getPayloadOffset(pageAddr, itemId, pageSize, MVCC_INFO_SIZE);
+
+            long crd = iox.mvccCoordinator(pageAddr, offset);
+            long cntr = iox.mvccCounter(pageAddr, offset);
+            int opCntr = iox.mvccOperationCounter(pageAddr, offset);
+            byte txState = (byte)(opCntr >>> MVCC_HINTS_BIT_OFF);
+
+            if (txState == TxState.NA) {
+                byte state = state(grp, crd, cntr, opCntr);
+
+                if (state == TxState.COMMITTED || state == TxState.ABORTED) {
+                    iox.mvccOperationCounter(pageAddr, offset, opCntr | (state << MVCC_HINTS_BIT_OFF));
+
+                    if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                        ctx.wal().log(new DataPageMvccUpdateTxStateHintRecord(cacheId, pageId, itemId, state));
+                }
+                else
+                    throw unexpectedStateException(grp, state, crd, cntr, opCntr);
+            }
+
+            long newCrd = iox.newMvccCoordinator(pageAddr, offset);
+            long newCntr = iox.newMvccCounter(pageAddr, offset);
+            int newOpCntr = iox.newMvccOperationCounter(pageAddr, offset);
+            byte newTxState = (byte)(newOpCntr >>> MVCC_HINTS_BIT_OFF);
+
+            if (newCrd != MVCC_CRD_COUNTER_NA && newTxState == TxState.NA) {
+                byte state = state(grp, newCrd, newCntr, newOpCntr);
+
+                if (state == TxState.COMMITTED || state == TxState.ABORTED) {
+                    iox.newMvccOperationCounter(pageAddr, offset, newOpCntr | (state << MVCC_HINTS_BIT_OFF));
+
+                    if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                        ctx.wal().log(new DataPageMvccUpdateNewTxStateHintRecord(cacheId, pageId, itemId, state));
+                }
+
+                // We do not throw an exception here because new version may be updated by active Tx at this moment.
+            }
+
+            return Boolean.TRUE;
+        }
+    }
+
+    /**
+     * Applies changes to the row.
+     */
+    private final class MvccApplyChangesHandler extends PageHandler<MvccDataRow, Boolean> {
+        /** {@inheritDoc} */
+        @Override public Boolean run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc,
+            MvccDataRow newRow, int itemId) throws IgniteCheckedException {
+            assert grp.mvccEnabled();
+
+            DataPageIO iox = (DataPageIO)io;
+
+            int offset = iox.getPayloadOffset(pageAddr, itemId, grp.dataRegion().pageMemory().pageSize(), MVCC_INFO_SIZE);
+
+            long crd = iox.mvccCoordinator(pageAddr, offset);
+            long cntr = iox.mvccCounter(pageAddr, offset);
+            int opCntrAndHint = iox.mvccOperationCounter(pageAddr, offset);
+            int opCntr = opCntrAndHint & ~MVCC_HINTS_MASK;
+            byte txState = (byte)(opCntrAndHint >>> MVCC_HINTS_BIT_OFF);
+
+            long newCrd = iox.newMvccCoordinator(pageAddr, offset);
+            long newCntr = iox.newMvccCounter(pageAddr, offset);
+            int newOpCntrAndHint = iox.newMvccOperationCounter(pageAddr, offset);
+            int newOpCntr = newOpCntrAndHint & ~MVCC_HINTS_MASK;
+            byte newTxState = (byte)(newOpCntrAndHint >>> MVCC_HINTS_BIT_OFF);
+
+            assert crd == newRow.mvccCoordinatorVersion();
+            assert cntr == newRow.mvccCounter();
+            assert opCntr == newRow.mvccOperationCounter();
+
+            if (txState != newRow.mvccTxState() && newRow.mvccTxState() != TxState.NA) {
+                assert txState == TxState.NA;
+
+                iox.mvccOperationCounter(pageAddr, offset, opCntr | (newRow.mvccTxState() << MVCC_HINTS_BIT_OFF));
+
+                if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                    ctx.wal().log(new DataPageMvccUpdateTxStateHintRecord(cacheId, pageId, itemId, newRow.mvccTxState()));
+            }
+
+            if (compare(newCrd,
+                newCntr,
+                newOpCntr,
+                newRow.newMvccCoordinatorVersion(),
+                newRow.newMvccCounter(),
+                newRow.newMvccOperationCounter()) != 0) {
+
+                iox.updateNewVersion(pageAddr, offset, newRow.newMvccVersion(), newRow.newMvccTxState());
+
+                if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                    ctx.wal().log(new DataPageMvccMarkUpdatedRecord(cacheId, pageId, itemId,
+                        newRow.newMvccCoordinatorVersion(), newRow.newMvccCounter(), newRow.newMvccOperationCounter()));
+            }
+            else if (newTxState != newRow.newMvccTxState() && newRow.newMvccTxState() != TxState.NA) {
+                assert newTxState == TxState.NA;
+
+                iox.newMvccOperationCounter(pageAddr, offset, newOpCntr | (newRow.newMvccTxState() << MVCC_HINTS_BIT_OFF));
+
+                if (isWalDeltaRecordNeeded(grp.dataRegion().pageMemory(), cacheId, pageId, page, ctx.wal(), walPlc))
+                    ctx.wal().log(new DataPageMvccUpdateNewTxStateHintRecord(cacheId, pageId, itemId, newRow.newMvccTxState()));
+            }
+
+            return Boolean.TRUE;
+        }
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
index e34f22f..225fa81 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteCacheProxyImpl.java
@@ -74,6 +74,7 @@
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.CacheQuery;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryFuture;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryType;
@@ -347,6 +348,9 @@
 
     /** {@inheritDoc} */
     @Override public Lock lockAll(final Collection<? extends K> keys) {
+        //TODO IGNITE-7764
+        MvccUtils.verifyMvccOperationSupport(ctx, "Lock");
+
         return new CacheLockImpl<>(ctx.gate(), delegate, ctx.operationContextPerCall(), keys);
     }
 
@@ -375,7 +379,8 @@
 
         IgniteBiPredicate<K, V> p = scanQry.getFilter();
 
-        final CacheQuery<R> qry = ctx.queries().createScanQuery(p, transformer, scanQry.getPartition(), isKeepBinary);
+        final CacheQuery<R> qry = ctx.queries().createScanQuery(
+            p, transformer, scanQry.getPartition(), isKeepBinary, scanQry.isLocal());
 
         if (scanQry.getPageSize() > 0)
             qry.pageSize(scanQry.getPageSize());
@@ -755,9 +760,6 @@
             (qry instanceof SqlQuery || qry instanceof SqlFieldsQuery || qry instanceof TextQuery))
             throw new CacheException("Failed to execute query. Add module 'ignite-indexing' to the classpath " +
                     "of all Ignite nodes.");
-
-        if (qry.isLocal() && (qry instanceof SqlQuery) && ctx.kernalContext().clientNode())
-            throw new CacheException("Execution of local sql query on client node disallowed.");
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteInternalCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteInternalCache.java
index bbedef8..cba2228 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteInternalCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IgniteInternalCache.java
@@ -1658,20 +1658,6 @@
     public long igfsDataSpaceUsed();
 
     /**
-     * Checks whether this cache is Mongo data cache.
-     *
-     * @return {@code True} if this cache is mongo data cache.
-     */
-    public boolean isMongoDataCache();
-
-    /**
-     * Checks whether this cache is Mongo meta cache.
-     *
-     * @return {@code True} if this cache is mongo meta cache.
-     */
-    public boolean isMongoMetaCache();
-
-    /**
      * @param keepBinary Keep binary flag.
      * @param p Optional key/value predicate.
      * @return Scan query iterator.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IncompleteCacheObject.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IncompleteCacheObject.java
index f874571..dedb3bd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IncompleteCacheObject.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/IncompleteCacheObject.java
@@ -51,7 +51,7 @@
     }
 
     /** {@inheritDoc} */
-    public void readData(ByteBuffer buf) {
+    @Override public void readData(ByteBuffer buf) {
         if (data == null) {
             assert head != null;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObject.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObject.java
index 21b1f89..8f8ceb6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObject.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/KeyCacheObject.java
@@ -24,7 +24,7 @@
     /**
      * @return Key hash code.
      */
-    public int hashCode();
+    @Override public int hashCode();
 
     /**
      * @return {@code True} if internal cache key.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessor.java
index c7e2e68..781bc5e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessor.java
@@ -49,9 +49,10 @@
     /**
      * @param typeId Type ID.
      * @param newMeta New meta data.
+     * @param failIfUnregistered Fail if unregistered.
      * @throws IgniteException In case of error.
      */
-    public void addMeta(int typeId, final BinaryType newMeta) throws IgniteException;
+    public void addMeta(int typeId, final BinaryType newMeta, boolean failIfUnregistered) throws IgniteException;
 
     /**
      * Adds metadata locally without triggering discovery exchange.
@@ -136,7 +137,7 @@
      * @return Binaries interface.
      * @throws IgniteException If failed.
      */
-    public IgniteBinary binary() throws IgniteException;
+    @Override public IgniteBinary binary() throws IgniteException;
 
     /**
      * @param obj Original object.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java
index 69d1f91..4c101b2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/CacheObjectBinaryProcessorImpl.java
@@ -42,6 +42,7 @@
 import org.apache.ignite.events.Event;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.IgniteNodeAttributes;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
 import org.apache.ignite.internal.binary.BinaryContext;
 import org.apache.ignite.internal.binary.BinaryEnumObjectImpl;
 import org.apache.ignite.internal.binary.BinaryFieldMetadata;
@@ -163,7 +164,7 @@
             transport = new BinaryMetadataTransport(metadataLocCache, metadataFileStore, ctx, log);
 
             BinaryMetadataHandler metaHnd = new BinaryMetadataHandler() {
-                @Override public void addMeta(int typeId, BinaryType newMeta) throws BinaryObjectException {
+                @Override public void addMeta(int typeId, BinaryType newMeta, boolean failIfUnregistered) throws BinaryObjectException {
                     assert newMeta != null;
                     assert newMeta instanceof BinaryTypeImpl;
 
@@ -182,7 +183,7 @@
 
                     BinaryMetadata newMeta0 = ((BinaryTypeImpl)newMeta).metadata();
 
-                    CacheObjectBinaryProcessorImpl.this.addMeta(typeId, newMeta0.wrap(binaryCtx));
+                    CacheObjectBinaryProcessorImpl.this.addMeta(typeId, newMeta0.wrap(binaryCtx), failIfUnregistered);
                 }
 
                 @Override public BinaryType metadata(int typeId) throws BinaryObjectException {
@@ -436,11 +437,11 @@
         BinaryMetadata meta = new BinaryMetadata(typeId, typeName, fieldTypeIds, affKeyFieldName, null, isEnum,
             enumMap);
 
-        binaryCtx.updateMetadata(typeId, meta);
+        binaryCtx.updateMetadata(typeId, meta, false);
     }
 
     /** {@inheritDoc} */
-    @Override public void addMeta(final int typeId, final BinaryType newMeta) throws BinaryObjectException {
+    @Override public void addMeta(final int typeId, final BinaryType newMeta, boolean failIfUnregistered) throws BinaryObjectException {
         assert newMeta != null;
         assert newMeta instanceof BinaryTypeImpl;
 
@@ -457,6 +458,14 @@
             if (mergedMeta == oldMeta)
                 return;
 
+            if (failIfUnregistered)
+                throw new UnregisteredBinaryTypeException(
+                    "Attempted to update binary metadata inside a critical synchronization block (will be " +
+                        "automatically retried). This exception must not be wrapped to any other exception class. " +
+                        "If you encounter this exception outside of EntryProcessor, please report to Apache Ignite " +
+                        "dev-list.",
+                    typeId, mergedMeta);
+
             MetadataUpdateResult res = transport.requestMetadataUpdate(mergedMeta).get();
 
             assert res != null;
@@ -483,7 +492,8 @@
         try {
             BinaryMetadata mergedMeta = BinaryUtils.mergeMetadata(oldMeta, newMeta0);
 
-            metadataFileStore.mergeAndWriteMetadata(mergedMeta);
+            if (!ctx.clientNode())
+                metadataFileStore.mergeAndWriteMetadata(mergedMeta);
 
             metadataLocCache.put(typeId, new BinaryMetadataHolder(mergedMeta, 0, 0));
         }
@@ -1007,7 +1017,8 @@
                             localMetaHolder.pendingVersion(),
                             localMetaHolder.acceptedVersion()));
 
-                    metadataFileStore.writeMetadata(mergedMeta);
+                    if (!ctx.clientNode())
+                        metadataFileStore.writeMetadata(mergedMeta);
                 }
             }
             else {
@@ -1023,7 +1034,8 @@
 
                 metadataLocCache.put(metaEntry.getKey(), newMetaHolder);
 
-                metadataFileStore.writeMetadata(newMeta);
+                if (!ctx.clientNode())
+                    metadataFileStore.writeMetadata(newMeta);
             }
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/IgniteBinaryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/IgniteBinaryImpl.java
index 71475be..5f4cdcd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/IgniteBinaryImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/binary/IgniteBinaryImpl.java
@@ -170,7 +170,7 @@
     }
 
     /** {@inheritDoc} */
-    public BinaryType registerEnum(String typeName, Map<String, Integer> vals) {
+    @Override public BinaryType registerEnum(String typeName, Map<String, Integer> vals) {
         guard();
 
         try {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java
index 7d45c81..932f000 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java
@@ -25,7 +25,6 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashSet;
-import java.util.Iterator;
 import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.Callable;
@@ -33,6 +32,7 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicBoolean;
+import javax.cache.Cache;
 import javax.cache.event.CacheEntryEvent;
 import javax.cache.event.CacheEntryUpdatedListener;
 import org.apache.ignite.Ignite;
@@ -40,6 +40,7 @@
 import org.apache.ignite.IgniteSet;
 import org.apache.ignite.binary.BinaryObject;
 import org.apache.ignite.cache.CacheEntryEventSerializableFilter;
+import org.apache.ignite.cache.CachePeekMode;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
@@ -49,7 +50,6 @@
 import org.apache.ignite.internal.processors.cache.GridCacheGateway;
 import org.apache.ignite.internal.processors.cache.GridCacheManagerAdapter;
 import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
-import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor;
 import org.apache.ignite.internal.processors.datastructures.GridAtomicCacheQueueImpl;
 import org.apache.ignite.internal.processors.datastructures.GridCacheQueueHeader;
@@ -62,7 +62,6 @@
 import org.apache.ignite.internal.processors.datastructures.GridTransactionalCacheQueueImpl;
 import org.apache.ignite.internal.processors.datastructures.SetItemKey;
 import org.apache.ignite.internal.processors.task.GridInternal;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -105,10 +104,6 @@
     /** Sets map. */
     private final ConcurrentMap<IgniteUuid, GridCacheSetProxy> setsMap;
 
-    /** Set keys used for set iteration. */
-    private ConcurrentMap<IgniteUuid, GridConcurrentHashSet<SetItemKey>> setDataMap =
-        new ConcurrentHashMap<>();
-
     /** Queues map. */
     private final ConcurrentMap<IgniteUuid, GridCacheQueueProxy> queuesMap;
 
@@ -344,45 +339,6 @@
     }
 
     /**
-     * Entry update callback.
-     *
-     * @param key Key.
-     * @param rmv {@code True} if entry was removed.
-     * @param keepBinary Keep binary flag.
-     */
-    public void onEntryUpdated(KeyCacheObject key, boolean rmv, boolean keepBinary) {
-        // No need to notify data structures manager for a user cache since all DS objects are stored
-        // in system caches.
-        if (cctx.userCache())
-            return;
-
-        Object key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false);
-
-        if (key0 instanceof SetItemKey)
-            onSetItemUpdated((SetItemKey)key0, rmv);
-    }
-
-    /**
-     * Partition evicted callback.
-     *
-     * @param part Partition number.
-     */
-    public void onPartitionEvicted(int part) {
-        GridCacheAffinityManager aff = cctx.affinity();
-
-        for (GridConcurrentHashSet<SetItemKey> set : setDataMap.values()) {
-            Iterator<SetItemKey> iter = set.iterator();
-
-            while (iter.hasNext()) {
-                SetItemKey key = iter.next();
-
-                if (aff.partition(key) == part)
-                    iter.remove();
-            }
-        }
-    }
-
-    /**
      * @param name Set name.
      * @param colloc Collocated flag.
      * @param create Create flag.
@@ -462,14 +418,6 @@
     }
 
     /**
-     * @param id Set ID.
-     * @return Data for given set.
-     */
-    @Nullable public GridConcurrentHashSet<SetItemKey> setData(IgniteUuid id) {
-        return setDataMap.get(id);
-    }
-
-    /**
      * @param setId Set ID.
      * @param topVer Topology version.
      * @throws IgniteCheckedException If failed.
@@ -486,22 +434,19 @@
             cctx.preloader().syncFuture().get();
         }
 
-        GridConcurrentHashSet<SetItemKey> set = setDataMap.get(setId);
-
-        if (set == null)
-            return;
-
-        IgniteInternalCache cache = cctx.cache();
+        IgniteInternalCache<?, ?> cache = cctx.cache();
 
         final int BATCH_SIZE = 100;
 
         Collection<SetItemKey> keys = new ArrayList<>(BATCH_SIZE);
 
-        for (SetItemKey key : set) {
-            if (!loc && !aff.primaryByKey(cctx.localNode(), key, topVer))
+        for (Cache.Entry entry : cache.localEntries(new CachePeekMode[] {CachePeekMode.PRIMARY})) {
+            Object obj = entry.getKey();
+
+            if (!(obj instanceof SetItemKey && setId.equals(((SetItemKey)obj).setId())))
                 continue;
 
-            keys.add(key);
+            keys.add((SetItemKey)obj);
 
             if (keys.size() == BATCH_SIZE) {
                 retryRemoveAll(cache, keys);
@@ -512,16 +457,15 @@
 
         if (!keys.isEmpty())
             retryRemoveAll(cache, keys);
-
-        setDataMap.remove(setId);
     }
 
     /**
      * @param id Set ID.
+     * @param separated Separated cache flag.
      * @throws IgniteCheckedException If failed.
      */
     @SuppressWarnings("unchecked")
-    public void removeSetData(IgniteUuid id) throws IgniteCheckedException {
+    public void removeSetData(IgniteUuid id, boolean separated) throws IgniteCheckedException {
         assert id != null;
 
         if (!cctx.isLocal()) {
@@ -536,6 +480,10 @@
                         nodes,
                         true,
                         0, false).get();
+
+                    // Separated cache will be destroyed after the set is blocked.
+                    if (separated)
+                        break;
                 }
                 catch (IgniteCheckedException e) {
                     if (e.hasCause(ClusterTopologyCheckedException.class)) {
@@ -604,34 +552,6 @@
     }
 
     /**
-     * @param key Set item key.
-     * @param rmv {@code True} if item was removed.
-     */
-    private void onSetItemUpdated(SetItemKey key, boolean rmv) {
-        // Items stored in a separate cache don't have identifier.
-        if (key.setId() == null)
-            return;
-
-        GridConcurrentHashSet<SetItemKey> set = setDataMap.get(key.setId());
-
-        if (set == null) {
-            if (rmv)
-                return;
-
-            GridConcurrentHashSet<SetItemKey> old = setDataMap.putIfAbsent(key.setId(),
-                set = new GridConcurrentHashSet<>());
-
-            if (old != null)
-                set = old;
-        }
-
-        if (rmv)
-            set.remove(key);
-        else
-            set.add(key);
-    }
-
-    /**
      * @param setId Set ID.
      */
     @SuppressWarnings("unchecked")
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxFinishSync.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxFinishSync.java
index 7cc368a..1f688f6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxFinishSync.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxFinishSync.java
@@ -248,7 +248,7 @@
                     log.trace("Moved transaction synchronizer to waiting state [nodeId=" + nodeId +
                         ", threadId=" + threadId + ']');
 
-                assert cnt == 0 || nodeLeft;
+                assert cnt == 0 || nodeLeft : cnt;
 
                 if (nodeLeft)
                     return;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
index 3eeb0db..41e8aba 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheTxRecoveryFuture.java
@@ -146,6 +146,17 @@
      */
     @SuppressWarnings("ConstantConditions")
     public void prepare() {
+        if (tx.txState().mvccEnabled(cctx)) { // TODO IGNITE-7313
+            U.error(log, "Cannot commit MVCC enabled transaction by recovery procedure. " +
+                "Operation is usupported at the moment [tx=" + CU.txString(tx) + ']');
+
+            onDone(false);
+
+            markInitialized();
+
+            return;
+        }
+
         if (nearTxCheck) {
             UUID nearNodeId = tx.eventNodeId();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxMapping.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxMapping.java
index 481c954..b543786 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxMapping.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxMapping.java
@@ -46,6 +46,9 @@
     /** Explicit lock flag. */
     private boolean explicitLock;
 
+    /** Query update flag. */
+    private boolean queryUpdate;
+
     /** DHT version. */
     private GridCacheVersion dhtVer;
 
@@ -133,6 +136,20 @@
     }
 
     /**
+     * @return {@code True} if mapping was created for a query update.
+     */
+    public boolean queryUpdate() {
+        return queryUpdate;
+    }
+
+    /**
+     * Sets query update flag to {@code true}.
+     */
+    public void markQueryUpdate() {
+        queryUpdate = true;
+    }
+
+    /**
      * @return {@code True} if lock is explicit.
      */
     public boolean explicitLock() {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java
index 91dcd9e..a5aa0d8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxPrepareRequest.java
@@ -78,6 +78,9 @@
     /** */
     public static final int STORE_WRITE_THROUGH_FLAG_MASK = 0x20;
 
+    /** */
+    public static final int QUERY_UPDATE_FLAG_MASK = 0x40;
+
     /** Collection to message converter. */
     private static final C1<Collection<UUID>, UUIDCollectionMessage> COL_TO_MSG = new C1<Collection<UUID>, UUIDCollectionMessage>() {
         @Override public UUIDCollectionMessage apply(Collection<UUID> uuids) {
@@ -253,6 +256,25 @@
     }
 
     /**
+     *
+     * @return Flag indicating whether it is a query update.
+     */
+    public boolean queryUpdate() {
+        return (flags & QUERY_UPDATE_FLAG_MASK) != 0;
+    }
+
+    /**
+     *
+     * @param queryUpdate Query update value.
+     */
+    public void queryUpdate(boolean queryUpdate) {
+        if (queryUpdate)
+            flags = (byte)(flags | QUERY_UPDATE_FLAG_MASK);
+        else
+            flags &= ~QUERY_UPDATE_FLAG_MASK;
+    }
+
+    /**
      * @return IO policy.
      */
     public byte policy() {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
index 1b9b3a8..8e96ae2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java
@@ -29,9 +29,11 @@
 import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import java.util.stream.Collectors;
 import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.apache.ignite.failure.FailureContext;
+import org.apache.ignite.failure.FailureType;
 import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
@@ -48,7 +50,11 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCounters;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxAdapter;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
@@ -56,6 +62,7 @@
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteEx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteState;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxState;
+import org.apache.ignite.internal.processors.cache.transactions.TxCounters;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext;
 import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException;
@@ -202,6 +209,16 @@
         return false;
     }
 
+    /** {@inheritDoc} */
+    @Override public void activeCachesDeploymentEnabled(boolean depEnabled) {
+        throw new UnsupportedOperationException("Remote tx doesn't support deployment.");
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException {
+        txState.addActiveCache(cacheCtx, recovery, this);
+    }
+
     /**
      * @return Checks if transaction has no entries.
      */
@@ -457,7 +474,7 @@
 
                 GridCacheReturnCompletableWrapper wrapper = null;
 
-                if (!F.isEmpty(writeMap)) {
+                if (!F.isEmpty(writeMap) || mvccSnapshot != null) {
                     GridCacheReturn ret = null;
 
                     if (!near() && !local() && onePhaseCommit()) {
@@ -485,6 +502,8 @@
                     cctx.database().checkpointReadLock();
 
                     try {
+                        assert !txState.mvccEnabled(cctx) || mvccSnapshot != null : "Mvcc is not initialized: " + this;
+
                         Collection<IgniteTxEntry> entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries();
 
                         // Data entry to write to WAL and associated with it TxEntry.
@@ -610,7 +629,8 @@
                                                         CU.subjectId(this, cctx),
                                                         resolveTaskName(),
                                                         dhtVer,
-                                                        txEntry.updateCounter());
+                                                        txEntry.updateCounter(),
+                                                        mvccSnapshot());
                                                 else {
                                                     assert val != null : txEntry;
 
@@ -634,9 +654,10 @@
                                                         CU.subjectId(this, cctx),
                                                         resolveTaskName(),
                                                         dhtVer,
-                                                        txEntry.updateCounter());
+                                                        txEntry.updateCounter(),
+                                                        mvccSnapshot());
 
-                                                    txEntry.updateCounter(updRes.updatePartitionCounter());
+                                                    txEntry.updateCounter(updRes.updateCounter());
 
                                                     if (updRes.loggedPointer() != null)
                                                         ptr = updRes.loggedPointer();
@@ -671,9 +692,10 @@
                                                     CU.subjectId(this, cctx),
                                                     resolveTaskName(),
                                                     dhtVer,
-                                                    txEntry.updateCounter());
+                                                    txEntry.updateCounter(),
+                                                    mvccSnapshot());
 
-                                                txEntry.updateCounter(updRes.updatePartitionCounter());
+                                                txEntry.updateCounter(updRes.updateCounter());
 
                                                 if (updRes.loggedPointer() != null)
                                                     ptr = updRes.loggedPointer();
@@ -744,29 +766,51 @@
                                     }
                                 }
                                 catch (Throwable ex) {
-                                    boolean hasIOIssue = X.hasCause(ex, InvalidEnvironmentException.class);
+                                    boolean isNodeStopping = X.hasCause(ex, NodeStoppingException.class);
+                                    boolean hasInvalidEnvironmentIssue = X.hasCause(ex, InvalidEnvironmentException.class);
 
                                     // In case of error, we still make the best effort to commit,
                                     // as there is no way to rollback at this point.
                                     err = new IgniteTxHeuristicCheckedException("Commit produced a runtime exception " +
                                         "(all transaction entries will be invalidated): " + CU.txString(this), ex);
 
-                                    if (hasIOIssue) {
+                                    if (isNodeStopping) {
                                         U.warn(log, "Failed to commit transaction, node is stopping [tx=" + this +
                                             ", err=" + ex + ']');
                                     }
+                                    else if (hasInvalidEnvironmentIssue) {
+                                        U.warn(log, "Failed to commit transaction, node is in invalid state and will be stopped [tx=" + this +
+                                            ", err=" + ex + ']');
+                                    }
                                     else
                                         U.error(log, "Commit failed.", err);
 
-                                    uncommit(hasIOIssue);
-
                                     state(UNKNOWN);
 
+                                    if (hasInvalidEnvironmentIssue)
+                                        cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, ex));
+                                    else if (!isNodeStopping) { // Skip fair uncommit in case of node stopping or invalidation.
+                                        try {
+                                            // Courtesy to minimize damage.
+                                            uncommit();
+                                        }
+                                        catch (Throwable ex1) {
+                                            U.error(log, "Failed to uncommit transaction: " + this, ex1);
+
+                                            if (ex1 instanceof Error)
+                                                throw ex1;
+                                        }
+                                    }
+
                                     if (ex instanceof Error)
-                                        throw (Error)ex;
+                                        throw (Error) ex;
+
+                                    throw err;
                                 }
                             }
 
+                            applyTxCounters();
+
                             if (!near() && !F.isEmpty(dataEntries) && cctx.wal() != null) {
                                 // Set new update counters for data entries received from persisted tx entries.
                                 List<DataEntry> entriesWithCounters = dataEntries.stream()
@@ -780,6 +824,8 @@
                                 cctx.wal().flush(ptr, false);
                         }
                         catch (StorageException e) {
+                            err = e;
+
                             throw new IgniteCheckedException("Failed to log transaction record " +
                                 "(transaction will be rolled back): " + this, e);
                         }
@@ -787,17 +833,13 @@
                     finally {
                         cctx.database().checkpointReadUnlock();
 
+                        notifyDrManager(state() == COMMITTING && err == null);
+
                         if (wrapper != null)
                             wrapper.initialize(ret);
                     }
                 }
 
-                if (err != null) {
-                    state(UNKNOWN);
-
-                    throw err;
-                }
-
                 cctx.tm().commitTx(this);
 
                 state(COMMITTED);
@@ -806,6 +848,38 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected void applyTxCounters() {
+        super.applyTxCounters();
+
+        TxCounters txCntrs = txCounters(false);
+
+        if (txCntrs == null)
+            return;
+
+        Map<Integer, PartitionUpdateCounters> updCntrs = txCntrs.updateCounters();
+
+        for (Map.Entry<Integer, PartitionUpdateCounters> entry : updCntrs.entrySet()) {
+            int cacheId = entry.getKey();
+
+            GridDhtPartitionTopology top = cctx.cacheContext(cacheId).topology();
+
+            Map<Integer, Long> cacheUpdCntrs = entry.getValue().updateCounters();
+
+            assert cacheUpdCntrs != null;
+
+            for (Map.Entry<Integer, Long> e : cacheUpdCntrs.entrySet()) {
+                long updCntr = e.getValue();
+
+                GridDhtLocalPartition dhtPart = top.localPartition(e.getKey());
+
+                assert dhtPart != null && updCntr > 0;
+
+                dhtPart.updateCounter(updCntr);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
     @Override public final void commitRemoteTx() throws IgniteCheckedException {
         if (optimistic())
             state(PREPARED);
@@ -884,6 +958,8 @@
     /** {@inheritDoc} */
     @Override public final void rollbackRemoteTx() {
         try {
+            notifyDrManager(false);
+
             // Note that we don't evict near entries here -
             // they will be deleted by their corresponding transactions.
             if (state(ROLLING_BACK) || state() == UNKNOWN) {
@@ -957,5 +1033,4 @@
     @Override public String toString() {
         return GridToStringBuilder.toString(GridDistributedTxRemoteAdapter.class, this, "super", super.toString());
     }
-
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CompoundLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CompoundLockFuture.java
new file mode 100644
index 0000000..63eff40
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/CompoundLockFuture.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxAbstractEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxSelectForUpdateFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.lang.IgniteInClosure;
+
+/**
+ *
+ */
+public class CompoundLockFuture extends GridFutureAdapter<Void> implements DhtLockFuture<Void>, IgniteInClosure<IgniteInternalFuture<?>> {
+    /** */
+    private static final long serialVersionUID = 4644646033267042131L;
+    /** */
+    private static final AtomicIntegerFieldUpdater<CompoundLockFuture> CNT_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(CompoundLockFuture.class, "cnt");
+    /** */
+    private volatile int cnt;
+    /** */
+    private final GridDhtTxLocalAdapter tx;
+
+    /**
+     * @param cnt ResultSet futures count.
+     * @param tx Transaction.
+     */
+    public CompoundLockFuture(int cnt, GridDhtTxLocalAdapter tx) {
+        this.cnt = cnt;
+        this.tx = tx;
+    }
+
+    /**
+     * @param fut ResultSet future.
+     */
+    public void register(IgniteInternalFuture<?> fut) {
+        fut.listen(this);
+    }
+
+    /**
+     *  Init method.
+     */
+    public void init() {
+        while(true) {
+            IgniteInternalFuture<?> fut = tx.lockFuture();
+
+            if (fut == GridDhtTxLocalAdapter.ROLLBACK_FUT) {
+                onDone(tx.timedOut() ? tx.timeoutException() : tx.rollbackException());
+
+                break;
+            }
+            else if (fut != null) {
+                // Wait for previous future.
+                assert fut instanceof GridNearTxAbstractEnlistFuture
+                    || fut instanceof GridDhtTxAbstractEnlistFuture
+                    || fut instanceof CompoundLockFuture
+                    || fut instanceof GridNearTxSelectForUpdateFuture : fut;
+
+                // Terminate this future if parent future is terminated by rollback.
+                if (!fut.isDone()) {
+                    fut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                        @Override public void apply(IgniteInternalFuture fut) {
+                            if (fut.error() != null)
+                                onDone(fut.error());
+                        }
+                    });
+                }
+                else if (fut.error() != null)
+                    onDone(fut.error());
+
+                break;
+            }
+            else if (tx.updateLockFuture(null, this))
+                break;
+        }
+    }
+
+    @Override public void apply(IgniteInternalFuture<?> future) {
+        if (!isDone() && (future.error() != null || CNT_UPD.decrementAndGet(this) == 0)) {
+            Throwable err = future.error();
+
+            if (err == null)
+                tx.clearLockFuture(this);
+
+            onDone(err);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(Throwable error) {
+        assert error != null;
+
+        onDone(error);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/DhtLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/DhtLockFuture.java
new file mode 100644
index 0000000..b729dcd
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/DhtLockFuture.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import org.apache.ignite.internal.IgniteInternalFuture;
+
+/**
+ * Marker interface.
+ */
+public interface DhtLockFuture<T> extends IgniteInternalFuture<T> {
+    /**
+     * @param error Error.
+     */
+    public void onError(Throwable error);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ExceptionAware.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ExceptionAware.java
new file mode 100644
index 0000000..036492f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/ExceptionAware.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public interface ExceptionAware {
+    /**
+     * @return Exception.
+     */
+    @Nullable Throwable error();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
index 54a850c..70ce082 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridClientPartitionTopology.java
@@ -46,6 +46,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.F0;
 import org.apache.ignite.internal.util.GridAtomicLong;
 import org.apache.ignite.internal.util.GridPartitionStateMap;
@@ -199,6 +200,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public MvccCoordinator mvccCoordinator() {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean holdsLock() {
         return lock.isWriteLockedByCurrentThread() || lock.getReadHoldCount() > 0;
     }
@@ -207,6 +213,7 @@
     @Override public void updateTopologyVersion(
         GridDhtTopologyFuture exchFut,
         DiscoCache discoCache,
+        MvccCoordinator mvccCrd,
         long updSeq,
         boolean stopping
     ) throws IgniteInterruptedCheckedException {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java
index ea99f5d..cd37650 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java
@@ -68,6 +68,8 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.platform.cache.PlatformCacheEntryFilter;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
@@ -230,18 +232,32 @@
 
                         entry.unswap();
 
-                        GridCacheEntryInfo info = entry.info();
+                        if (ctx.mvccEnabled()) {
+                            List<GridCacheEntryInfo> infos = entry.allVersionsInfo();
 
-                        if (info == null) {
-                            assert entry.obsolete() : entry;
+                            if (infos == null) {
+                                assert entry.obsolete() : entry;
 
-                            continue;
+                                continue;
+                            }
+
+                            for (int i = 0; i < infos.size(); i++)
+                                res.addInfo(infos.get(i));
+                        }
+                        else {
+                            GridCacheEntryInfo info = entry.info();
+
+                            if (info == null) {
+                                assert entry.obsolete() : entry;
+
+                                continue;
+                            }
+
+                            if (!info.isNew())
+                                res.addInfo(info);
                         }
 
-                        if (!info.isNew())
-                            res.addInfo(info);
-
-                        ctx.evicts().touch(entry, msg.topologyVersion());
+                        entry.touch(msg.topologyVersion());
 
                         break;
                     }
@@ -588,6 +604,9 @@
             return;
         }
 
+        //TODO IGNITE-7954
+        MvccUtils.verifyMvccOperationSupport(ctx, "Load");
+
         final AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
 
         // Version for all loaded entries.
@@ -677,7 +696,7 @@
                 }
                 finally {
                     if (entry != null)
-                        entry.context().evicts().touch(entry, topVer);
+                        entry.touch(topVer);
 
                     part.release();
 
@@ -766,6 +785,7 @@
      * @param taskName Task name.
      * @param expiry Expiry policy.
      * @param skipVals Skip values flag.
+     * @param mvccSnapshot MVCC snapshot.
      * @return Get future.
      */
     IgniteInternalFuture<Map<KeyCacheObject, EntryGetResult>> getDhtAllAsync(
@@ -776,7 +796,8 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiry,
         boolean skipVals,
-        boolean recovery
+        boolean recovery,
+        MvccSnapshot mvccSnapshot
     ) {
         return getAllAsync0(keys,
             readerArgs,
@@ -789,7 +810,8 @@
             skipVals,
             /*keep cache objects*/true,
             recovery,
-            /*need version*/true);
+            /*need version*/true,
+            mvccSnapshot);
     }
 
     /**
@@ -803,6 +825,7 @@
      * @param taskNameHash Task name hash code.
      * @param expiry Expiry policy.
      * @param skipVals Skip values flag.
+     * @param mvccSnapshot MVCC snapshot.
      * @return DHT future.
      */
     public GridDhtFuture<Collection<GridCacheEntryInfo>> getDhtAsync(UUID reader,
@@ -815,7 +838,8 @@
         int taskNameHash,
         @Nullable IgniteCacheExpiryPolicy expiry,
         boolean skipVals,
-        boolean recovery
+        boolean recovery,
+        MvccSnapshot mvccSnapshot
     ) {
         GridDhtGetFuture<K, V> fut = new GridDhtGetFuture<>(ctx,
             msgId,
@@ -828,7 +852,8 @@
             expiry,
             skipVals,
             recovery,
-            addReaders);
+            addReaders,
+            mvccSnapshot);
 
         fut.init();
 
@@ -846,9 +871,10 @@
      * @param taskNameHash Task name hash.
      * @param expiry Expiry.
      * @param skipVals Skip vals flag.
+     * @param mvccSnapshot Mvcc snapshot.
      * @return Future for the operation.
      */
-    public GridDhtGetSingleFuture getDhtSingleAsync(
+    GridDhtGetSingleFuture getDhtSingleAsync(
         UUID nodeId,
         long msgId,
         KeyCacheObject key,
@@ -859,7 +885,8 @@
         int taskNameHash,
         @Nullable IgniteCacheExpiryPolicy expiry,
         boolean skipVals,
-        boolean recovery
+        boolean recovery,
+        MvccSnapshot mvccSnapshot
     ) {
         GridDhtGetSingleFuture fut = new GridDhtGetSingleFuture<>(
             ctx,
@@ -873,7 +900,8 @@
             taskNameHash,
             expiry,
             skipVals,
-            recovery);
+            recovery,
+            mvccSnapshot);
 
         fut.init();
 
@@ -901,7 +929,8 @@
                 req.taskNameHash(),
                 expiryPlc,
                 req.skipValues(),
-                req.recovery());
+                req.recovery(),
+                req.mvccSnapshot());
 
         fut.listen(new CI1<IgniteInternalFuture<GridCacheEntryInfo>>() {
             @Override public void apply(IgniteInternalFuture<GridCacheEntryInfo> f) {
@@ -1004,7 +1033,8 @@
                 req.taskNameHash(),
                 expiryPlc,
                 req.skipValues(),
-                req.recovery());
+                req.recovery(),
+                req.mvccSnapshot());
 
         fut.listen(new CI1<IgniteInternalFuture<Collection<GridCacheEntryInfo>>>() {
             @Override public void apply(IgniteInternalFuture<Collection<GridCacheEntryInfo>> f) {
@@ -1184,7 +1214,7 @@
                 }
                 finally {
                     if (entry != null)
-                        cache.context().evicts().touch(entry, AffinityTopologyVersion.NONE);
+                        entry.touch(AffinityTopologyVersion.NONE);
                 }
             }
             catch (IgniteCheckedException e) {
@@ -1230,6 +1260,8 @@
         if (expVer.equals(curVer))
             return false;
 
+        // TODO IGNITE-7164 check mvcc crd for mvcc enabled txs.
+
         Collection<ClusterNode> cacheNodes0 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), expVer);
         Collection<ClusterNode> cacheNodes1 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), curVer);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheEntry.java
index fe02090..343e418 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheEntry.java
@@ -102,6 +102,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected long nextMvccPartitionCounter() {
+        return locPart.nextMvccUpdateCounter();
+    }
+
+    /** {@inheritDoc} */
     @Override public int memorySize() throws IgniteCheckedException {
         int rdrsOverhead;
 
@@ -648,7 +653,10 @@
                     ']');
             }
 
-            removeValue();
+                if (cctx.mvccEnabled())
+                    cctx.offheap().mvccRemoveAll(this);
+                else
+                    removeValue();
 
             // Give to GC.
             update(null, 0L, 0L, ver, true);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
index 4319374..023c058 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetFuture.java
@@ -37,6 +37,7 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.ReaderArguments;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
 import org.apache.ignite.internal.util.future.GridCompoundIdentityFuture;
@@ -114,6 +115,9 @@
     /** */
     private final boolean addReaders;
 
+    /** */
+    private final MvccSnapshot mvccSnapshot;
+
     /**
      * @param cctx Context.
      * @param msgId Message ID.
@@ -125,6 +129,7 @@
      * @param taskNameHash Task name hash code.
      * @param expiryPlc Expiry policy.
      * @param skipVals Skip values flag.
+     * @param mvccSnapshot MVCC snapshot.
      */
     public GridDhtGetFuture(
         GridCacheContext<K, V> cctx,
@@ -138,7 +143,8 @@
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean skipVals,
         boolean recovery,
-        boolean addReaders
+        boolean addReaders,
+        MvccSnapshot mvccSnapshot
     ) {
         super(CU.<GridCacheEntryInfo>collectionsReducer(keys.size()));
 
@@ -157,6 +163,7 @@
         this.skipVals = skipVals;
         this.recovery = recovery;
         this.addReaders = addReaders;
+        this.mvccSnapshot = mvccSnapshot;
 
         futId = IgniteUuid.randomUuid();
 
@@ -402,7 +409,7 @@
                             log.debug("Got removed entry when getting a DHT value: " + e);
                     }
                     finally {
-                        cctx.evicts().touch(e, topVer);
+                        e.touch(topVer);
                     }
                 }
             }
@@ -422,7 +429,8 @@
                 taskName,
                 expiryPlc,
                 skipVals,
-                recovery);
+                recovery,
+                mvccSnapshot);
         }
         else {
             final ReaderArguments args = readerArgs;
@@ -445,7 +453,8 @@
                             taskName,
                             expiryPlc,
                             skipVals,
-                            recovery);
+                            recovery,
+                            mvccSnapshot);
                     }
                 }
             );
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetSingleFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetSingleFuture.java
index 7c6c020..de76eb1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetSingleFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtGetSingleFuture.java
@@ -35,6 +35,7 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.ReaderArguments;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.F;
@@ -103,6 +104,9 @@
     /** Recovery context flag. */
     private final boolean recovery;
 
+    /** */
+    private final MvccSnapshot mvccSnapshot;
+
     /**
      * @param cctx Context.
      * @param msgId Message ID.
@@ -115,6 +119,7 @@
      * @param taskNameHash Task name hash code.
      * @param expiryPlc Expiry policy.
      * @param skipVals Skip values flag.
+     * @param mvccSnapshot Mvcc snapshot.
      */
     public GridDhtGetSingleFuture(
         GridCacheContext<K, V> cctx,
@@ -128,7 +133,8 @@
         int taskNameHash,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean skipVals,
-        boolean recovery
+        boolean recovery,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
         assert reader != null;
         assert key != null;
@@ -145,6 +151,7 @@
         this.expiryPlc = expiryPlc;
         this.skipVals = skipVals;
         this.recovery = recovery;
+        this.mvccSnapshot = mvccSnapshot;
 
         futId = IgniteUuid.randomUuid();
 
@@ -349,7 +356,7 @@
                         log.debug("Got removed entry when getting a DHT value: " + e);
                 }
                 finally {
-                    cctx.evicts().touch(e, topVer);
+                    e.touch(topVer);
                 }
             }
         }
@@ -365,7 +372,8 @@
                 taskName,
                 expiryPlc,
                 skipVals,
-                recovery);
+                recovery,
+                mvccSnapshot);
         }
         else {
             final ReaderArguments args = readerArgs;
@@ -390,7 +398,8 @@
                                 taskName,
                                 expiryPlc,
                                 skipVals,
-                                recovery);
+                                recovery,
+                                mvccSnapshot);
 
                         fut0.listen(createGetFutureListener());
                     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
index e13c952..f56df00 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLocalPartition.java
@@ -517,7 +517,30 @@
      * @param stateToRestore State to restore.
      */
     public void restoreState(GridDhtPartitionState stateToRestore) {
-        state.set(setPartState(state.get(),stateToRestore));
+        state.set(setPartState(state.get(), stateToRestore));
+    }
+
+    /**
+     * For testing purposes only.
+     * @param toState State to set.
+     */
+    public void setState(GridDhtPartitionState toState) {
+        if (grp.persistenceEnabled() && grp.walEnabled()) {
+            synchronized (this) {
+                long state0 = state.get();
+
+                this.state.compareAndSet(state0, setPartState(state0, toState));
+
+                try {
+                    ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, toState, updateCounter()));
+                }
+                catch (IgniteCheckedException e) {
+                    U.error(log, "Error while writing to log", e);
+                }
+            }
+        }
+        else
+            restoreState(toState);
     }
 
     /**
@@ -684,18 +707,21 @@
             }
         }
 
-        grp.evictor().evictPartitionAsync(this);
+        ctx.evict().evictPartitionAsync(grp,this);
     }
 
     /**
-     * Initiates single clear process if partition is in MOVING state.
+     * Initiates single clear process if partition is in MOVING state or continues cleaning for RENTING state.
      * Method does nothing if clear process is already running.
      */
     public void clearAsync() {
-        if (state() != MOVING)
+        GridDhtPartitionState state0 = state();
+
+        if (state0 != MOVING && state0 != RENTING)
             return;
 
         clear = true;
+
         clearAsync0(false);
     }
 
@@ -729,10 +755,8 @@
             if (cnt != 0)
                 return false;
 
-            if (evictGuard.compareAndSet(cnt, cnt + 1)) {
-
+            if (evictGuard.compareAndSet(cnt, cnt + 1))
                 return true;
-            }
         }
     }
 
@@ -952,6 +976,20 @@
     }
 
     /**
+     * @return Current mvcc update counter value.
+     */
+    public long mvccUpdateCounter() {
+        return store.mvccUpdateCounter();
+    }
+
+    /**
+     * @return Next mvcc update counter.
+     */
+    public long nextMvccUpdateCounter() {
+        return store.nextMvccUpdateCounter();
+    }
+
+    /**
      * @return Initial update counter.
      */
     public long initialUpdateCounter() {
@@ -1235,6 +1273,15 @@
     }
 
     /**
+     * Returns group context.
+     *
+     * @return Group context.
+     */
+    public CacheGroupContext group() {
+        return grp;
+    }
+
+    /**
      * @param cacheId Cache ID.
      */
     void onCacheStopped(int cacheId) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
index 529d965..c42b557 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtLockFuture.java
@@ -54,6 +54,9 @@
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException;
 import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
@@ -82,7 +85,7 @@
  * Cache lock future.
  */
 public final class GridDhtLockFuture extends GridCacheCompoundIdentityFuture<Boolean>
-    implements GridCacheVersionedFuture<Boolean>, GridDhtFuture<Boolean>, GridCacheMappedVersion {
+    implements GridCacheVersionedFuture<Boolean>, GridDhtFuture<Boolean>, GridCacheMappedVersion, DhtLockFuture<Boolean> {
     /** */
     private static final long serialVersionUID = 0L;
 
@@ -264,7 +267,7 @@
 
         if (tx != null) {
             while(true) {
-                IgniteInternalFuture<Boolean> fut = tx.lockFut;
+                IgniteInternalFuture fut = tx.lockFut;
 
                 if (fut != null) {
                     if (fut == GridDhtTxLocalAdapter.ROLLBACK_FUT)
@@ -274,8 +277,8 @@
                         assert fut instanceof GridDhtColocatedLockFuture : fut;
 
                         // Terminate this future if parent(collocated) future is terminated by rollback.
-                        fut.listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
-                            @Override public void apply(IgniteInternalFuture<Boolean> fut) {
+                        fut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                            @Override public void apply(IgniteInternalFuture fut) {
                                 try {
                                     fut.get();
                                 }
@@ -643,7 +646,7 @@
     /**
      * @param t Error.
      */
-    public void onError(Throwable t) {
+    @Override public void onError(Throwable t) {
         synchronized (this) {
             if (err != null)
                 return;
@@ -1307,6 +1310,10 @@
                             try {
                                 if (entry.initialValue(info.value(),
                                     info.version(),
+                                    cctx.mvccEnabled() ? ((MvccVersionAware)info).mvccVersion() : null,
+                                    cctx.mvccEnabled() ? ((MvccUpdateVersionAware)info).newMvccVersion() : null,
+                                    cctx.mvccEnabled() ? ((MvccVersionAware)entry).mvccTxState() : TxState.NA,
+                                    cctx.mvccEnabled() ? ((MvccUpdateVersionAware)entry).newMvccTxState() : TxState.NA,
                                     info.ttl(),
                                     info.expireTime(),
                                     true,
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopology.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopology.java
index 42ef309..3ae3c8f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopology.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopology.java
@@ -35,6 +35,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.jetbrains.annotations.Nullable;
 
@@ -74,6 +75,7 @@
     public void updateTopologyVersion(
         GridDhtTopologyFuture exchFut,
         DiscoCache discoCache,
+        MvccCoordinator mvccCrd,
         long updateSeq,
         boolean stopping
     ) throws IgniteInterruptedCheckedException;
@@ -421,4 +423,6 @@
      * @param updateRebalanceVer {@code True} if need check rebalance state.
      */
     public void onExchangeDone(GridDhtPartitionsExchangeFuture fut, AffinityAssignment assignment, boolean updateRebalanceVer);
+
+    public MvccCoordinator mvccCoordinator();
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
index c5c574f..1256a8b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionTopologyImpl.java
@@ -52,6 +52,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.util.F0;
 import org.apache.ignite.internal.util.GridAtomicLong;
 import org.apache.ignite.internal.util.GridPartitionStateMap;
@@ -146,6 +147,9 @@
     /** */
     private volatile AffinityTopologyVersion rebalancedTopVer = AffinityTopologyVersion.NONE;
 
+    /** */
+    private volatile MvccCoordinator mvccCrd;
+
     /**
      * @param ctx Cache shared context.
      * @param grp Cache group.
@@ -238,6 +242,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public MvccCoordinator mvccCoordinator() {
+        return mvccCrd;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean holdsLock() {
         return lock.isWriteLockedByCurrentThread() || lock.getReadHoldCount() > 0;
     }
@@ -246,6 +255,7 @@
     @Override public void updateTopologyVersion(
         GridDhtTopologyFuture exchFut,
         @NotNull DiscoCache discoCache,
+        MvccCoordinator mvccCrd,
         long updSeq,
         boolean stopping
     ) throws IgniteInterruptedCheckedException {
@@ -272,6 +282,7 @@
             lastTopChangeVer = exchTopVer;
 
             this.discoCache = discoCache;
+            this.mvccCrd = mvccCrd;
         }
         finally {
             lock.writeLock().unlock();
@@ -662,8 +673,15 @@
 
                 if (locPart == null)
                     updateLocal(p, EVICTED, updateSeq, topVer);
-                else
-                    updateLocal(p, locPart.state(), updateSeq, topVer);
+                else {
+                    GridDhtPartitionState state = locPart.state();
+
+                    updateLocal(p, state, updateSeq, topVer);
+
+                    // Restart cleaning.
+                    if (state == RENTING)
+                        locPart.clearAsync();
+                }
             }
         }
         finally {
@@ -1429,6 +1447,13 @@
                                 log.debug("Removing left node from full map update [grp=" + grp.cacheOrGroupName() +
                                     ", nodeId=" + nodeId + ", partMap=" + partMap + ']');
 
+                            if (node2part.containsKey(nodeId)) {
+                                GridDhtPartitionMap map = partMap.get(nodeId);
+
+                                if (map != null)
+                                    leftNode2Part.put(nodeId, map);
+                            }
+
                             it.remove();
                         }
                     }
@@ -1723,7 +1748,8 @@
                     assert cur != null;
 
                     String msg = "Stale update for single partition map update (will ignore) [" +
-                        "grp=" + grp.cacheOrGroupName() +
+                        "nodeId=" + parts.nodeId() +
+                        ", grp=" + grp.cacheOrGroupName() +
                         ", exchId=" + exchId +
                         ", curMap=" + cur +
                         ", newMap=" + parts + ']';
@@ -2275,12 +2301,15 @@
 
                     // If all affinity nodes are owners, then evict partition from local node.
                     if (nodeIds.containsAll(F.nodeIds(affNodes))) {
-                        IgniteInternalFuture<?> rentFuture = part.rent(false);
-                        rentingFutures.add(rentFuture);
+                        GridDhtPartitionState state0 = part.state();
+
+                        IgniteInternalFuture<?> rentFut = part.rent(false);
+
+                        rentingFutures.add(rentFut);
 
                         updateSeq = updateLocal(part.id(), part.state(), updateSeq, aff.topologyVersion());
 
-                        changed = true;
+                        changed = state0 != part.state();
 
                         if (log.isDebugEnabled()) {
                             log.debug("Evicted local partition (all affinity nodes are owners) [grp=" + grp.cacheOrGroupName() +
@@ -2301,12 +2330,15 @@
                                 ClusterNode n = nodes.get(i);
 
                                 if (locId.equals(n.id())) {
-                                    IgniteInternalFuture<?> rentFuture = part.rent(false);
-                                    rentingFutures.add(rentFuture);
+                                    GridDhtPartitionState state0 = part.state();
+
+                                    IgniteInternalFuture<?> rentFut = part.rent(false);
+
+                                    rentingFutures.add(rentFut);
 
                                     updateSeq = updateLocal(part.id(), part.state(), updateSeq, aff.topologyVersion());
 
-                                    changed = true;
+                                    changed = state0 != part.state();
 
                                     if (log.isDebugEnabled()) {
                                         log.debug("Evicted local partition (this node is oldest non-affinity node) [" +
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsEvictor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsEvictor.java
deleted file mode 100644
index 7206397..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsEvictor.java
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.internal.processors.cache.distributed.dht;
-
-import java.util.Queue;
-import java.util.Set;
-import java.util.concurrent.Callable;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.function.Function;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteLogger;
-import org.apache.ignite.IgniteSystemProperties;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.processors.cache.CacheGroupContext;
-import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
-import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.typedef.internal.LT;
-import org.apache.ignite.internal.util.typedef.internal.U;
-
-/**
- * Class that serves asynchronous part eviction process.
- * Only one partition from group can be evicted at the moment.
- */
-public class GridDhtPartitionsEvictor {
-    /** Default eviction progress show frequency. */
-    private static final int DEFAULT_SHOW_EVICTION_PROGRESS_FREQ_MS = 2 * 60 * 1000; // 2 Minutes.
-
-    /** Eviction progress frequency property name. */
-    private static final String SHOW_EVICTION_PROGRESS_FREQ = "SHOW_EVICTION_PROGRESS_FREQ";
-
-    /** */
-    private final GridCacheSharedContext<?, ?> ctx;
-
-    /** */
-    private final CacheGroupContext grp;
-
-    /** */
-    private final IgniteLogger log;
-
-    /** Lock object. */
-    private final Object mux = new Object();
-
-    /** Queue contains partitions scheduled for eviction. */
-    private final DeduplicationQueue<Integer, GridDhtLocalPartition> evictionQueue = new DeduplicationQueue<>(GridDhtLocalPartition::id);
-
-    /**
-     * Flag indicates that eviction process is running at the moment.
-     * This is needed to schedule partition eviction if there are no currently running self-scheduling eviction tasks.
-     * Guarded by {@link #mux}.
-     */
-    private boolean evictionRunning;
-
-    /** Flag indicates that eviction process has stopped. */
-    private volatile boolean stop;
-
-    /** Future for currently running partition eviction task. */
-    private volatile GridFutureAdapter<Boolean> evictionFut;
-
-    /** Eviction progress frequency in ms. */
-    private final long evictionProgressFreqMs = IgniteSystemProperties.getLong(SHOW_EVICTION_PROGRESS_FREQ,
-        DEFAULT_SHOW_EVICTION_PROGRESS_FREQ_MS);
-
-    /** Next time of show eviction progress. */
-    private long nextShowProgressTime;
-
-    /**
-     * Constructor.
-     *
-     * @param grp Cache group context.
-     */
-    public GridDhtPartitionsEvictor(CacheGroupContext grp) {
-        assert grp != null;
-
-        this.grp = grp;
-        this.ctx = grp.shared();
-
-        this.log = ctx.logger(getClass());
-    }
-
-    /**
-     * Adds partition to eviction queue and starts eviction process.
-     *
-     * @param part Partition to evict.
-     */
-    public void evictPartitionAsync(GridDhtLocalPartition part) {
-        if (stop)
-            return;
-
-        boolean added = evictionQueue.offer(part);
-
-        if (!added)
-            return;
-
-        synchronized (mux) {
-            if (!evictionRunning) {
-                nextShowProgressTime = U.currentTimeMillis() + evictionProgressFreqMs;
-
-                scheduleNextPartitionEviction();
-            }
-        }
-    }
-
-    /**
-     * Stops eviction process.
-     * Method awaits last offered partition eviction.
-     */
-    public void stop() {
-        stop = true;
-
-        synchronized (mux) {
-            // Wait for last offered partition eviction completion.
-            IgniteInternalFuture<Boolean> evictionFut0 = evictionFut;
-
-            if (evictionFut0 != null) {
-                try {
-                    evictionFut0.get();
-                }
-                catch (IgniteCheckedException e) {
-                    if (log.isDebugEnabled())
-                        log.warning("Failed to await partition eviction during stopping", e);
-                }
-            }
-        }
-    }
-
-    /**
-     * Gets next partition from the queue and schedules it for eviction.
-     */
-    private void scheduleNextPartitionEviction() {
-        if (stop)
-            return;
-
-        synchronized (mux) {
-            GridDhtLocalPartition next = evictionQueue.poll();
-
-            if (next != null) {
-                showProgress();
-
-                evictionFut = new GridFutureAdapter<>();
-
-                ctx.kernalContext().closure().callLocalSafe(new PartitionEvictionTask(next, () -> stop), true);
-            }
-            else
-                evictionRunning = false;
-        }
-    }
-
-    /**
-     * Shows progress of eviction.
-     */
-    private void showProgress() {
-        if (U.currentTimeMillis() >= nextShowProgressTime) {
-            int size = evictionQueue.size() + 1; // Queue size plus current partition.
-
-            if (log.isInfoEnabled())
-                log.info("Eviction in progress [grp=" + grp.cacheOrGroupName()
-                    + ", remainingPartsCnt=" + size + "]");
-
-            nextShowProgressTime = U.currentTimeMillis() + evictionProgressFreqMs;
-        }
-    }
-
-    /**
-     * Task for self-scheduled partition eviction / clearing.
-     */
-    private class PartitionEvictionTask implements Callable<Boolean> {
-        /** Partition to evict. */
-        private final GridDhtLocalPartition part;
-
-        /** Eviction context. */
-        private final EvictionContext evictionCtx;
-
-        /**
-         * @param part Partition.
-         * @param evictionCtx Eviction context.
-         */
-        public PartitionEvictionTask(GridDhtLocalPartition part, EvictionContext evictionCtx) {
-            this.part = part;
-            this.evictionCtx = evictionCtx;
-        }
-
-        /** {@inheritDoc} */
-        @Override public Boolean call() throws Exception {
-            if (stop) {
-                evictionFut.onDone();
-
-                return false;
-            }
-
-            try {
-                boolean success = part.tryClear(evictionCtx);
-
-                if (success) {
-                    if (part.state() == GridDhtPartitionState.EVICTED && part.markForDestroy())
-                        part.destroy();
-                }
-                else // Re-offer partition if clear was unsuccessful due to partition reservation.
-                    evictionQueue.offer(part);
-
-                // Complete eviction future before schedule new to prevent deadlock with
-                // simultaneous eviction stopping and scheduling new eviction.
-                evictionFut.onDone();
-
-                scheduleNextPartitionEviction();
-
-                return true;
-            }
-            catch (Throwable ex) {
-                evictionFut.onDone(ex);
-
-                if (ctx.kernalContext().isStopping()) {
-                    LT.warn(log, ex, "Partition eviction failed (current node is stopping).",
-                        false,
-                        true);
-                }
-                else
-                    LT.error(log, ex, "Partition eviction failed, this can cause grid hang.");
-            }
-
-            return false;
-        }
-    }
-
-    /**
-     * Thread-safe blocking queue with items deduplication.
-     *
-     * @param <K> Key type of item used for deduplication.
-     * @param <V> Queue item type.
-     */
-    private static class DeduplicationQueue<K, V> {
-        /** Queue. */
-        private final Queue<V> queue;
-
-        /** Unique items set. */
-        private final Set<K> uniqueItems;
-
-        /** Key mapping function. */
-        private final Function<V, K> keyMappingFunction;
-
-        /**
-         * Constructor.
-         *
-         * @param keyExtractor Function to extract a key from a queue item.
-         *                     This key is used for deduplication if some item has offered twice.
-         */
-        public DeduplicationQueue(Function<V, K> keyExtractor) {
-            keyMappingFunction = keyExtractor;
-            queue = new LinkedBlockingQueue<>();
-            uniqueItems = new GridConcurrentHashSet<>();
-        }
-
-        /**
-         * Offers item to the queue.
-         *
-         * @param item Item.
-         * @return {@code true} if item has been successfully offered to the queue,
-         *         {@code false} if item was rejected because already exists in the queue.
-         */
-        public boolean offer(V item) {
-            K key = keyMappingFunction.apply(item);
-
-            if (uniqueItems.add(key)) {
-                queue.offer(item);
-
-                return true;
-            }
-
-            return false;
-        }
-
-        /**
-         * Polls next item from queue.
-         *
-         * @return Next item or {@code null} if queue is empty.
-         */
-        public V poll() {
-            V item = queue.poll();
-
-            if (item != null) {
-                K key = keyMappingFunction.apply(item);
-
-                uniqueItems.remove(key);
-            }
-
-            return item;
-        }
-
-        /**
-         * @return Size of queue.
-         */
-        public int size() {
-            return queue.size();
-        }
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsStateValidator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsStateValidator.java
index 866c5133..3b99758 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsStateValidator.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtPartitionsStateValidator.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionPartialCountersMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.SB;
 import org.apache.ignite.lang.IgniteProductVersion;
@@ -93,11 +94,13 @@
                 ignoringNodes.add(id);
         }
 
-        // Validate cache sizes.
-        result = validatePartitionsSizes(top, messages, ignoringNodes);
+        if (!MvccUtils.mvccEnabled(cctx.kernalContext())) { // TODO: Remove "if" clause in IGNITE-9451.
+            // Validate cache sizes.
+            result = validatePartitionsSizes(top, messages, ignoringNodes);
 
-        if (!result.isEmpty())
-            throw new IgniteCheckedException("Partitions cache sizes are inconsistent for " + fold(topVer, result));
+            if (!result.isEmpty())
+                throw new IgniteCheckedException("Partitions cache sizes are inconsistent for " + fold(topVer, result));
+        }
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
index 3c3150a..539fef4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTopologyFutureAdapter.java
@@ -44,6 +44,9 @@
     /** Cache groups validation results. */
     protected volatile Map<Integer, CacheValidation> grpValidRes;
 
+    /** Whether or not cluster is active. */
+    protected volatile boolean clusterIsActive = true;
+
     /**
      * @param grp Cache group.
      * @param topNodes Topology nodes.
@@ -80,7 +83,7 @@
         if (err != null)
             return err;
 
-        if (!cctx.shared().kernalContext().state().publicApiActiveState(true))
+        if (!clusterIsActive)
             return new CacheInvalidStateException(
                 "Failed to perform cache operation (cluster is not activated): " + cctx.name());
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
index 161c542c..be1c7e2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java
@@ -28,7 +28,9 @@
 import java.util.Map;
 import java.util.UUID;
 import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
 import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.cluster.ClusterTopologyException;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
@@ -44,7 +46,6 @@
 import org.apache.ignite.internal.processors.cache.GridCacheLockTimeoutException;
 import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
-import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException;
@@ -56,8 +57,16 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxRemote;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearUnlockRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotWithoutTxs;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
@@ -77,7 +86,7 @@
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.jetbrains.annotations.Nullable;
@@ -85,7 +94,9 @@
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.NOOP;
 import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isNearEnabled;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
 import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
 import static org.apache.ignite.transactions.TransactionState.COMMITTING;
 
 /**
@@ -166,6 +177,18 @@
             }
         });
 
+        ctx.io().addCacheHandler(ctx.cacheId(), GridNearTxQueryEnlistRequest.class, new CI2<UUID, GridNearTxQueryEnlistRequest>() {
+            @Override public void apply(UUID nodeId, GridNearTxQueryEnlistRequest req) {
+                processNearTxQueryEnlistRequest(nodeId, req);
+            }
+        });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridNearTxQueryEnlistResponse.class, new CI2<UUID, GridNearTxQueryEnlistResponse>() {
+            @Override public void apply(UUID nodeId, GridNearTxQueryEnlistResponse req) {
+                processNearEnlistResponse(nodeId, req);
+            }
+        });
+
         ctx.io().addCacheHandler(ctx.cacheId(), GridDhtForceKeysRequest.class,
             new MessageHandler<GridDhtForceKeysRequest>() {
                 @Override public void onMessage(ClusterNode node, GridDhtForceKeysRequest msg) {
@@ -179,6 +202,41 @@
                     processForceKeyResponse(node, msg);
                 }
             });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridNearTxQueryResultsEnlistRequest.class,
+            new CI2<UUID, GridNearTxQueryResultsEnlistRequest>() {
+                @Override public void apply(UUID nodeId, GridNearTxQueryResultsEnlistRequest req) {
+                    processNearTxQueryResultsEnlistRequest(nodeId, req);
+                }
+            });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridNearTxQueryResultsEnlistResponse.class,
+            new CI2<UUID, GridNearTxQueryResultsEnlistResponse>() {
+                @Override public void apply(UUID nodeId, GridNearTxQueryResultsEnlistResponse req) {
+                    processNearTxEnlistResponse(nodeId, req);
+                }
+            });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridDhtTxQueryEnlistRequest.class,
+            new CI2<UUID, GridDhtTxQueryEnlistRequest>() {
+                @Override public void apply(UUID nodeId, GridDhtTxQueryEnlistRequest msg) {
+                    processDhtTxQueryEnlistRequest(nodeId, msg, false);
+                }
+            });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridDhtTxQueryFirstEnlistRequest.class,
+            new CI2<UUID, GridDhtTxQueryEnlistRequest>() {
+                @Override public void apply(UUID nodeId, GridDhtTxQueryEnlistRequest msg) {
+                    processDhtTxQueryEnlistRequest(nodeId, msg, true);
+                }
+            });
+
+        ctx.io().addCacheHandler(ctx.cacheId(), GridDhtTxQueryEnlistResponse.class,
+            new CI2<UUID, GridDhtTxQueryEnlistResponse>() {
+                @Override public void apply(UUID nodeId, GridDhtTxQueryEnlistResponse msg) {
+                    processDhtTxQueryEnlistResponse(nodeId, msg);
+                }
+            });
     }
 
     /** {@inheritDoc} */
@@ -629,6 +687,85 @@
      * @param nodeId Node ID.
      * @param req Request.
      */
+    private void processNearTxQueryEnlistRequest(UUID nodeId, final GridNearTxQueryEnlistRequest req) {
+        assert nodeId != null;
+        assert req != null;
+
+        ClusterNode nearNode = ctx.discovery().node(nodeId);
+
+        GridDhtTxLocal tx;
+
+        try {
+            tx = initTxTopologyVersion(nodeId,
+                nearNode,
+                req.version(),
+                req.futureId(),
+                req.miniId(),
+                req.firstClientRequest(),
+                req.topologyVersion(),
+                req.threadId(),
+                req.txTimeout(),
+                req.subjectId(),
+                req.taskNameHash());
+        }
+        catch (IgniteCheckedException | IgniteException ex) {
+            GridNearTxQueryEnlistResponse res = new GridNearTxQueryEnlistResponse(req.cacheId(),
+                req.futureId(),
+                req.miniId(),
+                req.version(),
+                ex);
+
+            try {
+                ctx.io().send(nearNode, res, ctx.ioPolicy());
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to send near enlist response [" +
+                    "txId=" + req.version() +
+                    ", node=" + nodeId +
+                    ", res=" + res + ']', e);
+            }
+
+            return;
+        }
+
+        GridDhtTxQueryEnlistFuture fut = new GridDhtTxQueryEnlistFuture(
+            nodeId,
+            req.version(),
+            req.mvccSnapshot(),
+            req.threadId(),
+            req.futureId(),
+            req.miniId(),
+            tx,
+            req.cacheIds(),
+            req.partitions(),
+            req.schemaName(),
+            req.query(),
+            req.parameters(),
+            req.flags(),
+            req.pageSize(),
+            req.timeout(),
+            ctx);
+
+        fut.listen(NearTxQueryEnlistResultHandler.instance());
+
+        fut.init();
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param res Response.
+     */
+    private void processNearEnlistResponse(UUID nodeId, final GridNearTxQueryEnlistResponse res) {
+        GridNearTxQueryEnlistFuture fut = (GridNearTxQueryEnlistFuture)ctx.mvcc().versionedFuture(res.version(), res.futureId());
+
+        if (fut != null)
+            fut.onResult(nodeId, res);
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param req Request.
+     */
     private void processNearLockRequest(UUID nodeId, GridNearLockRequest req) {
         assert ctx.affinityNode();
         assert nodeId != null;
@@ -697,6 +834,8 @@
                 final IgniteThread thread = (IgniteThread)curThread;
 
                 if (thread.cachePoolThread()) {
+                    // Near transaction's finish on timeout will unlock topFut if it was held for too long,
+                    // so need to listen with timeout. This is not true for optimistic transactions.
                     topFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
                         @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
                             ctx.kernalContext().closure().runLocalWithThreadPolicy(thread, new Runnable() {
@@ -920,7 +1059,7 @@
             GridDhtPartitionTopology top = null;
 
             if (req.firstClientRequest()) {
-                assert CU.clientNode(nearNode);
+                assert nearNode.isClient();
 
                 top = topology();
 
@@ -1276,7 +1415,7 @@
 
                                 CacheObject val = null;
 
-                                if (ret)
+                                if (ret) {
                                     val = e.innerGet(
                                         null,
                                         tx,
@@ -1287,7 +1426,9 @@
                                         null,
                                         tx != null ? tx.resolveTaskName() : null,
                                         null,
-                                        req.keepBinary());
+                                        req.keepBinary(),
+                                        null); // TODO IGNITE-7371
+                                }
 
                                 assert e.lockedBy(mappedVer) || ctx.mvcc().isRemoved(e.context(), mappedVer) :
                                     "Entry does not own lock for tx [locNodeId=" + ctx.localNodeId() +
@@ -1380,6 +1521,7 @@
             U.error(log, "Failed to acquire lock for request: " + req, err);
 
         try {
+            // TODO Async rollback
             // Don't send reply message to this node or if lock was cancelled or tx was rolled back asynchronously.
             if (!nearNode.id().equals(ctx.nodeId()) && !X.hasCause(err, GridDistributedLockCancelledException.class) &&
                 !X.hasCause(err, IgniteTxRollbackCheckedException.class)) {
@@ -1495,7 +1637,7 @@
                                     "(added to cancelled locks set): " + req);
                         }
 
-                        ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
+                        entry.touch(ctx.affinity().affinityTopologyVersion());
 
                         break;
                     }
@@ -1528,15 +1670,14 @@
      * @param readers Readers for this entry.
      * @param dhtMap DHT map.
      * @param nearMap Near map.
-     * @throws IgniteCheckedException If failed.
      */
     private void map(UUID nodeId,
         AffinityTopologyVersion topVer,
         GridCacheEntryEx cached,
         Collection<UUID> readers,
         Map<ClusterNode, List<KeyCacheObject>> dhtMap,
-        Map<ClusterNode, List<KeyCacheObject>> nearMap)
-        throws IgniteCheckedException {
+        Map<ClusterNode, List<KeyCacheObject>> nearMap
+    ) {
         List<ClusterNode> dhtNodes = ctx.dht().topology().nodes(cached.partition(), topVer);
 
         ClusterNode primary = dhtNodes.get(0);
@@ -1577,7 +1718,7 @@
      * @param nodes Nodes.
      * @param map Map.
      */
-    @SuppressWarnings( {"MismatchedQueryAndUpdateOfCollection"})
+    @SuppressWarnings({"MismatchedQueryAndUpdateOfCollection"})
     private void map(GridCacheEntryEx entry,
         @Nullable Iterable<? extends ClusterNode> nodes,
         Map<ClusterNode, List<KeyCacheObject>> map) {
@@ -1609,6 +1750,8 @@
         // Remove mapped versions.
         GridCacheVersion dhtVer = unmap ? ctx.mvcc().unmapVersion(ver) : ver;
 
+        ctx.mvcc().addRemoved(ctx, ver);
+
         Map<ClusterNode, List<KeyCacheObject>> dhtMap = new HashMap<>();
         Map<ClusterNode, List<KeyCacheObject>> nearMap = new HashMap<>();
 
@@ -1682,7 +1825,7 @@
                     if (created && entry.markObsolete(dhtVer))
                         removeEntry(entry);
 
-                    ctx.evicts().touch(entry, topVer);
+                    entry.touch(topVer);
 
                     break;
                 }
@@ -1690,9 +1833,6 @@
                     if (log.isDebugEnabled())
                         log.debug("Received remove lock request for removed entry (will retry): " + entry);
                 }
-                catch (IgniteCheckedException e) {
-                    U.error(log, "Failed to remove locks for keys: " + keys, e);
-                }
             }
         }
 
@@ -1787,4 +1927,321 @@
         if (nearEntry != null)
             nearEntry.markObsolete(ctx.versions().next());
     }
+
+    /**
+     * @param nodeId Node ID.
+     * @param req Request.
+     */
+    private void processNearTxQueryResultsEnlistRequest(UUID nodeId, final GridNearTxQueryResultsEnlistRequest req) {
+        assert nodeId != null;
+        assert req != null;
+
+        ClusterNode nearNode = ctx.discovery().node(nodeId);
+
+        GridDhtTxLocal tx;
+
+        try {
+            tx = initTxTopologyVersion(nodeId,
+                nearNode,
+                req.version(),
+                req.futureId(),
+                req.miniId(),
+                req.firstClientRequest(),
+                req.topologyVersion(),
+                req.threadId(),
+                req.txTimeout(),
+                req.subjectId(),
+                req.taskNameHash());
+        }
+        catch (IgniteCheckedException | IgniteException ex) {
+            GridNearTxQueryResultsEnlistResponse res = new GridNearTxQueryResultsEnlistResponse(req.cacheId(),
+                req.futureId(),
+                req.miniId(),
+                req.version(),
+                ex);
+
+            try {
+                ctx.io().send(nearNode, res, ctx.ioPolicy());
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to send near enlist response [" +
+                    "txId=" + req.version() +
+                    ", node=" + nodeId +
+                    ", res=" + res + ']', e);
+            }
+
+            return;
+        }
+
+        GridDhtTxQueryResultsEnlistFuture fut = new GridDhtTxQueryResultsEnlistFuture(
+            nodeId,
+            req.version(),
+            req.mvccSnapshot(),
+            req.threadId(),
+            req.futureId(),
+            req.miniId(),
+            tx,
+            req.timeout(),
+            ctx,
+            req.rows(),
+            req.operation());
+
+        fut.listen(NearTxQueryEnlistResultHandler.instance());
+
+        fut.init();
+    }
+
+    /**
+     * @param nodeId Near node id.
+     * @param nearNode Near node.
+     * @param nearLockVer Near lock version.
+     * @param nearFutId Near future id.
+     * @param nearMiniId Near mini-future id.
+     * @param firstClientReq First client request flag.
+     * @param topVer Topology version.
+     * @param nearThreadId Near node thread id.
+     * @param timeout Timeout.
+     * @param txSubjectId Transaction subject id.
+     * @param txTaskNameHash Transaction task name hash.
+     * @return Transaction.
+     */
+    public GridDhtTxLocal initTxTopologyVersion(UUID nodeId,
+        ClusterNode nearNode,
+        GridCacheVersion nearLockVer,
+        IgniteUuid nearFutId,
+        int nearMiniId,
+        boolean firstClientReq,
+        AffinityTopologyVersion topVer,
+        long nearThreadId,
+        long timeout,
+        UUID txSubjectId,
+        int txTaskNameHash) throws IgniteException, IgniteCheckedException {
+
+        assert ctx.affinityNode();
+
+        if (txLockMsgLog.isDebugEnabled()) {
+            txLockMsgLog.debug("Received near enlist request [txId=" + nearLockVer +
+                ", node=" + nodeId + ']');
+        }
+
+        if (nearNode == null) {
+            U.warn(txLockMsgLog, "Received near enlist request from unknown node (will ignore) [txId=" + nearLockVer +
+                ", node=" + nodeId + ']');
+
+            return null;
+        }
+
+        GridDhtTxLocal tx = null;
+
+        GridCacheVersion dhtVer = ctx.tm().mappedVersion(nearLockVer);
+
+        if (dhtVer != null)
+            tx = ctx.tm().tx(dhtVer);
+
+        GridDhtPartitionTopology top = null;
+
+        if (tx == null) {
+            if (firstClientReq) {
+                assert nearNode.isClient();
+
+                top = topology();
+
+                top.readLock();
+
+                GridDhtTopologyFuture topFut = top.topologyVersionFuture();
+
+                if (!topFut.isDone() || !topFut.topologyVersion().equals(topVer)) {
+                    // TODO IGNITE-7164 Wait for topology change, remap client TX in case affinity was changed.
+                    top.readUnlock();
+
+                    throw new ClusterTopologyException("Topology was changed. Please retry on stable topology.");
+                }
+            }
+
+            try {
+                tx = new GridDhtTxLocal(
+                    ctx.shared(),
+                    topVer,
+                    nearNode.id(),
+                    nearLockVer,
+                    nearFutId,
+                    nearMiniId,
+                    nearThreadId,
+                    false,
+                    false,
+                    ctx.systemTx(),
+                    false,
+                    ctx.ioPolicy(),
+                    PESSIMISTIC,
+                    REPEATABLE_READ,
+                    timeout,
+                    false,
+                    false,
+                    false,
+                    -1,
+                    null,
+                    txSubjectId,
+                    txTaskNameHash);
+
+                // if (req.syncCommit())
+                tx.syncMode(FULL_SYNC);
+
+                tx = ctx.tm().onCreated(null, tx);
+
+                if (tx == null || !tx.init()) {
+                    String msg = "Failed to acquire lock (transaction has been completed): " +
+                        nearLockVer;
+
+                    U.warn(log, msg);
+
+                    try {
+                        if (tx != null)
+                            tx.rollbackDhtLocal();
+                    }
+                    catch (IgniteCheckedException ex) {
+                        U.error(log, "Failed to rollback the transaction: " + tx, ex);
+                    }
+
+                    throw new IgniteCheckedException(msg);
+                }
+
+                tx.topologyVersion(topVer);
+            }
+            finally {
+                if (top != null)
+                    top.readUnlock();
+            }
+        }
+
+        ctx.tm().txContext(tx);
+
+        return tx;
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param res Response.
+     */
+    private void processNearTxEnlistResponse(UUID nodeId, final GridNearTxQueryResultsEnlistResponse res) {
+        GridNearTxQueryResultsEnlistFuture fut = (GridNearTxQueryResultsEnlistFuture)
+            ctx.mvcc().versionedFuture(res.version(), res.futureId());
+
+        if (fut != null)
+            fut.onResult(nodeId, res);
+    }
+
+    /**
+     * @param primary Primary node.
+     * @param req Request.
+     * @param e Error.
+     */
+    private void onError(UUID primary, GridDhtTxQueryEnlistRequest req, Throwable e) {
+        GridDhtTxQueryEnlistResponse res = new GridDhtTxQueryEnlistResponse(ctx.cacheId(),
+            req.dhtFutureId(),
+            req.batchId(),
+            e);
+
+        try {
+            ctx.io().send(primary, res, ctx.ioPolicy());
+        }
+        catch (IgniteCheckedException ioEx) {
+            U.error(log, "Failed to send DHT enlist reply to primary node [node: " + primary + ", req=" + req +
+                ']', ioEx);
+        }
+    }
+
+    /**
+     * @param primary Primary node.
+     * @param req Message.
+     * @param first Flag if this is a first request in current operation.
+     */
+    private void processDhtTxQueryEnlistRequest(UUID primary, GridDhtTxQueryEnlistRequest req, boolean first) {
+        try {
+            assert req.version() != null && req.op() != null;
+
+            GridDhtTxRemote tx = ctx.tm().tx(req.version());
+
+            if (tx == null) {
+                if (!first)
+                    throw new IgniteCheckedException("Can not find a transaction for version [version="
+                        + req.version() + ']');
+
+                GridDhtTxQueryFirstEnlistRequest req0 = (GridDhtTxQueryFirstEnlistRequest)req;
+
+                tx = new GridDhtTxRemote(ctx.shared(),
+                    req0.nearNodeId(),
+                    req0.dhtFutureId(),
+                    primary,
+                    req0.nearXidVersion(),
+                    req0.topologyVersion(),
+                    req0.version(),
+                    null,
+                    ctx.systemTx(),
+                    ctx.ioPolicy(),
+                    PESSIMISTIC,
+                    REPEATABLE_READ,
+                    false,
+                    req0.timeout(),
+                    -1,
+                    req0.subjectId(),
+                    req0.taskNameHash(),
+                    false);
+
+                tx.mvccSnapshot(new MvccSnapshotWithoutTxs(req0.coordinatorVersion(), req0.counter(),
+                    MVCC_OP_COUNTER_NA, req0.cleanupVersion()));
+
+                tx = ctx.tm().onCreated(null, tx);
+
+                if (tx == null || !ctx.tm().onStarted(tx)) {
+                    throw new IgniteTxRollbackCheckedException("Failed to update backup " +
+                        "(transaction has been completed): " + req0.version());
+                }
+            }
+
+            assert tx != null;
+
+            MvccSnapshot s0 = tx.mvccSnapshot();
+
+            MvccSnapshot snapshot = new MvccSnapshotWithoutTxs(s0.coordinatorVersion(), s0.counter(),
+                req.operationCounter(), s0.cleanupVersion());
+
+            tx.mvccEnlistBatch(ctx, req.op(), req.keys(), req.values(), snapshot, req.updateCounters());
+
+            GridDhtTxQueryEnlistResponse res = new GridDhtTxQueryEnlistResponse(req.cacheId(),
+                req.dhtFutureId(),
+                req.batchId(),
+                null);
+
+            try {
+                ctx.io().send(primary, res, ctx.ioPolicy());
+            }
+            catch (IgniteCheckedException ioEx) {
+                U.error(log, "Failed to send DHT enlist reply to primary node [node: " + primary + ", req=" +
+                    req + ']', ioEx);
+            }
+        }
+        catch (IgniteCheckedException e) {
+            onError(primary, req, e);
+        }
+    }
+
+    /**
+     * @param backup Backup node.
+     * @param res Response message.
+     */
+    private void processDhtTxQueryEnlistResponse(UUID backup, GridDhtTxQueryEnlistResponse res) {
+        GridDhtTxAbstractEnlistFuture fut = (GridDhtTxAbstractEnlistFuture)
+            ctx.mvcc().future(res.futureId());
+
+        if (fut == null) {
+            U.warn(log, "Received dht enlist response for unknown future [futId=" + res.futureId() +
+                ", batchId=" + res.batchId() +
+                ", node=" + backup + ']');
+
+            return;
+        }
+
+        fut.onResult(backup, res);
+    }
+
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java
new file mode 100644
index 0000000..a3471c7
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxAbstractEnlistFuture.java
@@ -0,0 +1,1139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
+import org.apache.ignite.internal.processors.cache.GridCacheFutureAdapter;
+import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo;
+import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxAbstractEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxSelectForUpdateFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Abstract future processing transaction enlisting and locking
+ * of entries produced with DML and SELECT FOR UPDATE queries.
+ */
+public abstract class GridDhtTxAbstractEnlistFuture extends GridCacheFutureAdapter<Long>
+    implements DhtLockFuture<Long> {
+    /** Done field updater. */
+    private static final AtomicIntegerFieldUpdater<GridDhtTxAbstractEnlistFuture> DONE_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(GridDhtTxAbstractEnlistFuture.class, "done");
+
+    /** SkipCntr field updater. */
+    private static final AtomicIntegerFieldUpdater<GridDhtTxAbstractEnlistFuture> SKIP_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(GridDhtTxAbstractEnlistFuture.class, "skipCntr");
+
+    /** Marker object. */
+    private static final Object FINISHED = new Object();
+
+    /** */
+    private static final int BATCH_SIZE = 1024;
+
+    /** In-flight batches per node limit. */
+    private static final int BATCHES_PER_NODE = 5;
+
+    /** */
+    private static final int FIRST_BATCH_ID = 0;
+
+    /** Future ID. */
+    protected final IgniteUuid futId;
+
+    /** Cache registry. */
+    @GridToStringExclude
+    protected final GridCacheContext<?, ?> cctx;
+
+    /** Logger. */
+    @GridToStringExclude
+    protected final IgniteLogger log;
+
+    /** Thread. */
+    protected final long threadId;
+
+    /** Future ID. */
+    protected final IgniteUuid nearFutId;
+
+    /** Future ID. */
+    protected final int nearMiniId;
+
+    /** Partitions. */
+    protected final int[] parts;
+
+    /** Transaction. */
+    protected final GridDhtTxLocalAdapter tx;
+
+    /** Lock version. */
+    protected final GridCacheVersion lockVer;
+
+    /** */
+    protected final MvccSnapshot mvccSnapshot;
+
+    /** Processed entries count. */
+    protected long cnt;
+
+    /** Near node ID. */
+    protected final UUID nearNodeId;
+
+    /** Near lock version. */
+    protected final GridCacheVersion nearLockVer;
+
+    /** Timeout object. */
+    @GridToStringExclude
+    protected LockTimeoutObject timeoutObj;
+
+    /** Lock timeout. */
+    protected final long timeout;
+
+    /** Query iterator */
+    private UpdateSourceIterator<?> it;
+
+    /** Row extracted from iterator but not yet used. */
+    private Object peek;
+
+    /** */
+    @SuppressWarnings({"FieldCanBeLocal"})
+    @GridToStringExclude
+    private volatile int skipCntr;
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile int done;
+
+    /** */
+    @GridToStringExclude
+    private int batchIdCntr;
+
+    /** Batches for sending to remote nodes. */
+    private Map<UUID, Batch> batches;
+
+    /** Batches already sent to remotes, but their acks are not received yet. */
+    private ConcurrentMap<UUID, ConcurrentMap<Integer, Batch>> pending;
+
+    /** */
+    private WALPointer walPtr;
+
+    /** Do not send DHT requests to near node. */
+    protected boolean skipNearNodeUpdates;
+
+    /** There are keys belonging to backup partitions on near node. */
+    protected boolean hasNearNodeUpdates;
+
+    /** Moving partitions. */
+    private Map<Integer, Boolean> movingParts;
+
+    /** Update counters to be sent to the near node in case it is a backup node also. */
+    protected GridLongList nearUpdCntrs;
+
+    /**
+     * @param nearNodeId Near node ID.
+     * @param nearLockVer Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId Thread ID.
+     * @param nearFutId Near future id.
+     * @param nearMiniId Near mini future id.
+     * @param parts Partitions.
+     * @param tx Transaction.
+     * @param timeout Lock acquisition timeout.
+     * @param cctx Cache context.
+     */
+    protected GridDhtTxAbstractEnlistFuture(UUID nearNodeId,
+        GridCacheVersion nearLockVer,
+        MvccSnapshot mvccSnapshot,
+        long threadId,
+        IgniteUuid nearFutId,
+        int nearMiniId,
+        @Nullable int[] parts,
+        GridDhtTxLocalAdapter tx,
+        long timeout,
+        GridCacheContext<?, ?> cctx) {
+        assert tx != null;
+        assert timeout >= 0;
+        assert nearNodeId != null;
+        assert nearLockVer != null;
+        assert threadId == tx.threadId();
+
+        this.threadId = threadId;
+        this.cctx = cctx;
+        this.nearNodeId = nearNodeId;
+        this.nearLockVer = nearLockVer;
+        this.nearFutId = nearFutId;
+        this.nearMiniId = nearMiniId;
+        this.mvccSnapshot = mvccSnapshot;
+        this.timeout = timeout;
+        this.tx = tx;
+        this.parts = parts;
+
+        lockVer = tx.xidVersion();
+
+        futId = IgniteUuid.randomUuid();
+
+        log = cctx.logger(GridDhtTxAbstractEnlistFuture.class);
+    }
+
+    /**
+     * @return iterator.
+     * @throws IgniteCheckedException If failed.
+     */
+    protected abstract UpdateSourceIterator<?> createIterator() throws IgniteCheckedException;
+
+    /**
+     *
+     */
+    public void init() {
+        if (timeout < 0) {
+            // Time is out.
+            onDone(timeoutException());
+
+            return;
+        }
+        else if (timeout > 0)
+            timeoutObj = new LockTimeoutObject();
+
+        while(true) {
+            IgniteInternalFuture<?> fut = tx.lockFut;
+
+            if (fut == GridDhtTxLocalAdapter.ROLLBACK_FUT) {
+                onDone(tx.timedOut() ? tx.timeoutException() : tx.rollbackException());
+
+                return;
+            }
+            else if (fut != null) {
+                // Wait for previous future.
+                assert fut instanceof GridNearTxAbstractEnlistFuture
+                    || fut instanceof GridDhtTxAbstractEnlistFuture
+                    || fut instanceof CompoundLockFuture
+                    || fut instanceof GridNearTxSelectForUpdateFuture : fut;
+
+                // Terminate this future if parent future is terminated by rollback.
+                if (!fut.isDone()) {
+                    fut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                        @Override public void apply(IgniteInternalFuture fut) {
+                            if (fut.error() != null)
+                                onDone(fut.error());
+                        }
+                    });
+                }
+                else if (fut.error() != null)
+                    onDone(fut.error());
+
+                break;
+            }
+            else if (tx.updateLockFuture(null, this))
+                break;
+        }
+
+        boolean added = cctx.mvcc().addFuture(this, futId);
+
+        assert added;
+
+        if (isDone()) {
+            cctx.mvcc().removeFuture(futId);
+
+            return;
+        }
+
+        if (timeoutObj != null)
+            cctx.time().addTimeoutObject(timeoutObj);
+
+        try {
+            checkPartitions(parts);
+
+            UpdateSourceIterator<?> it = createIterator();
+
+            if (!it.hasNext()) {
+                U.close(it, log);
+
+                onDone(0L);
+
+                return;
+            }
+
+            tx.addActiveCache(cctx, false);
+
+            this.it = it;
+        }
+        catch (Throwable e) {
+            onDone(e);
+
+            if (e instanceof Error)
+                throw (Error)e;
+
+            return;
+        }
+
+        continueLoop(false);
+    }
+
+    /**
+     * Clears lock future.
+     */
+    protected void clearLockFuture() {
+        tx.clearLockFuture(this);
+    }
+
+    /**
+     * Iterates over iterator, applies changes locally and sends it on backups.
+     *
+     * @param ignoreCntr {@code True} if need to ignore skip counter.
+     */
+    private void continueLoop(boolean ignoreCntr) {
+        if (isDone() || (!ignoreCntr && (SKIP_UPD.getAndIncrement(this) != 0)))
+            return;
+
+        GridDhtCacheAdapter cache = cctx.dhtCache();
+        EnlistOperation op = it.operation();
+        AffinityTopologyVersion topVer = tx.topologyVersionSnapshot();
+
+        try {
+            while (true) {
+                while (hasNext0()) {
+                    Object cur = next0();
+
+                    KeyCacheObject key = cctx.toCacheKeyObject(op.isDeleteOrLock() ? cur : ((IgniteBiTuple)cur).getKey());
+
+                    if (!ensureFreeSlot(key)) {
+                        // Can't advance further at the moment.
+                        peek = cur;
+
+                        it.beforeDetach();
+
+                        break;
+                    }
+
+                    GridDhtCacheEntry entry = cache.entryExx(key);
+
+                    if (log.isDebugEnabled())
+                        log.debug("Adding entry: " + entry);
+
+                    assert !entry.detached();
+
+                    CacheObject val = op.isDeleteOrLock() ? null : cctx.toCacheObject(((IgniteBiTuple)cur).getValue());
+
+                    tx.markQueryEnlisted(mvccSnapshot);
+
+                    GridCacheUpdateTxResult res;
+
+                    while (true) {
+                        cctx.shared().database().checkpointReadLock();
+
+                        try {
+                            switch (op) {
+                                case DELETE:
+                                    res = entry.mvccRemove(
+                                        tx,
+                                        cctx.localNodeId(),
+                                        topVer,
+                                        null,
+                                        mvccSnapshot,
+                                        isMoving(key.partition()));
+
+                                    break;
+
+                                case INSERT:
+                                case UPSERT:
+                                case UPDATE:
+                                    res = entry.mvccSet(
+                                        tx,
+                                        cctx.localNodeId(),
+                                        val,
+                                        0,
+                                        topVer,
+                                        null,
+                                        mvccSnapshot,
+                                        op.cacheOperation(),
+                                        isMoving(key.partition()),
+                                        op.noCreate());
+
+                                    break;
+
+                                case LOCK:
+                                    res = entry.mvccLock(
+                                        tx,
+                                        mvccSnapshot);
+
+                                    break;
+
+                                default:
+                                    throw new IgniteSQLException("Cannot acquire lock for operation [op= " + op + "]" +
+                                        "Operation is unsupported at the moment ", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+                            }
+
+                            break;
+                        }
+                        catch (GridCacheEntryRemovedException ignored) {
+                            entry = cache.entryExx(entry.key(), topVer);
+                        }
+                        finally {
+                            cctx.shared().database().checkpointReadUnlock();
+                        }
+                    }
+
+                    IgniteInternalFuture<GridCacheUpdateTxResult> updateFut = res.updateFuture();
+
+                    if (updateFut != null) {
+                        if (updateFut.isDone())
+                            res = updateFut.get();
+                        else {
+                            CacheObject val0 = val;
+                            GridDhtCacheEntry entry0 = entry;
+
+                            it.beforeDetach();
+
+                            updateFut.listen(new CI1<IgniteInternalFuture<GridCacheUpdateTxResult>>() {
+                                @Override public void apply(IgniteInternalFuture<GridCacheUpdateTxResult> fut) {
+                                    try {
+                                        processEntry(entry0, op, fut.get(), val0);
+
+                                        continueLoop(true);
+                                    }
+                                    catch (Throwable e) {
+                                        onDone(e);
+                                    }
+                                }
+                            });
+
+                            // Can't move further. Exit loop without decrementing the counter.
+                            return;
+                        }
+                    }
+
+                    processEntry(entry, op, res, val);
+                }
+
+                if (!hasNext0()) {
+                    if (walPtr != null && !cctx.tm().logTxRecords()) {
+                        cctx.shared().wal().flush(walPtr, true);
+
+                        walPtr = null; // Avoid additional flushing.
+                    }
+
+                    if (!F.isEmpty(batches)) {
+                        // Flush incomplete batches.
+                        // Need to skip batches for nodes where first request (contains tx info) is still in-flight.
+                        // Otherwise, the regular enlist request (without tx info) may beat it to the primary node.
+                        Iterator<Map.Entry<UUID, Batch>> it = batches.entrySet().iterator();
+
+                        while (it.hasNext()) {
+                            Map.Entry<UUID, Batch> e = it.next();
+
+                            ConcurrentMap<Integer, Batch> pending0 =
+                                pending == null ? null : pending.get(e.getKey());
+
+                            if (pending0 == null || !pending0.containsKey(FIRST_BATCH_ID)) {
+                                it.remove();
+
+                                sendBatch(e.getValue());
+                            }
+                        }
+                    }
+
+                    if (noPendingRequests()) {
+                        onDone(cnt);
+
+                        return;
+                    }
+                }
+
+                if (SKIP_UPD.decrementAndGet(this) == 0)
+                    break;
+
+                skipCntr = 1;
+            }
+        }
+        catch (Throwable e) {
+            onDone(e);
+
+            if (e instanceof Error)
+                throw (Error)e;
+        }
+    }
+
+    /** */
+    private Object next0() {
+        if (!hasNext0())
+            throw new NoSuchElementException();
+
+        Object cur;
+
+        if ((cur = peek) != null)
+            peek = null;
+        else
+            cur = it.next();
+
+        return cur;
+    }
+
+    /** */
+    private boolean hasNext0() {
+        if (peek == null && !it.hasNext())
+            peek = FINISHED;
+
+        return peek != FINISHED;
+    }
+
+    /**
+     * @return {@code True} if in-flight batches map is empty.
+     */
+    private boolean noPendingRequests() {
+        if (F.isEmpty(pending))
+            return true;
+
+        for (ConcurrentMap<Integer, Batch> e : pending.values()) {
+            if (!e.isEmpty())
+                return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * @param entry Cache entry.
+     * @param op Operation.
+     * @param updRes Update result.
+     * @param val New value.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void processEntry(GridDhtCacheEntry entry, EnlistOperation op,
+        GridCacheUpdateTxResult updRes, CacheObject val) throws IgniteCheckedException {
+        checkCompleted();
+
+        assert updRes != null && updRes.updateFuture() == null;
+
+        WALPointer ptr0 = updRes.loggedPointer();
+
+        if (ptr0 != null)
+            walPtr = ptr0;
+
+        if (!updRes.success())
+            return;
+
+        cnt++;
+
+        if (op != EnlistOperation.LOCK)
+            addToBatch(entry.key(), val, updRes.mvccHistory(), updRes.updateCounter(), entry.context().cacheId());
+    }
+
+    /**
+     * Adds row to batch.
+     * <b>IMPORTANT:</b> This method should be called from the critical section in {@link this.sendNextBatches()}
+     *
+     * @param key Key.
+     * @param val Value.
+     * @param hist History rows.
+     * @param updCntr Update counter.
+     */
+    private void addToBatch(KeyCacheObject key, CacheObject val, List<MvccLinkAwareSearchRow> hist, long updCntr,
+        int cacheId) throws IgniteCheckedException {
+        List<ClusterNode> backups = backupNodes(key);
+
+        int part = cctx.affinity().partition(key);
+
+        tx.touchPartition(cacheId, part);
+
+        if (F.isEmpty(backups))
+            return;
+
+        CacheEntryInfoCollection hist0 = null;
+
+        for (ClusterNode node : backups) {
+            assert !node.isLocal();
+
+            boolean moving = isMoving(node, part);
+
+            if (skipNearNodeUpdates && node.id().equals(nearNodeId) && !moving) {
+                updateMappings(node);
+
+                if (newRemoteTx(node))
+                    tx.addLockTransactionNode(node);
+
+                hasNearNodeUpdates = true;
+
+                if (nearUpdCntrs == null)
+                    nearUpdCntrs = new GridLongList();
+
+                nearUpdCntrs.add(updCntr);
+
+                continue;
+            }
+
+            Batch batch = null;
+
+            if (batches == null)
+                batches = new HashMap<>();
+            else
+                batch = batches.get(node.id());
+
+            if (batch == null)
+                batches.put(node.id(), batch = new Batch(node));
+
+            if (moving && hist0 == null) {
+                assert !F.isEmpty(hist);
+
+                hist0 = fetchHistoryInfo(key, hist);
+            }
+
+            batch.add(key, moving ? hist0 : val, updCntr);
+
+            if (batch.size() == BATCH_SIZE) {
+                assert batches != null;
+
+                batches.remove(node.id());
+
+                sendBatch(batch);
+            }
+        }
+    }
+
+    /**
+     *
+     * @param key Key.
+     * @param hist History rows.
+     * @return History entries.
+     * @throws IgniteCheckedException, if failed.
+     */
+    private CacheEntryInfoCollection fetchHistoryInfo(KeyCacheObject key, List<MvccLinkAwareSearchRow> hist)
+        throws IgniteCheckedException {
+        List<GridCacheEntryInfo> res = new ArrayList<>();
+
+        for (int i = 0; i < hist.size(); i++) {
+            MvccLinkAwareSearchRow row0 = hist.get(i);
+
+            MvccDataRow row = new MvccDataRow(cctx.group(),
+                row0.hash(),
+                row0.link(),
+                key.partition(),
+                CacheDataRowAdapter.RowData.NO_KEY,
+                row0.mvccCoordinatorVersion(),
+                row0.mvccCounter(),
+                row0.mvccOperationCounter());
+
+            GridCacheMvccEntryInfo entry = new GridCacheMvccEntryInfo();
+
+            entry.version(row.version());
+            entry.mvccVersion(row);
+            entry.newMvccVersion(row);
+            entry.value(row.value());
+            entry.expireTime(row.expireTime());
+
+            if (MvccUtils.compare(mvccSnapshot, row.mvccCoordinatorVersion(), row.mvccCounter()) != 0) {
+                entry.mvccTxState(row.mvccTxState() != TxState.NA ? row.mvccTxState() :
+                    MvccUtils.state(cctx, row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter()));
+            }
+
+            if (MvccUtils.compare(mvccSnapshot, row.newMvccCoordinatorVersion(), row.newMvccCounter()) != 0) {
+                entry.newMvccTxState(row.newMvccTxState() != TxState.NA ? row.newMvccTxState() :
+                    MvccUtils.state(cctx, row.newMvccCoordinatorVersion(), row.newMvccCounter(),
+                    row.newMvccOperationCounter()));
+            }
+
+            res.add(entry);
+        }
+
+        return new CacheEntryInfoCollection(res);
+    }
+
+    /** */
+    private boolean newRemoteTx(ClusterNode node) {
+        Set<ClusterNode> nodes = tx.lockTransactionNodes();
+
+        return nodes == null || !nodes.contains(node);
+    }
+
+    /**
+     * Checks if there free space in batches or free slot in in-flight batches is available for the given key.
+     *
+     * @param key Key.
+     * @return {@code True} if there is possible to add this key to batch or send ready batch.
+     */
+    @SuppressWarnings("ForLoopReplaceableByForEach")
+    private boolean ensureFreeSlot(KeyCacheObject key) {
+        if (F.isEmpty(batches) || F.isEmpty(pending))
+            return true;
+
+        // Check possibility of adding to batch and sending.
+        for (ClusterNode node : backupNodes(key)) {
+            if (skipNearNodeUpdates && node.id().equals(nearNodeId) && !isMoving(node, key.partition()))
+                continue;
+
+            Batch batch = batches.get(node.id());
+
+            // We can add key if batch is not full.
+            if (batch == null || batch.size() < BATCH_SIZE - 1)
+                continue;
+
+            ConcurrentMap<Integer, Batch> pending0 = pending.get(node.id());
+
+            assert pending0 == null || pending0.size() <= BATCHES_PER_NODE;
+
+            if (pending0 != null && (pending0.containsKey(FIRST_BATCH_ID) || pending0.size() == BATCHES_PER_NODE))
+                return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Send batch request to remote data node.
+     *
+     * @param batch Batch.
+     */
+    private void sendBatch(Batch batch) throws IgniteCheckedException {
+        assert batch != null && !batch.node().isLocal();
+
+        ClusterNode node = batch.node();
+
+        updateMappings(node);
+
+        GridDhtTxQueryEnlistRequest req;
+
+        if (newRemoteTx(node)) {
+            tx.addLockTransactionNode(node);
+
+            // If this is a first request to this node, send full info.
+            req = new GridDhtTxQueryFirstEnlistRequest(cctx.cacheId(),
+                futId,
+                cctx.localNodeId(),
+                tx.topologyVersionSnapshot(),
+                lockVer,
+                mvccSnapshot,
+                tx.remainingTime(),
+                tx.taskNameHash(),
+                nearNodeId,
+                nearLockVer,
+                it.operation(),
+                FIRST_BATCH_ID,
+                batch.keys(),
+                batch.values(),
+                batch.updateCounters());
+        }
+        else {
+            // Send only keys, values, LockVersion and batchId if this is not a first request to this backup.
+            req = new GridDhtTxQueryEnlistRequest(cctx.cacheId(),
+                futId,
+                lockVer,
+                it.operation(),
+                ++batchIdCntr,
+                mvccSnapshot.operationCounter(),
+                batch.keys(),
+                batch.values(),
+                batch.updateCounters());
+        }
+
+        ConcurrentMap<Integer, Batch> pending0 = null;
+
+        if (pending == null)
+            pending = new ConcurrentHashMap<>();
+        else
+            pending0 = pending.get(node.id());
+
+        if (pending0 == null)
+            pending.put(node.id(), pending0 = new ConcurrentHashMap<>());
+
+        Batch prev = pending0.put(req.batchId(), batch);
+
+        assert prev == null;
+
+        cctx.io().send(node, req, cctx.ioPolicy());
+    }
+
+    /** */
+    private synchronized void updateMappings(ClusterNode node) throws IgniteCheckedException {
+        checkCompleted();
+
+        Map<UUID, GridDistributedTxMapping> m = tx.dhtMap;
+
+        GridDistributedTxMapping mapping = m.get(node.id());
+
+        if (mapping == null)
+            m.put(node.id(), mapping = new GridDistributedTxMapping(node));
+
+        mapping.markQueryUpdate();
+    }
+
+    /**
+     * @param key Key.
+     * @return Backup nodes for the given key.
+     */
+    @NotNull private List<ClusterNode> backupNodes(KeyCacheObject key) {
+        List<ClusterNode> dhtNodes = cctx.affinity().nodesByKey(key, tx.topologyVersion());
+
+        assert !dhtNodes.isEmpty() && dhtNodes.get(0).id().equals(cctx.localNodeId()) :
+            "localNode = " + cctx.localNodeId() + ", dhtNodes = " + dhtNodes;
+
+        if (dhtNodes.size() == 1)
+            return Collections.emptyList();
+
+        return dhtNodes.subList(1, dhtNodes.size());
+    }
+
+    /**
+     * Checks whether all the necessary partitions are in {@link GridDhtPartitionState#OWNING} state.
+     *
+     * @param parts Partitions.
+     * @throws ClusterTopologyCheckedException If failed.
+     */
+    @SuppressWarnings("ForLoopReplaceableByForEach")
+    private void checkPartitions(@Nullable int[] parts) throws ClusterTopologyCheckedException {
+        if (cctx.isLocal() || !cctx.rebalanceEnabled())
+            return;
+
+        if (parts == null)
+            parts = U.toIntArray(
+                cctx.affinity()
+                    .primaryPartitions(cctx.localNodeId(), tx.topologyVersionSnapshot()));
+
+        GridDhtPartitionTopology top = cctx.topology();
+
+        try {
+            top.readLock();
+
+            for (int i = 0; i < parts.length; i++) {
+                GridDhtLocalPartition p = top.localPartition(parts[i]);
+
+                if (p == null || p.state() != GridDhtPartitionState.OWNING)
+                    throw new ClusterTopologyCheckedException("Cannot run update query. " +
+                        "Node must own all the necessary partitions."); // TODO IGNITE-7185 Send retry instead.
+            }
+        }
+        finally {
+            top.readUnlock();
+        }
+    }
+
+    /**
+     * @param part Partition.
+     * @return {@code true} if the given partition is rebalancing to any backup node.
+     */
+    private boolean isMoving(int part) {
+        if (movingParts == null)
+            movingParts = new HashMap<>();
+
+        Boolean res = movingParts.get(part);
+
+        if (res != null)
+            return res;
+
+        List<ClusterNode> dhtNodes = cctx.affinity().nodesByPartition(part, tx.topologyVersion());
+
+        for (int i = 1; i < dhtNodes.size(); i++) {
+            ClusterNode node = dhtNodes.get(i);
+            if (isMoving(node, part)) {
+                movingParts.put(part, Boolean.TRUE);
+
+                return true;
+            }
+        }
+
+        movingParts.put(part, Boolean.FALSE);
+
+        return false;
+    }
+
+    /**
+     * @param node Cluster node.
+     * @param part Partition.
+     * @return {@code true} if the given partition is rebalancing to the given node.
+     */
+    private boolean isMoving(ClusterNode node, int part) {
+        GridDhtPartitionState partState = cctx.topology().partitionState(node.id(), part);
+
+        return partState != GridDhtPartitionState.OWNING && partState != GridDhtPartitionState.EVICTED;
+    }
+
+    /** */
+    private void checkCompleted() throws IgniteCheckedException {
+        if (isDone())
+            throw new IgniteCheckedException("Future is done.");
+    }
+
+    /**
+     * Callback on backup response.
+     *
+     * @param nodeId Backup node.
+     * @param res Response.
+     */
+    public void onResult(UUID nodeId, GridDhtTxQueryEnlistResponse res) {
+        if (res.error() != null) {
+            onDone(new IgniteCheckedException("Failed to update backup node: [localNodeId=" + cctx.localNodeId() +
+                ", remoteNodeId=" + nodeId + ']', res.error()));
+
+            return;
+        }
+
+        assert pending != null;
+
+        ConcurrentMap<Integer, Batch> pending0 = pending.get(nodeId);
+
+        assert pending0 != null;
+
+        Batch rmv = pending0.remove(res.batchId());
+
+        assert rmv != null;
+
+        continueLoop(false);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean trackable() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void markNotTrackable() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onNodeLeft(UUID nodeId) {
+        boolean backupLeft = false;
+
+        Set<ClusterNode> nodes = tx.lockTransactionNodes();
+
+        if (!F.isEmpty(nodes)) {
+            for (ClusterNode node : nodes) {
+                if (node.id().equals(nodeId)) {
+                    backupLeft = true;
+
+                    break;
+                }
+            }
+        }
+
+        return (backupLeft || nearNodeId.equals(nodeId)) && onDone(
+            new ClusterTopologyCheckedException((backupLeft ? "Backup" : "Requesting") +
+                " node left the grid [nodeId=" + nodeId + ']'));
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onDone(@Nullable Long res, @Nullable Throwable err) {
+        assert res != null || err != null;
+
+        if (!DONE_UPD.compareAndSet(this, 0, 1))
+            return false;
+
+        if (err == null)
+            clearLockFuture();
+
+        // To prevent new remote transactions creation
+        // after future is cancelled by rollback.
+        synchronized (this) {
+            boolean done = super.onDone(res, err);
+
+            assert done;
+
+            if (log.isDebugEnabled())
+                log.debug("Completing future: " + this);
+
+            // Clean up.
+            cctx.mvcc().removeFuture(futId);
+
+            if (timeoutObj != null)
+                cctx.time().removeTimeoutObject(timeoutObj);
+
+            U.close(it, log);
+
+            return true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(Throwable error) {
+        onDone(error);
+    }
+
+    /**
+     * @return Timeout exception.
+     */
+    @NotNull protected IgniteTxTimeoutCheckedException timeoutException() {
+        return new IgniteTxTimeoutCheckedException("Failed to acquire lock within provided timeout for " +
+            "transaction [timeout=" + timeout + ", tx=" + tx + ']');
+    }
+
+    /**
+     * A batch of rows
+     */
+    private static class Batch {
+        /** Node ID. */
+        @GridToStringExclude
+        private final ClusterNode node;
+
+        /** */
+        private List<KeyCacheObject> keys;
+
+        /**
+         * Values collection.
+         * Items can be either {@link CacheObject} or preload entries collection {@link CacheEntryInfoCollection}.
+         */
+        private List<Message> vals;
+
+        /** Update counters. */
+        private GridLongList updCntrs;
+
+        /**
+         * @param node Cluster node.
+         */
+        private Batch(ClusterNode node) {
+            this.node = node;
+        }
+
+        /**
+         * @return Node.
+         */
+        public ClusterNode node() {
+            return node;
+        }
+
+        /**
+         * Adds a row to batch.
+         *
+         * @param key Key.
+         * @param val Value or preload entries collection.
+         */
+        public void add(KeyCacheObject key, Message val, long updCntr) {
+            assert val == null || val instanceof CacheObject || val instanceof CacheEntryInfoCollection;
+            assert updCntr > 0;
+
+            if (keys == null)
+                keys = new ArrayList<>();
+
+            keys.add(key);
+
+            if (val != null) {
+                if (vals == null)
+                    vals = new ArrayList<>();
+
+                vals.add(val);
+            }
+
+            if (updCntrs == null)
+                updCntrs = new GridLongList();
+
+            updCntrs.add(updCntr);
+
+            assert (vals == null) || keys.size() == vals.size();
+        }
+
+        /**
+         * @return number of rows.
+         */
+        public int size() {
+            return keys == null ? 0 : keys.size();
+        }
+
+        /**
+         * @return Collection of row keys.
+         */
+        public List<KeyCacheObject> keys() {
+            return keys;
+        }
+
+        /**
+         * @return Collection of row values.
+         */
+        public List<Message> values() {
+            return vals;
+        }
+
+        /**
+         * @return Update counters.
+         */
+        public GridLongList updateCounters() {
+            return updCntrs;
+        }
+    }
+
+    /**
+     * Lock request timeout object.
+     */
+    protected class LockTimeoutObject extends GridTimeoutObjectAdapter {
+        /**
+         * Default constructor.
+         */
+        LockTimeoutObject() {
+            super(timeout);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onTimeout() {
+            if (log.isDebugEnabled())
+                log.debug("Timed out waiting for lock response: " + this);
+
+            onDone(timeoutException());
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(LockTimeoutObject.class, this);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java
index 0ed8419..4c72e6f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishFuture.java
@@ -26,18 +26,23 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.internal.InvalidEnvironmentException;
 import org.apache.ignite.internal.IgniteDiagnosticAware;
 import org.apache.ignite.internal.IgniteDiagnosticPrepareContext;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
 import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -168,7 +173,7 @@
         if (ERR_UPD.compareAndSet(this, null, e)) {
             tx.setRollbackOnly();
 
-            if (X.hasCause(e, InvalidEnvironmentException.class))
+            if (X.hasCause(e, InvalidEnvironmentException.class, NodeStoppingException.class))
                 onComplete();
             else
                 finish(false);
@@ -225,7 +230,7 @@
 
             if (this.tx.onePhaseCommit() && (this.tx.state() == COMMITTING)) {
                 try {
-                    boolean hasInvalidEnvironmentIssue = X.hasCause(err, InvalidEnvironmentException.class);
+                    boolean hasInvalidEnvironmentIssue = X.hasCause(err, InvalidEnvironmentException.class, NodeStoppingException.class);
 
                     this.tx.tmFinish(err == null, hasInvalidEnvironmentIssue, false);
                 }
@@ -283,6 +288,8 @@
     public void finish(boolean commit) {
         boolean sync;
 
+        assert !tx.queryEnlisted() || tx.mvccSnapshot() != null;
+
         if (!F.isEmpty(dhtMap) || !F.isEmpty(nearMap))
             sync = finish(commit, dhtMap, nearMap);
         else if (!commit && !F.isEmpty(tx.lockTransactionNodes()))
@@ -291,6 +298,22 @@
             // No backup or near nodes to send commit message to (just complete then).
             sync = false;
 
+        GridLongList waitTxs = tx.mvccWaitTransactions();
+
+        if (waitTxs != null) {
+            MvccSnapshot snapshot = tx.mvccSnapshot();
+
+            assert snapshot != null;
+
+            MvccCoordinator crd = cctx.coordinators().currentCoordinator();
+
+            if (crd != null && crd.coordinatorVersion() == snapshot.coordinatorVersion()) {
+                add((IgniteInternalFuture)cctx.coordinators().waitTxsFuture(crd.nodeId(), waitTxs));
+
+                sync = true;
+            }
+        }
+
         markInitialized();
 
         if (!sync)
@@ -309,7 +332,7 @@
 
         boolean sync = tx.syncMode() == FULL_SYNC;
 
-        if (tx.explicitLock())
+        if (tx.explicitLock() || tx.queryEnlisted())
             sync = true;
 
         boolean res = false;
@@ -347,7 +370,8 @@
                 tx.taskNameHash(),
                 tx.activeCachesDeploymentEnabled(),
                 false,
-                false);
+                false,
+                tx.mvccSnapshot());
 
             try {
                 cctx.io().send(n, req, tx.ioPolicy());
@@ -395,15 +419,23 @@
         if (tx.onePhaseCommit())
             return false;
 
+        assert !commit || !tx.txState().mvccEnabled(cctx) || tx.mvccSnapshot() != null || F.isEmpty(tx.writeEntries());
+
         boolean sync = tx.syncMode() == FULL_SYNC;
 
-        if (tx.explicitLock())
+        if (tx.explicitLock() || tx.queryEnlisted())
             sync = true;
 
         boolean res = false;
 
         int miniId = 0;
 
+        // Do not need process active transactions on backups.
+        MvccSnapshot mvccSnapshot = tx.mvccSnapshot();
+
+        if (mvccSnapshot != null)
+            mvccSnapshot = mvccSnapshot.withoutActiveTransactions();
+
         // Create mini futures.
         for (GridDistributedTxMapping dhtMapping : dhtMap.values()) {
             ClusterNode n = dhtMapping.primary();
@@ -412,7 +444,7 @@
 
             GridDistributedTxMapping nearMapping = nearMap.get(n.id());
 
-            if (dhtMapping.empty() && nearMapping != null && nearMapping.empty())
+            if (!dhtMapping.queryUpdate() && dhtMapping.empty() && nearMapping != null && nearMapping.empty())
                 // Nothing to send.
                 continue;
 
@@ -425,6 +457,11 @@
             for (IgniteTxEntry e : dhtMapping.entries())
                 updCntrs.add(e.updateCounter());
 
+            Map<Integer, PartitionUpdateCounters> updCntrsForNode = null;
+
+            if (dhtMapping.queryUpdate() && commit)
+                updCntrsForNode = tx.filterUpdateCountersForBackupNode(n);
+
             GridDhtTxFinishRequest req = new GridDhtTxFinishRequest(
                 tx.nearNodeId(),
                 futId,
@@ -450,7 +487,9 @@
                 tx.activeCachesDeploymentEnabled(),
                 updCntrs,
                 false,
-                false);
+                false,
+                mvccSnapshot,
+                updCntrsForNode);
 
             req.writeVersion(tx.writeVersion() != null ? tx.writeVersion() : tx.xidVersion());
 
@@ -519,7 +558,8 @@
                     tx.taskNameHash(),
                     tx.activeCachesDeploymentEnabled(),
                     false,
-                    false);
+                    false,
+                    mvccSnapshot);
 
                 req.writeVersion(tx.writeVersion());
 
@@ -564,22 +604,35 @@
         if (!isDone()) {
             for (IgniteInternalFuture fut : futures()) {
                 if (!fut.isDone()) {
-                    MiniFuture f = (MiniFuture)fut;
+                    if (MiniFuture.class.isInstance(fut)) {
+                        MiniFuture f = (MiniFuture)fut;
 
-                    if (!f.node().isLocal()) {
-                        GridCacheVersion dhtVer = tx.xidVersion();
-                        GridCacheVersion nearVer = tx.nearXidVersion();
+                        if (!f.node().isLocal()) {
+                            GridCacheVersion dhtVer = tx.xidVersion();
+                            GridCacheVersion nearVer = tx.nearXidVersion();
 
-                        ctx.remoteTxInfo(f.node().id(), dhtVer, nearVer, "GridDhtTxFinishFuture " +
-                            "waiting for response [node=" + f.node().id() +
-                            ", topVer=" + tx.topologyVersion() +
-                            ", dhtVer=" + dhtVer +
-                            ", nearVer=" + nearVer +
-                            ", futId=" + futId +
-                            ", miniId=" + f.futId +
-                            ", tx=" + tx + ']');
+                            ctx.remoteTxInfo(f.node().id(), dhtVer, nearVer, "GridDhtTxFinishFuture " +
+                                "waiting for response [node=" + f.node().id() +
+                                ", topVer=" + tx.topologyVersion() +
+                                ", dhtVer=" + dhtVer +
+                                ", nearVer=" + nearVer +
+                                ", futId=" + futId +
+                                ", miniId=" + f.futId +
+                                ", tx=" + tx + ']');
 
-                        return;
+                            return;
+                        }
+                    }
+                    else if (fut instanceof MvccFuture) {
+                        MvccFuture f = (MvccFuture)fut;
+
+                        if (!cctx.localNodeId().equals(f.coordinatorNodeId())) {
+                            ctx.basicInfo(f.coordinatorNodeId(), "GridDhtTxFinishFuture " +
+                                "waiting for mvcc coordinator reply [mvccCrdNode=" + f.coordinatorNodeId() +
+                                ", loc=" + f.coordinatorNodeId().equals(cctx.localNodeId()) + ']');
+
+                            return;
+                        }
                     }
                 }
             }
@@ -591,9 +644,20 @@
         Collection<String> futs = F.viewReadOnly(futures(), new C1<IgniteInternalFuture<?>, String>() {
             @SuppressWarnings("unchecked")
             @Override public String apply(IgniteInternalFuture<?> f) {
-                return "[node=" + ((MiniFuture)f).node().id() +
-                    ", loc=" + ((MiniFuture)f).node().isLocal() +
-                    ", done=" + f.isDone() + "]";
+                if (f.getClass() == MiniFuture.class) {
+                    return "[node=" + ((MiniFuture)f).node().id() +
+                        ", loc=" + ((MiniFuture)f).node().isLocal() +
+                        ", done=" + f.isDone() + "]";
+                }
+                else if (f instanceof MvccFuture) {
+                    MvccFuture crdFut = (MvccFuture)f;
+
+                    return "[mvccCrdNode=" + crdFut.coordinatorNodeId() +
+                        ", loc=" + crdFut.coordinatorNodeId().equals(cctx.localNodeId()) +
+                        ", done=" + f.isDone() + "]";
+                }
+                else
+                    return f.toString();
             }
         });
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java
index 823b5fe..8e9ece6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxFinishRequest.java
@@ -20,11 +20,14 @@
 import java.io.Externalizable;
 import java.nio.ByteBuffer;
 import java.util.Collection;
+import java.util.Map;
 import java.util.UUID;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.internal.GridDirectCollection;
+import org.apache.ignite.internal.GridDirectMap;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxFinishRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -67,6 +70,13 @@
     /** One phase commit write version. */
     private GridCacheVersion writeVer;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
+    /** */
+    @GridDirectMap(keyType = Integer.class, valueType = PartitionUpdateCounters.class)
+    private Map<Integer, PartitionUpdateCounters> updCntrs;
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -122,7 +132,8 @@
         int taskNameHash,
         boolean addDepInfo,
         boolean retVal,
-        boolean waitRemoteTxs
+        boolean waitRemoteTxs,
+        MvccSnapshot mvccSnapshot
     ) {
         super(
             xidVer,
@@ -151,6 +162,7 @@
         this.nearNodeId = nearNodeId;
         this.isolation = isolation;
         this.miniId = miniId;
+        this.mvccSnapshot = mvccSnapshot;
 
         needReturnValue(retVal);
         waitRemoteTransactions(waitRemoteTxs);
@@ -207,7 +219,9 @@
         boolean addDepInfo,
         Collection<Long> updateIdxs,
         boolean retVal,
-        boolean waitRemoteTxs
+        boolean waitRemoteTxs,
+        MvccSnapshot mvccSnapshot,
+        Map<Integer, PartitionUpdateCounters> updCntrs
     ) {
         this(nearNodeId,
             futId,
@@ -232,7 +246,8 @@
             taskNameHash,
             addDepInfo,
             retVal,
-            waitRemoteTxs);
+            waitRemoteTxs,
+            mvccSnapshot);
 
         if (updateIdxs != null && !updateIdxs.isEmpty()) {
             partUpdateCnt = new GridLongList(updateIdxs.size());
@@ -240,6 +255,15 @@
             for (Long idx : updateIdxs)
                 partUpdateCnt.add(idx);
         }
+
+        this.updCntrs = updCntrs;
+    }
+
+    /**
+     * @return Counter.
+     */
+    public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
     }
 
     /**
@@ -340,6 +364,13 @@
         setFlag(retVal, NEED_RETURN_VALUE_FLAG_MASK);
     }
 
+    /**
+     * @return Partition counters update deferred until transaction commit.
+     */
+    public Map<Integer, PartitionUpdateCounters> updateCounters() {
+        return updCntrs;
+    }
+
     /** {@inheritDoc} */
     @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
         writer.setBuffer(buf);
@@ -391,6 +422,18 @@
 
                 writer.incrementState();
 
+            case 27:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
+            case 28:
+                if (!writer.writeMap("updCntrs", updCntrs, MessageCollectionItemType.INT, MessageCollectionItemType.MSG))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -459,6 +502,22 @@
 
                 reader.incrementState();
 
+            case 27:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 28:
+                updCntrs = reader.readMap("updCntrs", MessageCollectionItemType.INT, MessageCollectionItemType.MSG, false);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridDhtTxFinishRequest.class);
@@ -471,7 +530,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 27;
+        return 29;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocal.java
index 2e19df2..a091d44 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocal.java
@@ -160,7 +160,6 @@
 
         assert nearNodeId != null;
         assert nearFutId != null;
-        assert nearMiniId != 0;
         assert nearXidVer != null;
 
         this.nearNodeId = nearNodeId;
@@ -424,10 +423,10 @@
             final IgniteInternalFuture<?> lockFut = tryRollbackAsync();
 
             if (lockFut != null) {
-                if (lockFut instanceof GridDhtLockFuture)
-                    ((GridDhtLockFuture)lockFut).onError(rollbackException());
-                else {
-                    /**
+                if (lockFut instanceof DhtLockFuture)
+                    ((DhtLockFuture<?>)lockFut).onError(rollbackException());
+                else if (!lockFut.isDone()) {
+                    /*
                      * Prevents race with {@link GridDhtTransactionalCacheAdapter#lockAllAsync
                      * (GridCacheContext, ClusterNode, GridNearLockRequest, CacheEntryPredicate[])}
                      */
@@ -546,6 +545,8 @@
     public IgniteInternalFuture<IgniteInternalTx> rollbackDhtLocalAsync() {
         final GridDhtTxFinishFuture fut = new GridDhtTxFinishFuture<>(cctx, this, false);
 
+        rollbackFuture(fut);
+
         cctx.mvcc().addFuture(fut, fut.futureId());
 
         GridDhtTxPrepareFuture prepFut = this.prepFut;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
index 604fe06..613f160 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxLocalAdapter.java
@@ -20,16 +20,19 @@
 import java.io.Externalizable;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheAffinityManager;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
@@ -41,6 +44,7 @@
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.transactions.TxCounters;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.F0;
 import org.apache.ignite.internal.util.GridLeanMap;
@@ -59,7 +63,6 @@
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.apache.ignite.transactions.TransactionState;
 import org.jetbrains.annotations.Nullable;
-import java.util.concurrent.ConcurrentHashMap;
 
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.NOOP;
 import static org.apache.ignite.transactions.TransactionState.COMMITTED;
@@ -78,7 +81,7 @@
     private static final long serialVersionUID = 0L;
 
     /** Asynchronous rollback marker for lock futures. */
-    protected static final IgniteInternalFuture<Boolean> ROLLBACK_FUT = new GridFutureAdapter<>();
+    public static final IgniteInternalFuture<Boolean> ROLLBACK_FUT = new GridFutureAdapter<>();
 
     /** Lock future updater. */
     private static final AtomicReferenceFieldUpdater<GridDhtTxLocalAdapter, IgniteInternalFuture> LOCK_FUT_UPD =
@@ -108,7 +111,7 @@
     /** Enlist or lock future what is currently in progress. */
     @SuppressWarnings("UnusedDeclaration")
     @GridToStringExclude
-    protected volatile IgniteInternalFuture<Boolean> lockFut;
+    protected volatile IgniteInternalFuture<?> lockFut;
 
     /**
      * Empty constructor required for {@link Externalizable}.
@@ -853,11 +856,17 @@
     }
 
     /**
+     * @return Lock future.
+     */
+    public IgniteInternalFuture<?> lockFuture() {
+        return lockFut;
+    }
+
+    /**
      * Atomically updates lock future.
      *
      * @param oldFut Old future.
      * @param newFut New future.
-     *
      * @return {@code true} If future was changed.
      */
     public boolean updateLockFuture(IgniteInternalFuture<?> oldFut, IgniteInternalFuture<?> newFut) {
@@ -870,20 +879,21 @@
      * @param cond Clear lock condition.
      */
     public void clearLockFuture(@Nullable IgniteInternalFuture cond) {
-        IgniteInternalFuture f = lockFut;
+        while (true) {
+            IgniteInternalFuture f = lockFut;
 
-        if (cond != null && f != cond)
-            return;
-
-        lockFut = null;
+            if (f == null
+                || f == ROLLBACK_FUT
+                || (cond != null && f != cond)
+                || updateLockFuture(f, null))
+                return;
+        }
     }
 
     /**
-     *
      * @param f Future to finish.
      * @param err Error.
      * @param clearLockFut {@code True} if need to clear lock future.
-     *
      * @return Finished future.
      */
     public <T> GridFutureAdapter<T> finishFuture(GridFutureAdapter<T> f, Throwable err, boolean clearLockFut) {
@@ -900,17 +910,14 @@
      *
      * @return Current lock future or null if it's safe to roll back.
      */
-    public @Nullable IgniteInternalFuture<?> tryRollbackAsync() {
-        IgniteInternalFuture<Boolean> fut;
+    @Nullable public IgniteInternalFuture<?> tryRollbackAsync() {
+        while (true) {
+            final IgniteInternalFuture fut = lockFut;
 
-        while(true) {
-            fut = lockFut;
-
-            if (fut != null)
-                return fut == ROLLBACK_FUT ? null : fut;
-
-            if (updateLockFuture(null, ROLLBACK_FUT))
+            if (fut == ROLLBACK_FUT)
                 return null;
+            else if (updateLockFuture(fut, ROLLBACK_FUT))
+                return fut;
         }
     }
 
@@ -924,8 +931,7 @@
         if (commitOnPrepare()) {
             return finishFuture().chain(new CX1<IgniteInternalFuture<IgniteInternalTx>, GridNearTxPrepareResponse>() {
                 @Override public GridNearTxPrepareResponse applyx(IgniteInternalFuture<IgniteInternalTx> finishFut)
-                    throws IgniteCheckedException
-                {
+                    throws IgniteCheckedException {
                     return prepFut.get();
                 }
             });
@@ -934,6 +940,52 @@
         return prepFut;
     }
 
+    /**
+     * @param node Backup node.
+     * @return Partition counters map for the given backup node.
+     */
+    public Map<Integer, PartitionUpdateCounters> filterUpdateCountersForBackupNode(ClusterNode node) {
+        TxCounters txCntrs = txCounters(false);
+
+        if (txCntrs == null)
+            return null;
+
+        Map<Integer, PartitionUpdateCounters> updCntrs = txCntrs.updateCounters();
+
+        Map<Integer, PartitionUpdateCounters> res = new HashMap<>();
+
+        AffinityTopologyVersion top = topologyVersionSnapshot();
+
+        for (Map.Entry<Integer, PartitionUpdateCounters> entry : updCntrs.entrySet()) {
+            Integer cacheId = entry.getKey();
+
+            Map<Integer, Long> partsCntrs = entry.getValue().updateCounters();
+
+            assert !F.isEmpty(partsCntrs);
+
+            GridCacheAffinityManager affinity = cctx.cacheContext(cacheId).affinity();
+
+            Map<Integer, Long> resCntrs = new HashMap<>(partsCntrs.size());
+
+            for (Map.Entry<Integer, Long> e : partsCntrs.entrySet()) {
+                Integer p = e.getKey();
+
+                Long cntr = e.getValue();
+
+                if (affinity.backupByPartition(node, p, top)) {
+                    assert cntr != null && cntr > 0 : cntr;
+
+                    resCntrs.put(p, cntr);
+                }
+            }
+
+            if (!resCntrs.isEmpty())
+                res.put(cacheId, new PartitionUpdateCounters(resCntrs));
+        }
+
+        return res;
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return GridToStringBuilder.toString(GridDhtTxLocalAdapter.class, this, "nearNodes", nearMap.keySet(),
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
index ba1210e..1974038 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java
@@ -51,16 +51,21 @@
 import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
 import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
-import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheOperation;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
@@ -85,8 +90,10 @@
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteFutureCancelledException;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteReducer;
 import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.thread.IgniteThread;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED;
@@ -255,6 +262,11 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public IgniteLogger logger() {
+        return log;
+    }
+
+    /** {@inheritDoc} */
     @Override public IgniteUuid futureId() {
         return futId;
     }
@@ -393,7 +405,8 @@
                         entryProc,
                         tx.resolveTaskName(),
                         null,
-                        keepBinary);
+                        keepBinary,
+                        null);  // TODO IGNITE-7371
 
                     if (retVal || txEntry.op() == TRANSFORM) {
                         if (!F.isEmpty(txEntry.entryProcessors())) {
@@ -412,10 +425,12 @@
                             txEntry.oldValueOnPrimary(val != null);
 
                             for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : txEntry.entryProcessors()) {
-                                 CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(key, val,
-                                     txEntry.cached().version(), keepBinary, txEntry.cached());
+                                CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry<>(key, val,
+                                    txEntry.cached().version(), keepBinary, txEntry.cached());
 
-                                 try {
+                                IgniteThread.onEntryProcessorEntered(false);
+
+                                try {
                                     EntryProcessor<Object, Object, Object> processor = t.get1();
 
                                     procRes = processor.process(invokeEntry, t.get2());
@@ -430,8 +445,11 @@
 
                                     break;
                                 }
+                                finally {
+                                    IgniteThread.onEntryProcessorLeft();
+                                }
 
-                                 modified |= invokeEntry.modified();
+                                modified |= invokeEntry.modified();
                             }
 
                             if (modified)
@@ -501,7 +519,8 @@
                             /*transformClo*/null,
                             /*taskName*/null,
                             /*expiryPlc*/null,
-                            /*keepBinary*/true);
+                            /*keepBinary*/true,
+                            null); // TODO IGNITE-7371
                     }
 
                     if (oldVal != null)
@@ -784,13 +803,10 @@
             if (REPLIED_UPD.compareAndSet(this, 0, 1)) {
                 GridNearTxPrepareResponse res = createPrepareResponse(this.err);
 
-                try {
-                    sendPrepareResponse(res);
-                }
-                finally {
-                    // Will call super.onDone().
-                    onComplete(res);
-                }
+                // Will call super.onDone().
+                onComplete(res);
+
+                sendPrepareResponse(res);
 
                 return true;
             }
@@ -880,6 +896,8 @@
             tx.onePhaseCommit(),
             tx.activeCachesDeploymentEnabled());
 
+        res.mvccSnapshot(tx.mvccSnapshot());
+
         if (prepErr == null) {
             if (tx.needReturnValue() || tx.nearOnOriginatingNode() || tx.hasInterceptor())
                 addDhtValues(res);
@@ -983,7 +1001,7 @@
      * @return {@code True} if {@code done} flag was changed as a result of this call.
      */
     private boolean onComplete(@Nullable GridNearTxPrepareResponse res) {
-        if (last || tx.isSystemInvalidate())
+        if ((last || tx.isSystemInvalidate()) && !(tx.near() && tx.local()))
             tx.state(PREPARED);
 
         if (super.onDone(res, res == null ? err : null)) {
@@ -1019,7 +1037,7 @@
     public void prepare(GridNearTxPrepareRequest req) {
         assert req != null;
 
-        if (tx.empty()) {
+        if (tx.empty() && !req.queryUpdate()) {
             tx.setRollbackOnly();
 
             onDone((GridNearTxPrepareResponse)null);
@@ -1199,6 +1217,8 @@
      *
      */
     private void prepare0() {
+        boolean skipInit = false;
+
         try {
             if (tx.serializable() && tx.optimistic()) {
                 IgniteCheckedException err0;
@@ -1233,6 +1253,29 @@
                 }
             }
 
+            IgniteInternalFuture<MvccSnapshot> waitCrdCntrFut = null;
+
+            if (req.requestMvccCounter()) {
+                assert last;
+
+                assert tx.txState().mvccEnabled(cctx);
+
+                try {
+                    // Request snapshot locally only because
+                    // Mvcc Coordinator is expected to be local.
+                    MvccSnapshot snapshot = cctx.coordinators().tryRequestSnapshotLocal(tx);
+
+                    assert snapshot != null : tx.topologyVersion();
+
+                    tx.mvccSnapshot(snapshot);
+                }
+                catch (ClusterTopologyCheckedException e) {
+                    onDone(e);
+
+                    return;
+                }
+            }
+
             onEntriesLocked();
 
             // We are holding transaction-level locks for entries here, so we can get next write version.
@@ -1252,11 +1295,37 @@
             if (isDone())
                 return;
 
-            if (last)
-                sendPrepareRequests();
+            if (last) {
+                if (waitCrdCntrFut != null) {
+                    skipInit = true;
+
+                    waitCrdCntrFut.listen(new IgniteInClosure<IgniteInternalFuture<MvccSnapshot>>() {
+                        @Override public void apply(IgniteInternalFuture<MvccSnapshot> fut) {
+                            try {
+                                fut.get();
+
+                                sendPrepareRequests();
+
+                                markInitialized();
+                            }
+                            catch (Throwable e) {
+                                U.error(log, "Failed to get mvcc version for tx [txId=" + tx.nearXidVersion() +
+                                    ", err=" + e + ']', e);
+
+                                GridNearTxPrepareResponse res = createPrepareResponse(e);
+
+                                onDone(res, res.error());
+                            }
+                        }
+                    });
+                }
+                else
+                    sendPrepareRequests();
+            }
         }
         finally {
-            markInitialized();
+            if (!skipInit)
+                markInitialized();
         }
     }
 
@@ -1274,15 +1343,23 @@
             }
         }
 
+        assert !tx.txState().mvccEnabled(cctx) || !tx.onePhaseCommit() || tx.mvccSnapshot() != null;
+
         int miniId = 0;
 
         assert tx.transactionNodes() != null;
 
         final long timeout = timeoutObj != null ? timeoutObj.timeout : 0;
 
+        // Do not need process active transactions on backups.
+        MvccSnapshot mvccSnapshot = tx.mvccSnapshot();
+
+        if (mvccSnapshot != null)
+            mvccSnapshot = mvccSnapshot.withoutActiveTransactions();
+
         // Create mini futures.
         for (GridDistributedTxMapping dhtMapping : tx.dhtMap().values()) {
-            assert !dhtMapping.empty();
+            assert !dhtMapping.empty() || dhtMapping.queryUpdate();
 
             ClusterNode n = dhtMapping.primary();
 
@@ -1294,7 +1371,7 @@
 
             Collection<IgniteTxEntry> dhtWrites = dhtMapping.writes();
 
-            if (F.isEmpty(dhtWrites) && F.isEmpty(nearWrites))
+            if (!dhtMapping.queryUpdate() && F.isEmpty(dhtWrites) && F.isEmpty(nearWrites))
                 continue;
 
             MiniFuture fut = new MiniFuture(n.id(), ++miniId, dhtMapping, nearMapping);
@@ -1319,7 +1396,10 @@
                 tx.taskNameHash(),
                 tx.activeCachesDeploymentEnabled(),
                 tx.storeWriteThrough(),
-                retVal);
+                retVal,
+                mvccSnapshot);
+
+            req.queryUpdate(dhtMapping.queryUpdate());
 
             int idx = 0;
 
@@ -1422,7 +1502,8 @@
                     tx.taskNameHash(),
                     tx.activeCachesDeploymentEnabled(),
                     tx.storeWriteThrough(),
-                    retVal);
+                    retVal,
+                    mvccSnapshot);
 
                 for (IgniteTxEntry entry : nearMapping.entries()) {
                     if (CU.writes().apply(entry)) {
@@ -1793,7 +1874,7 @@
                         }
                     }
 
-                    if (dhtMapping.empty()) {
+                    if (!dhtMapping.queryUpdate() && dhtMapping.empty()) {
                         dhtMap.remove(nodeId);
 
                         if (log.isDebugEnabled())
@@ -1824,6 +1905,10 @@
                         try {
                             if (entry.initialValue(info.value(),
                                 info.version(),
+                                cacheCtx.mvccEnabled() ? ((MvccVersionAware)info).mvccVersion() : null,
+                                cacheCtx.mvccEnabled() ? ((MvccUpdateVersionAware)info).newMvccVersion() : null,
+                                cacheCtx.mvccEnabled() ? ((MvccVersionAware)info).mvccTxState() : TxState.NA,
+                                cacheCtx.mvccEnabled() ? ((MvccUpdateVersionAware)info).newMvccTxState() : TxState.NA,
                                 info.ttl(),
                                 info.expireTime(),
                                 true,
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java
index 88da7b0..68c1f39 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareRequest.java
@@ -34,6 +34,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxPrepareRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
@@ -103,6 +104,9 @@
     @GridDirectTransient
     private List<IgniteTxKey> nearWritesCacheMissed;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /** {@code True} if remote tx should skip adding itself to completed versions map on finish. */
     private boolean skipCompletedVers;
 
@@ -144,7 +148,8 @@
         int taskNameHash,
         boolean addDepInfo,
         boolean storeWriteThrough,
-        boolean retVal) {
+        boolean retVal,
+        MvccSnapshot mvccInfo) {
         super(tx,
             timeout,
             null,
@@ -165,6 +170,7 @@
         this.nearXidVer = nearXidVer;
         this.subjId = subjId;
         this.taskNameHash = taskNameHash;
+        this.mvccSnapshot = mvccInfo;
 
         storeWriteThrough(storeWriteThrough);
         needReturnValue(retVal);
@@ -177,6 +183,13 @@
     }
 
     /**
+     * @return Mvcc info.
+     */
+    public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
      * @return Near cache writes for which cache was not found (possible if client near cache was closed).
      */
     @Nullable public List<IgniteTxKey> nearWritesCacheMissed() {
@@ -473,6 +486,12 @@
 
                 writer.incrementState();
 
+            case 33:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -593,6 +612,14 @@
 
                 reader.incrementState();
 
+            case 33:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridDhtTxPrepareRequest.class);
@@ -605,7 +632,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 33;
+        return 34;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistFuture.java
new file mode 100644
index 0000000..dd30855
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistFuture.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.Objects;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.GridQueryCancel;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+
+/**
+ * Cache lock future.
+ */
+public final class GridDhtTxQueryEnlistFuture extends GridDhtTxAbstractEnlistFuture {
+    /** Involved cache ids. */
+    private final int[] cacheIds;
+
+    /** Schema name. */
+    private final String schema;
+
+    /** Query string. */
+    private final String qry;
+
+    /** Query parameters. */
+    private final Object[] params;
+
+    /** Flags. */
+    private final int flags;
+
+    /** Fetch page size. */
+    private final int pageSize;
+
+    /**
+     * @param nearNodeId Near node ID.
+     * @param nearLockVer Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId Thread ID.
+     * @param nearFutId Near future id.
+     * @param nearMiniId Near mini future id.
+     * @param tx Transaction.
+     * @param cacheIds Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Lock acquisition timeout.
+     * @param cctx Cache context.
+     */
+    public GridDhtTxQueryEnlistFuture(
+        UUID nearNodeId,
+        GridCacheVersion nearLockVer,
+        MvccSnapshot mvccSnapshot,
+        long threadId,
+        IgniteUuid nearFutId,
+        int nearMiniId,
+        GridDhtTxLocalAdapter tx,
+        int[] cacheIds,
+        int[] parts,
+        String schema,
+        String qry,
+        Object[] params,
+        int flags,
+        int pageSize,
+        long timeout,
+        GridCacheContext<?, ?> cctx) {
+        super(nearNodeId,
+            nearLockVer,
+            mvccSnapshot,
+            threadId,
+            nearFutId,
+            nearMiniId,
+            parts,
+            tx,
+            timeout,
+            cctx);
+
+        assert timeout >= 0;
+        assert nearNodeId != null;
+        assert nearLockVer != null;
+        assert threadId == tx.threadId();
+
+        this.cacheIds = cacheIds;
+        this.schema = schema;
+        this.qry = qry;
+        this.params = params;
+        this.flags = flags;
+        this.pageSize = pageSize;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected UpdateSourceIterator<?> createIterator() throws IgniteCheckedException {
+        return cctx.kernalContext().query().prepareDistributedUpdate(cctx, cacheIds, parts, schema, qry,
+                params, flags, pageSize, 0, tx.topologyVersionSnapshot(), mvccSnapshot, new GridQueryCancel());
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        GridDhtTxQueryEnlistFuture future = (GridDhtTxQueryEnlistFuture)o;
+
+        return Objects.equals(futId, future.futId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return futId.hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridDhtTxQueryEnlistFuture.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistRequest.java
new file mode 100644
index 0000000..650e1dc
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistRequest.java
@@ -0,0 +1,404 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectCollection;
+import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheObjectContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
+import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class GridDhtTxQueryEnlistRequest extends GridCacheIdMessage {
+    /** */
+    private static final long serialVersionUID = 5103887309729425173L;
+
+    /** */
+    private IgniteUuid dhtFutId;
+
+    /** */
+    private int batchId;
+
+    /** DHT tx version. */
+    private GridCacheVersion lockVer;
+
+    /** */
+    private EnlistOperation op;
+
+    /** */
+    private int mvccOpCnt;
+
+    /** */
+    @GridDirectCollection(KeyCacheObject.class)
+    private List<KeyCacheObject> keys;
+
+    /** */
+    @GridDirectCollection(Message.class)
+    private List<Message> vals;
+
+    /** */
+    private GridLongList updCntrs;
+
+    /**
+     *
+     */
+    public GridDhtTxQueryEnlistRequest() {
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param dhtFutId DHT future id.
+     * @param lockVer Lock version.
+     * @param op Operation.
+     * @param batchId Batch id.
+     * @param mvccOpCnt Mvcc operation counter.
+     * @param keys Keys.
+     * @param vals Values.
+     * @param updCntrs Update counters.
+     */
+    GridDhtTxQueryEnlistRequest(int cacheId,
+        IgniteUuid dhtFutId,
+        GridCacheVersion lockVer,
+        EnlistOperation op,
+        int batchId,
+        int mvccOpCnt,
+        List<KeyCacheObject> keys,
+        List<Message> vals,
+        GridLongList updCntrs) {
+        this.cacheId = cacheId;
+        this.dhtFutId = dhtFutId;
+        this.lockVer = lockVer;
+        this.op = op;
+        this.batchId = batchId;
+        this.mvccOpCnt = mvccOpCnt;
+        this.keys = keys;
+        this.vals = vals;
+        this.updCntrs = updCntrs;
+    }
+
+    /**
+     * Returns request rows number.
+     *
+     * @return Request rows number.
+     */
+    public int batchSize() {
+        return keys == null ? 0  : keys.size();
+    }
+
+    /**
+     * @return Dht future id.
+     */
+    public IgniteUuid dhtFutureId() {
+        return dhtFutId;
+    }
+
+    /**
+     * @return Lock version.
+     */
+    public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /**
+     * @return Mvcc operation counter.
+     */
+    public int operationCounter() {
+        return mvccOpCnt;
+    }
+
+    /**
+     * @return Operation.
+     */
+    public EnlistOperation op() {
+        return op;
+    }
+
+    /**
+     * @return Keys.
+     */
+    public List<KeyCacheObject> keys() {
+        return keys;
+    }
+
+    /**
+     * @return Values.
+     */
+    public List<Message> values() {
+        return vals;
+    }
+
+    /**
+     * @return Update counters.
+     */
+    public GridLongList updateCounters() {
+        return updCntrs;
+    }
+
+    /**
+     * @return Batch id.
+     */
+    public int batchId() {
+        return batchId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 155;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        CacheObjectContext objCtx = ctx.cacheContext(cacheId).cacheObjectContext();
+
+        if (keys != null) {
+            for (int i = 0; i < keys.size(); i++) {
+
+                keys.get(i).prepareMarshal(objCtx);
+
+                if (vals != null) {
+                    Message val = vals.get(i);
+
+                    if (val instanceof CacheObject)
+                        ((CacheObject)val).prepareMarshal(objCtx);
+                    else if (val instanceof CacheEntryInfoCollection) {
+                        for (GridCacheEntryInfo entry : ((CacheEntryInfoCollection)val).infos()) {
+                            CacheObject entryVal = entry.value();
+
+                            if (entryVal != null)
+                                entryVal.prepareMarshal(objCtx);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        CacheObjectContext objCtx = ctx.cacheContext(cacheId).cacheObjectContext();
+
+        if (keys != null) {
+            for (int i = 0; i < keys.size(); i++) {
+                keys.get(i).finishUnmarshal(objCtx, ldr);
+
+                if (vals != null) {
+                    Message val = vals.get(i);
+
+                    if (val instanceof CacheObject)
+                        ((CacheObject)val).finishUnmarshal(objCtx, ldr);
+                    else if (val instanceof CacheEntryInfoCollection) {
+                        for (GridCacheEntryInfo entry : ((CacheEntryInfoCollection)val).infos()) {
+                            CacheObject entryVal = entry.value();
+
+                            if (entryVal != null)
+                                entryVal.finishUnmarshal(objCtx, ldr);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeInt("batchId", batchId))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeIgniteUuid("dhtFutId", dhtFutId))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeCollection("keys", keys, MessageCollectionItemType.MSG))
+                    return false;
+
+                writer.incrementState();
+
+            case 6:
+                if (!writer.writeMessage("lockVer", lockVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeInt("mvccOpCnt", mvccOpCnt))
+                    return false;
+
+                writer.incrementState();
+
+            case 8:
+                if (!writer.writeByte("op", op != null ? (byte)op.ordinal() : -1))
+                    return false;
+
+                writer.incrementState();
+
+            case 9:
+                if (!writer.writeMessage("updCntrs", updCntrs))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeCollection("vals", vals, MessageCollectionItemType.MSG))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                batchId = reader.readInt("batchId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                dhtFutId = reader.readIgniteUuid("dhtFutId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                keys = reader.readCollection("keys", MessageCollectionItemType.MSG);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 6:
+                lockVer = reader.readMessage("lockVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                mvccOpCnt = reader.readInt("mvccOpCnt");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 8:
+                byte opOrd;
+
+                opOrd = reader.readByte("op");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                op = EnlistOperation.fromOrdinal(opOrd);
+
+                reader.incrementState();
+
+            case 9:
+                updCntrs = reader.readMessage("updCntrs");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 10:
+                vals = reader.readCollection("vals", MessageCollectionItemType.MSG);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridDhtTxQueryEnlistRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 11;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridDhtTxQueryEnlistRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistResponse.java
new file mode 100644
index 0000000..f3b4aa7
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryEnlistResponse.java
@@ -0,0 +1,205 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class GridDhtTxQueryEnlistResponse extends GridCacheIdMessage {
+    /** */
+    private static final long serialVersionUID = -1510546400896574705L;
+
+    /** Future ID. */
+    private IgniteUuid futId;
+
+    /** */
+    private int batchId;
+
+    /** Error. */
+    @GridDirectTransient
+    private Throwable err;
+
+    /** Serialized error. */
+    private byte[] errBytes;
+
+    /**
+     *
+     */
+    public GridDhtTxQueryEnlistResponse() {
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param futId Future id.
+     * @param batchId Batch id.
+     * @param err Error.
+     */
+    GridDhtTxQueryEnlistResponse(int cacheId, IgniteUuid futId, int batchId,
+        Throwable err) {
+        this.cacheId = cacheId;
+        this.futId = futId;
+        this.batchId = batchId;
+        this.err = err;
+    }
+
+    /**
+     * @return Future id.
+     */
+    public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Batch id.
+     */
+    public int batchId() {
+        return batchId;
+    }
+
+    /**
+     * @return Error.
+     */
+    @Override public Throwable error() {
+        return err;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        if (err != null && errBytes == null)
+            errBytes = U.marshal(ctx.marshaller(), err);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        if (errBytes != null)
+            err = U.unmarshal(ctx, errBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 144;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 6;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeInt("batchId", batchId))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeByteArray("errBytes", errBytes))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeIgniteUuid("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                batchId = reader.readInt("batchId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                errBytes = reader.readByteArray("errBytes");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                futId = reader.readIgniteUuid("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridDhtTxQueryEnlistResponse.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridDhtTxQueryEnlistResponse.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryFirstEnlistRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryFirstEnlistRequest.java
new file mode 100644
index 0000000..5df1d58
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryFirstEnlistRequest.java
@@ -0,0 +1,370 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotWithoutTxs;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * First enlist request.
+ */
+public class GridDhtTxQueryFirstEnlistRequest extends GridDhtTxQueryEnlistRequest {
+    /** */
+    private static final long serialVersionUID = -7494735627739420176L;
+
+    /** Tx initiator. Primary node in case of remote DHT tx. */
+    private UUID subjId;
+
+    /** */
+    private AffinityTopologyVersion topVer;
+
+    /** */
+    private long crdVer;
+
+    /** */
+    private long cntr;
+
+    /** */
+    private long cleanupVer;
+
+    /** */
+    private long timeout;
+
+    /** */
+    private int taskNameHash;
+
+    /** */
+    private UUID nearNodeId;
+
+    /** Near tx version. */
+    private GridCacheVersion nearXidVer;
+
+    /**
+     *
+     */
+    public GridDhtTxQueryFirstEnlistRequest() {
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param dhtFutId DHT future id.
+     * @param subjId Subject id.
+     * @param topVer Topology version.
+     * @param lockVer Lock version.
+     * @param snapshot Mvcc snapshot.
+     * @param timeout Timeout.
+     * @param taskNameHash Task name hash.
+     * @param nearNodeId Near node id.
+     * @param nearXidVer Near xid version.
+     * @param op Operation.
+     * @param batchId Batch id.
+     * @param keys Keys.
+     * @param vals Values.
+     * @param updCntrs Update counters.
+     */
+    GridDhtTxQueryFirstEnlistRequest(int cacheId,
+        IgniteUuid dhtFutId,
+        UUID subjId,
+        AffinityTopologyVersion topVer,
+        GridCacheVersion lockVer,
+        MvccSnapshot snapshot,
+        long timeout,
+        int taskNameHash,
+        UUID nearNodeId,
+        GridCacheVersion nearXidVer,
+        EnlistOperation op,
+        int batchId,
+        List<KeyCacheObject> keys,
+        List<Message> vals,
+        GridLongList updCntrs) {
+        super(cacheId, dhtFutId, lockVer, op, batchId, snapshot.operationCounter(), keys, vals, updCntrs);
+        this.cacheId = cacheId;
+        this.subjId = subjId;
+        this.topVer = topVer;
+        this.crdVer = snapshot.coordinatorVersion();
+        this.cntr = snapshot.counter();
+        this.cleanupVer = snapshot.cleanupVersion();
+        this.timeout = timeout;
+        this.taskNameHash = taskNameHash;
+        this.nearNodeId = nearNodeId;
+        this.nearXidVer = nearXidVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public AffinityTopologyVersion topologyVersion() {
+        return topVer;
+    }
+
+    /**
+     * @return Near node id.
+     */
+    public UUID nearNodeId() {
+        return nearNodeId;
+    }
+
+    /**
+     * @return Near transaction ID.
+     */
+    public GridCacheVersion nearXidVersion() {
+        return nearXidVer;
+    }
+
+    /**
+     * @return Max lock wait time.
+     */
+    public long timeout() {
+        return timeout;
+    }
+
+    /**
+     * @return Subject id.
+     */
+    public UUID subjectId() {
+        return subjId;
+    }
+
+    /**
+     * @return Task name hash.
+     */
+    public int taskNameHash() {
+        return taskNameHash;
+    }
+
+    /**
+     * @return MVCC snapshot.
+     */
+    public MvccSnapshot mvccSnapshot() {
+        return new MvccSnapshotWithoutTxs(crdVer, cntr, operationCounter(), cleanupVer);
+    }
+
+    /**
+     * @return Coordinator version.
+     */
+    public long coordinatorVersion() {
+        return crdVer;
+    }
+
+    /**
+     * @return Counter.
+     */
+    public long counter() {
+        return cntr;
+    }
+
+    /**
+     * @return Cleanup version.
+     */
+    public long cleanupVersion() {
+        return cleanupVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 156;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 11:
+                if (!writer.writeLong("cleanupVer", cleanupVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 12:
+                if (!writer.writeLong("cntr", cntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 13:
+                if (!writer.writeLong("crdVer", crdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 14:
+                if (!writer.writeUuid("nearNodeId", nearNodeId))
+                    return false;
+
+                writer.incrementState();
+
+            case 15:
+                if (!writer.writeMessage("nearXidVer", nearXidVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 16:
+                if (!writer.writeUuid("subjId", subjId))
+                    return false;
+
+                writer.incrementState();
+
+            case 17:
+                if (!writer.writeInt("taskNameHash", taskNameHash))
+                    return false;
+
+                writer.incrementState();
+
+            case 18:
+                if (!writer.writeLong("timeout", timeout))
+                    return false;
+
+                writer.incrementState();
+
+            case 19:
+                if (!writer.writeMessage("topVer", topVer))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 11:
+                cleanupVer = reader.readLong("cleanupVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 12:
+                cntr = reader.readLong("cntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 13:
+                crdVer = reader.readLong("crdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 14:
+                nearNodeId = reader.readUuid("nearNodeId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 15:
+                nearXidVer = reader.readMessage("nearXidVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 16:
+                subjId = reader.readUuid("subjId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 17:
+                taskNameHash = reader.readInt("taskNameHash");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 18:
+                timeout = reader.readLong("timeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 19:
+                topVer = reader.readMessage("topVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridDhtTxQueryFirstEnlistRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 20;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridDhtTxQueryFirstEnlistRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryResultsEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryResultsEnlistFuture.java
new file mode 100644
index 0000000..b3d15d4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxQueryResultsEnlistFuture.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Objects;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+
+/**
+ * Future processing transaction enlisting and locking of entries
+ * produces by complex DML queries with reduce step.
+ */
+public final class GridDhtTxQueryResultsEnlistFuture extends GridDhtTxAbstractEnlistFuture implements UpdateSourceIterator<Object> {
+    /** */
+    private static final long serialVersionUID = -4933550335145438798L;
+    /** */
+    private EnlistOperation op;
+
+    /** */
+    private Iterator<Object> it;
+
+    /**
+     * @param nearNodeId Near node ID.
+     * @param nearLockVer Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId Thread ID.
+     * @param nearFutId Near future id.
+     * @param nearMiniId Near mini future id.
+     * @param tx Transaction.
+     * @param timeout Lock acquisition timeout.
+     * @param cctx Cache context.
+     * @param rows Collection of rows.
+     * @param op Operation.
+     */
+    public GridDhtTxQueryResultsEnlistFuture(UUID nearNodeId,
+        GridCacheVersion nearLockVer,
+        MvccSnapshot mvccSnapshot,
+        long threadId,
+        IgniteUuid nearFutId,
+        int nearMiniId,
+        GridDhtTxLocalAdapter tx,
+        long timeout,
+        GridCacheContext<?, ?> cctx,
+        Collection<Object> rows,
+        EnlistOperation op) {
+        super(nearNodeId,
+            nearLockVer,
+            mvccSnapshot,
+            threadId,
+            nearFutId,
+            nearMiniId,
+            null,
+            tx,
+            timeout,
+            cctx);
+
+        this.op = op;
+
+        it = rows.iterator();
+
+        skipNearNodeUpdates = true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected UpdateSourceIterator<?> createIterator() throws IgniteCheckedException {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        GridDhtTxQueryResultsEnlistFuture future = (GridDhtTxQueryResultsEnlistFuture)o;
+
+        return Objects.equals(futId, future.futId);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return futId.hashCode();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridDhtTxQueryResultsEnlistFuture.class, this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public EnlistOperation operation() {
+        return op;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean hasNextX() {
+        return it.hasNext();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Object nextX() {
+        return it.next();
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java
index 746eb38..2fdb6ac 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxRemote.java
@@ -21,26 +21,38 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 import javax.cache.processor.EntryProcessor;
 import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterTopologyException;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheEntryInfoCollection;
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
 import org.apache.ignite.internal.processors.cache.GridCacheOperation;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxRemoteAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteSingleStateImpl;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxRemoteStateImpl;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.tostring.GridToStringBuilder;
 import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.jetbrains.annotations.Nullable;
@@ -224,7 +236,7 @@
     /**
      * @param txNodes Transaction nodes.
      */
-    public void transactionNodes(Map<UUID, Collection<UUID>> txNodes) {
+    @Override public void transactionNodes(Map<UUID, Collection<UUID>> txNodes) {
         this.txNodes = txNodes;
     }
 
@@ -368,6 +380,126 @@
         txState.addWriteEntry(key, txEntry);
     }
 
+    /**
+     *
+     * @param ctx Cache context.
+     * @param op Operation.
+     * @param keys Keys.
+     * @param vals Values.
+     * @param snapshot Mvcc snapshot.
+     * @param updCntrs Update counters.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void mvccEnlistBatch(GridCacheContext ctx, EnlistOperation op, List<KeyCacheObject> keys,
+        List<Message> vals, MvccSnapshot snapshot, GridLongList updCntrs) throws IgniteCheckedException {
+        assert keys != null && updCntrs != null && keys.size() == updCntrs.size();
+
+        WALPointer ptr = null;
+
+        GridDhtCacheAdapter dht = ctx.dht();
+
+        addActiveCache(ctx, false);
+
+        for (int i = 0; i < keys.size(); i++) {
+            KeyCacheObject key = keys.get(i);
+
+            assert key != null;
+
+            int part = ctx.affinity().partition(key);
+
+            GridDhtLocalPartition locPart = ctx.topology().localPartition(part, topologyVersion(), false);
+
+            if (locPart == null || !locPart.reserve())
+                throw new ClusterTopologyException("Can not reserve partition. Please retry on stable topology.");
+
+            try {
+                CacheObject val = null;
+
+                Message val0 = vals != null ? vals.get(i) : null;
+
+                CacheEntryInfoCollection entries =
+                    val0 instanceof CacheEntryInfoCollection ? (CacheEntryInfoCollection)val0 : null;
+
+                if (entries == null && !op.isDeleteOrLock())
+                    val = (val0 instanceof CacheObject) ? (CacheObject)val0 : null;
+
+                GridDhtCacheEntry entry = dht.entryExx(key, topologyVersion());
+
+                GridCacheUpdateTxResult updRes;
+
+                while (true) {
+                    ctx.shared().database().checkpointReadLock();
+
+                    try {
+                        if (entries == null) {
+                            switch (op) {
+                                case DELETE:
+                                    updRes = entry.mvccRemove(
+                                        this,
+                                        ctx.localNodeId(),
+                                        topologyVersion(),
+                                        updCntrs.get(i),
+                                        snapshot,
+                                        false);
+
+                                    break;
+
+                                case INSERT:
+                                case UPSERT:
+                                case UPDATE:
+                                    updRes = entry.mvccSet(
+                                        this,
+                                        ctx.localNodeId(),
+                                        val,
+                                        0,
+                                        topologyVersion(),
+                                        updCntrs.get(i),
+                                        snapshot,
+                                        op.cacheOperation(),
+                                        false,
+                                        false);
+
+                                    break;
+
+                                default:
+                                    throw new IgniteSQLException("Cannot acquire lock for operation [op= "
+                                        + op + "]" + "Operation is unsupported at the moment ",
+                                        IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+                            }
+                        }
+                        else {
+                            updRes = entry.mvccUpdateRowsWithPreloadInfo(this,
+                                ctx.localNodeId(),
+                                topologyVersion(),
+                                updCntrs.get(i),
+                                entries.infos(),
+                                op.cacheOperation(),
+                                snapshot);
+                        }
+
+                        break;
+                    }
+                    catch (GridCacheEntryRemovedException ignore) {
+                        entry = dht.entryExx(key);
+                    }
+                    finally {
+                        ctx.shared().database().checkpointReadUnlock();
+                    }
+                }
+
+                assert updRes.updateFuture() == null : "Entry should not be locked on the backup";
+
+                ptr = updRes.loggedPointer();
+            }
+            finally {
+                locPart.release();
+            }
+        }
+
+        if (ptr != null && !ctx.tm().logTxRecords())
+            ctx.shared().wal().flush(ptr, true);
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return GridToStringBuilder.toString(GridDhtTxRemote.class, this, "super", super.toString());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
index 204a0ce..a76844a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedGetFuture.java
@@ -43,6 +43,10 @@
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTrackerImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotResponseListener;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.GridLeanMap;
@@ -64,7 +68,8 @@
 /**
  * Colocated get future.
  */
-public class GridPartitionedGetFuture<K, V> extends CacheDistributedGetFutureAdapter<K, V> {
+public class GridPartitionedGetFuture<K, V> extends CacheDistributedGetFutureAdapter<K, V>
+    implements MvccSnapshotResponseListener {
     /** */
     private static final long serialVersionUID = 0L;
 
@@ -74,6 +79,12 @@
     /** Logger. */
     private static IgniteLogger log;
 
+    /** */
+    protected final MvccSnapshot mvccSnapshot;
+
+    /** */
+    private MvccQueryTracker mvccTracker;
+
     /**
      * @param cctx Context.
      * @param keys Keys.
@@ -88,6 +99,7 @@
      * @param skipVals Skip values flag.
      * @param needVer If {@code true} returns values as tuples containing value and version.
      * @param keepCacheObjects Keep cache objects flag.
+     * @param mvccSnapshot Mvcc snapshot.
      */
     public GridPartitionedGetFuture(
         GridCacheContext<K, V> cctx,
@@ -101,7 +113,8 @@
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean skipVals,
         boolean needVer,
-        boolean keepCacheObjects
+        boolean keepCacheObjects,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
         super(cctx,
             keys,
@@ -115,12 +128,32 @@
             needVer,
             keepCacheObjects,
             recovery);
+        assert mvccSnapshot == null || cctx.mvccEnabled();
+
+        this.mvccSnapshot = mvccSnapshot;
 
         if (log == null)
             log = U.logger(cctx.kernalContext(), logRef, GridPartitionedGetFuture.class);
     }
 
     /**
+     * @return Mvcc snapshot if mvcc is enabled for cache.
+     */
+    @Nullable private MvccSnapshot mvccSnapshot() {
+        if (!cctx.mvccEnabled())
+            return null;
+
+        if (mvccSnapshot != null)
+            return mvccSnapshot;
+
+        MvccSnapshot snapshot = mvccTracker.snapshot();
+
+        assert snapshot != null : "[fut=" + this + ", mvccTracker=" + mvccTracker + "]";
+
+        return snapshot;
+    }
+
+    /**
      * Initializes future.
      *
      * @param topVer Topology version.
@@ -129,17 +162,46 @@
         AffinityTopologyVersion lockedTopVer = cctx.shared().lockedTopologyVersion(null);
 
         if (lockedTopVer != null) {
-            canRemap = false;
+            topVer = lockedTopVer;
 
-            map(keys, Collections.<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>>emptyMap(), lockedTopVer);
+            canRemap = false;
         }
         else {
             topVer = topVer.topologyVersion() > 0 ? topVer :
                 canRemap ? cctx.affinity().affinityTopologyVersion() : cctx.shared().exchange().readyAffinityVersion();
-
-            map(keys, Collections.<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>>emptyMap(), topVer);
         }
 
+        if (!cctx.mvccEnabled() || mvccSnapshot != null)
+            initialMap(topVer);
+        else {
+            mvccTracker = new MvccQueryTrackerImpl(cctx, canRemap);
+
+            trackable = true;
+
+            cctx.mvcc().addFuture(this, futId);
+
+            mvccTracker.requestSnapshot(topVer, this);
+        }
+    }
+
+    @Override public void onResponse(MvccSnapshot res) {
+        AffinityTopologyVersion topVer = mvccTracker.topologyVersion();
+
+        assert topVer != null;
+
+        initialMap(topVer);
+    }
+
+    @Override public void onError(IgniteCheckedException e) {
+        onDone(e);
+    }
+
+    /**
+     * @param topVer Topology version.
+     */
+    private void initialMap(AffinityTopologyVersion topVer) {
+        map(keys, Collections.<ClusterNode, LinkedHashMap<KeyCacheObject, Boolean>>emptyMap(), topVer);
+
         markInitialized();
     }
 
@@ -197,10 +259,12 @@
     /** {@inheritDoc} */
     @Override public boolean onDone(Map<K, V> res, Throwable err) {
         if (super.onDone(res, err)) {
-            // Don't forget to clean up.
             if (trackable)
                 cctx.mvcc().removeFuture(futId);
 
+            if (mvccTracker != null)
+                mvccTracker.onDone();
+
             cache().sendTtlUpdateRequest(expiryPlc);
 
             return true;
@@ -293,7 +357,8 @@
                         taskName == null ? 0 : taskName.hashCode(),
                         expiryPlc,
                         skipVals,
-                        recovery);
+                        recovery,
+                        mvccSnapshot());
 
                 final Collection<Integer> invalidParts = fut.invalidPartitions();
 
@@ -350,7 +415,8 @@
                     false,
                     skipVals,
                     cctx.deploymentEnabled(),
-                    recovery);
+                    recovery,
+                    mvccSnapshot());
 
                 add(fut); // Append new future.
 
@@ -463,7 +529,9 @@
                 GridCacheVersion ver = null;
 
                 if (readNoEntry) {
-                    CacheDataRow row = cctx.offheap().read(cctx, key);
+                    CacheDataRow row = cctx.mvccEnabled() ?
+                        cctx.offheap().mvccRead(cctx, key, mvccSnapshot()) :
+                        cctx.offheap().read(cctx, key);
 
                     if (row != null) {
                         long expireTime = row.expireTime();
@@ -506,6 +574,7 @@
                                 taskName,
                                 expiryPlc,
                                 !deserializeBinary,
+                                mvccSnapshot(),
                                 null);
 
                             if (getRes != null) {
@@ -524,10 +593,11 @@
                                 null,
                                 taskName,
                                 expiryPlc,
-                                !deserializeBinary);
+                                !deserializeBinary,
+                                mvccSnapshot());
                         }
 
-                        cache.context().evicts().touch(entry, topVer);
+                        entry.touch(topVer);
 
                         // Entry was not in memory or in swap, so we remove it from cache.
                         if (v == null) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
index e0aea9a..fd9bc77 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridPartitionedSingleGetFuture.java
@@ -46,6 +46,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -124,6 +125,9 @@
     @GridToStringInclude
     private ClusterNode node;
 
+    /** */
+    protected final MvccSnapshot mvccSnapshot;
+
     /** Post processing closure. */
     private volatile BackupPostProcessingClosure postProcessingClos;
 
@@ -154,9 +158,11 @@
         boolean skipVals,
         boolean needVer,
         boolean keepCacheObjects,
-        boolean recovery
+        boolean recovery,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
         assert key != null;
+        assert mvccSnapshot == null || cctx.mvccEnabled();
 
         AffinityTopologyVersion lockedTopVer = cctx.shared().lockedTopologyVersion(null);
 
@@ -181,6 +187,7 @@
         this.keepCacheObjects = keepCacheObjects;
         this.recovery = recovery;
         this.topVer = topVer;
+        this.mvccSnapshot = mvccSnapshot;
 
         futId = IgniteUuid.randomUuid();
 
@@ -235,7 +242,8 @@
                 taskName == null ? 0 : taskName.hashCode(),
                 expiryPlc,
                 skipVals,
-                recovery);
+                recovery,
+                mvccSnapshot);
 
             final Collection<Integer> invalidParts = fut.invalidPartitions();
 
@@ -305,7 +313,8 @@
                 /*add reader*/false,
                 needVer,
                 cctx.deploymentEnabled(),
-                recovery);
+                recovery,
+                mvccSnapshot);
 
             try {
                 cctx.io().send(node, req, cctx.ioPolicy());
@@ -379,7 +388,8 @@
                 boolean skipEntry = readNoEntry;
 
                 if (readNoEntry) {
-                    CacheDataRow row = cctx.offheap().read(cctx, key);
+                    CacheDataRow row = mvccSnapshot != null ? cctx.offheap().mvccRead(cctx, key, mvccSnapshot) :
+                        cctx.offheap().read(cctx, key);
 
                     if (row != null) {
                         long expireTime = row.expireTime();
@@ -422,6 +432,7 @@
                                 taskName,
                                 expiryPlc,
                                 true,
+                                mvccSnapshot,
                                 null);
 
                             if (res != null) {
@@ -440,10 +451,11 @@
                                 null,
                                 taskName,
                                 expiryPlc,
-                                true);
+                                true,
+                                mvccSnapshot);
                         }
 
-                        colocated.context().evicts().touch(entry, topVer);
+                        entry.touch(topVer);
 
                         // Entry was not in memory or in swap, so we remove it from cache.
                         if (v == null) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/NearTxQueryEnlistResultHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/NearTxQueryEnlistResultHandler.java
new file mode 100644
index 0000000..bb863fb
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/NearTxQueryEnlistResultHandler.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryEnlistResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistResponse;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.lang.GridClosureException;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ *
+ */
+public final class NearTxQueryEnlistResultHandler implements CI1<IgniteInternalFuture<Long>> {
+    /** */
+    private static final long serialVersionUID = 5189735824793607906L;
+
+    /** */
+    private static final NearTxQueryEnlistResultHandler INSTANCE = new NearTxQueryEnlistResultHandler();
+
+    /** */
+    private NearTxQueryEnlistResultHandler() {}
+
+    /**
+     * @return Handler instance.
+     */
+    public static NearTxQueryEnlistResultHandler instance() {
+        return INSTANCE;
+    }
+
+    /**
+     * @param future Enlist future.
+     * @return Enlist response.
+     */
+    @SuppressWarnings("unchecked")
+    public static <T extends GridNearTxQueryEnlistResponse> T createResponse(IgniteInternalFuture<?> future) {
+        assert future != null;
+
+        Class<?> clazz = future.getClass();
+
+        if (clazz == GridDhtTxQueryResultsEnlistFuture.class)
+            return (T)createResponse((GridDhtTxQueryResultsEnlistFuture)future);
+        else if (clazz == GridDhtTxQueryEnlistFuture.class)
+            return (T)createResponse((GridDhtTxQueryEnlistFuture)future);
+        else
+            throw new IllegalStateException();
+    }
+
+    /**
+     * @param future Enlist future.
+     * @return Enlist response.
+     */
+    @NotNull private static GridNearTxQueryEnlistResponse createResponse(GridDhtTxQueryEnlistFuture future) {
+        try {
+            future.get();
+
+            assert future.tx.queryEnlisted() || future.cnt == 0;
+
+            return new GridNearTxQueryEnlistResponse(future.cctx.cacheId(), future.nearFutId, future.nearMiniId,
+                future.nearLockVer, future.cnt, future.tx.empty() && !future.tx.queryEnlisted());
+        }
+        catch (IgniteCheckedException e) {
+            return new GridNearTxQueryEnlistResponse(future.cctx.cacheId(), future.nearFutId, future.nearMiniId, future.nearLockVer, e);
+        }
+    }
+
+    /**
+     * @param future Enlist future.
+     * @return Enlist response.
+     */
+    @NotNull private static GridNearTxQueryResultsEnlistResponse createResponse(GridDhtTxQueryResultsEnlistFuture future) {
+        try {
+            future.get();
+
+            GridCacheVersion ver = null;
+            IgniteUuid id = null;
+            GridLongList updCntrs = null;
+
+            if (future.hasNearNodeUpdates) {
+                ver = future.cctx.tm().mappedVersion(future.nearLockVer);
+                id = future.futId;
+                updCntrs = future.nearUpdCntrs;
+            }
+
+            return new GridNearTxQueryResultsEnlistResponse(future.cctx.cacheId(), future.nearFutId, future.nearMiniId,
+                future.nearLockVer, future.cnt, ver, id, updCntrs);
+        }
+        catch (IgniteCheckedException e) {
+            return new GridNearTxQueryResultsEnlistResponse(future.cctx.cacheId(), future.nearFutId, future.nearMiniId,
+                future.nearLockVer, e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void apply(IgniteInternalFuture<Long> fut0) {
+        GridDhtTxAbstractEnlistFuture fut = (GridDhtTxAbstractEnlistFuture)fut0;
+
+        GridCacheContext<?, ?> cctx = fut.cctx;
+        GridDhtTxLocal tx = (GridDhtTxLocal)fut.tx;
+        UUID nearNodeId = fut.nearNodeId;
+
+        GridNearTxQueryEnlistResponse res = createResponse(fut);
+
+        if (res.removeMapping()) {
+            // TODO IGNITE-9133
+            tx.rollbackDhtLocalAsync().listen(new CI1<IgniteInternalFuture<IgniteInternalTx>>() {
+                @Override public void apply(IgniteInternalFuture<IgniteInternalTx> fut0) {
+                    try {
+                        cctx.io().send(nearNodeId, res, cctx.ioPolicy());
+                    }
+                    catch (IgniteCheckedException e) {
+                        U.error(fut.log, "Failed to send near enlist response [" +
+                            "tx=" + CU.txString(tx) +
+                            ", node=" + nearNodeId +
+                            ", res=" + res + ']', e);
+
+                        throw new GridClosureException(e);
+                    }
+                }
+            });
+
+            return;
+        }
+
+        try {
+            cctx.io().send(nearNodeId, res, cctx.ioPolicy());
+        }
+        catch (IgniteCheckedException e) {
+            U.error(fut.log, "Failed to send near enlist response (will rollback transaction) [" +
+                "tx=" + CU.txString(tx) +
+                ", node=" + nearNodeId +
+                ", res=" + res + ']', e);
+
+            try {
+                tx.rollbackDhtLocalAsync();
+            }
+            catch (Throwable e1) {
+                e.addSuppressed(e1);
+            }
+
+            throw new GridClosureException(e);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionUpdateCounters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionUpdateCounters.java
new file mode 100644
index 0000000..5b1eccd
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionUpdateCounters.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.nio.ByteBuffer;
+import java.util.Map;
+import org.apache.ignite.internal.GridDirectMap;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * Partition update counters message.
+ */
+public class PartitionUpdateCounters implements Message {
+    /** */
+    private static final long serialVersionUID = 193442457510062844L;
+
+    /** Map of update counters made by this tx. Mapping: partId -> updCntr. */
+    @GridDirectMap(keyType = Integer.class, valueType = Long.class)
+    private Map<Integer, Long> updCntrs;
+
+    /** */
+    public PartitionUpdateCounters() {
+        // No-op.
+    }
+
+    /**
+     * @param updCntrs Update counters map.
+     */
+    public PartitionUpdateCounters(Map<Integer, Long> updCntrs) {
+        this.updCntrs = updCntrs;
+    }
+
+    /**
+     * @return Update counters.
+     */
+    public Map<Integer, Long> updateCounters() {
+        return updCntrs;
+    }
+
+    /**
+     * @param updCntrs Update counters.
+     */
+    public void updateCounters(Map<Integer, Long> updCntrs) {
+        this.updCntrs = updCntrs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeMap("updCntrs", updCntrs, MessageCollectionItemType.INT, MessageCollectionItemType.LONG))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                updCntrs = reader.readMap("updCntrs", MessageCollectionItemType.INT, MessageCollectionItemType.LONG, false);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(PartitionUpdateCounters.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 157;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionsEvictManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionsEvictManager.java
new file mode 100644
index 0000000..780ca91
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionsEvictManager.java
@@ -0,0 +1,569 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.Collection;
+import java.util.Comparator;
+import java.util.Map;
+import java.util.Queue;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.PriorityBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.managers.communication.GridIoPolicy;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedManagerAdapter;
+import org.apache.ignite.internal.util.GridConcurrentHashSet;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.internal.LT;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_EVICTION_PERMITS;
+import static org.apache.ignite.IgniteSystemProperties.getInteger;
+import static org.apache.ignite.IgniteSystemProperties.getLong;
+
+/**
+ * Class that serves asynchronous part eviction process.
+ * Multiple partition from group can be evicted at the same time.
+ */
+public class PartitionsEvictManager extends GridCacheSharedManagerAdapter {
+
+    /** Default eviction progress show frequency. */
+    private static final int DEFAULT_SHOW_EVICTION_PROGRESS_FREQ_MS = 2 * 60 * 1000; // 2 Minutes.
+
+    /** Eviction progress frequency property name. */
+    private static final String SHOW_EVICTION_PROGRESS_FREQ = "SHOW_EVICTION_PROGRESS_FREQ";
+
+    /** Eviction thread pool policy. */
+    private static final byte EVICT_POOL_PLC = GridIoPolicy.SYSTEM_POOL;
+
+    /** Eviction progress frequency in ms. */
+    private final long evictionProgressFreqMs = getLong(SHOW_EVICTION_PROGRESS_FREQ, DEFAULT_SHOW_EVICTION_PROGRESS_FREQ_MS);
+
+    /** */
+    private final int confPermits = getInteger(IGNITE_EVICTION_PERMITS, -1);
+
+    /** Next time of show eviction progress. */
+    private long nextShowProgressTime;
+
+    private final Map<Integer, GroupEvictionContext> evictionGroupsMap = new ConcurrentHashMap<>();
+
+    /** Flag indicates that eviction process has stopped. */
+    private volatile boolean stop;
+
+    /** Check stop eviction context. */
+    private final EvictionContext sharedEvictionContext = () -> stop;
+
+    /** Number of maximum concurrent operations. */
+    private volatile int threads;
+
+    /** How many eviction task may execute concurrent. */
+    private volatile int permits;
+
+    /** Bucket queue for load balance partitions to the threads via count of partition size.
+     *  Is not thread-safe.
+     *  All method should be called under mux synchronization.
+     */
+    private volatile BucketQueue evictionQueue;
+
+    /** Lock object. */
+    private final Object mux = new Object();
+
+    /**
+     * Stops eviction process for group.
+     *
+     * Method awaits last offered partition eviction.
+     *
+     * @param grp Group context.
+     */
+    public void onCacheGroupStopped(CacheGroupContext  grp){
+        GroupEvictionContext groupEvictionContext = evictionGroupsMap.remove(grp.groupId());
+
+        if (groupEvictionContext != null){
+            groupEvictionContext.stop();
+
+            groupEvictionContext.awaitFinishAll();
+        }
+    }
+
+    /**
+     * Adds partition to eviction queue and starts eviction process if permit available.
+     *
+     * @param grp Group context.
+     * @param part Partition to evict.
+     */
+    public void evictPartitionAsync(CacheGroupContext grp, GridDhtLocalPartition part) {
+        // Check node stop.
+        if (sharedEvictionContext.shouldStop())
+            return;
+
+        GroupEvictionContext groupEvictionContext = evictionGroupsMap.computeIfAbsent(
+            grp.groupId(), (k) -> new GroupEvictionContext(grp));
+
+        PartitionEvictionTask evictionTask = groupEvictionContext.createEvictPartitionTask(part);
+
+        if (evictionTask == null)
+            return;
+
+        int bucket;
+
+        synchronized (mux) {
+            bucket = evictionQueue.offer(evictionTask);
+        }
+
+        scheduleNextPartitionEviction(bucket);
+    }
+
+    /**
+     * Gets next partition from the queue and schedules it for eviction.
+     *
+     * @param bucket Bucket.
+     */
+    private void scheduleNextPartitionEviction(int bucket) {
+        // Check node stop.
+        if (sharedEvictionContext.shouldStop())
+            return;
+
+        synchronized (mux) {
+            // Check that we have permits for next operation.
+            if (permits > 0) {
+                // If queue is empty not need to do.
+                if (evictionQueue.isEmpty())
+                    return;
+
+                // Get task until we have permits.
+                while (permits >= 0) {
+                    // Get task from bucket.
+                    PartitionEvictionTask evictionTask = evictionQueue.poll(bucket);
+
+                    // If bucket empty try get from another.
+                    if (evictionTask == null) {
+                        // Until queue have tasks.
+                        while (!evictionQueue.isEmpty()) {
+                            // Get task from any other bucket.
+                            evictionTask = evictionQueue.pollAny();
+
+                            // Stop iteration if we found task.
+                            if (evictionTask != null)
+                                break;
+                        }
+
+                        // If task not found no need to do some.
+                        if (evictionTask == null)
+                            return;
+                    }
+
+                    // Print current eviction progress.
+                    showProgress();
+
+                    GroupEvictionContext groupEvictionContext = evictionTask.groupEvictionContext;
+
+                    // Check that group or node stopping.
+                    if (groupEvictionContext.shouldStop())
+                        continue;
+
+                    // Get permit for this task.
+                    permits--;
+
+                    // Register task future, may need if group or node will be stopped.
+                    groupEvictionContext.taskScheduled(evictionTask);
+
+                    evictionTask.finishFut.listen(f -> {
+                        synchronized (mux) {
+                            // Return permit after task completed.
+                            permits++;
+                        }
+
+                        // Re-schedule new one task form same bucket.
+                        scheduleNextPartitionEviction(bucket);
+                    });
+
+                    // Submit task to executor.
+                     cctx.kernalContext()
+                        .closure()
+                        .runLocalSafe(evictionTask, EVICT_POOL_PLC);
+                }
+            }
+        }
+    }
+
+    /**
+     * Shows progress of eviction.
+     */
+    private void showProgress() {
+        if (U.currentTimeMillis() >= nextShowProgressTime) {
+            int size = evictionQueue.size() + 1; // Queue size plus current partition.
+
+            if (log.isInfoEnabled())
+                log.info("Eviction in progress [permits=" + permits+
+                    ", threads=" + threads +
+                    ", groups=" + evictionGroupsMap.keySet().size() +
+                    ", remainingPartsToEvict=" + size + "]");
+
+            evictionGroupsMap.values().forEach(GroupEvictionContext::showProgress);
+
+            nextShowProgressTime = U.currentTimeMillis() + evictionProgressFreqMs;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void start0() throws IgniteCheckedException {
+        super.start0();
+
+        // If property is not setup, calculate permits as parts of sys pool.
+        if (confPermits == -1) {
+            int sysPoolSize = cctx.kernalContext().config().getSystemThreadPoolSize();
+
+            threads = permits = sysPoolSize / 4;
+        }
+        else
+            threads = permits = confPermits;
+
+        // Avoid 0 permits if sys pool size less that 4.
+        if (threads == 0)
+            threads = permits = 1;
+
+        log.info("Evict partition permits=" + permits);
+
+        evictionQueue = new BucketQueue(threads);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void stop0(boolean cancel) {
+        super.stop0(cancel);
+
+        stop = true;
+
+        Collection<GroupEvictionContext> evictionGrps = evictionGroupsMap.values();
+
+        evictionGrps.forEach(GroupEvictionContext::stop);
+
+        evictionGrps.forEach(GroupEvictionContext::awaitFinishAll);
+    }
+
+    /**
+     *
+     */
+    private class GroupEvictionContext implements EvictionContext {
+        /** */
+        private final CacheGroupContext grp;
+
+        /** Deduplicate set partition ids. */
+        private final Set<Integer> partIds = new GridConcurrentHashSet<>();
+
+        /** Future for currently running partition eviction task. */
+        private final Map<Integer, IgniteInternalFuture<?>> partsEvictFutures = new ConcurrentHashMap<>();
+
+        /** Flag indicates that eviction process has stopped for this group. */
+        private volatile boolean stop;
+
+        /** Total partition to evict. */
+        private AtomicInteger totalTasks = new AtomicInteger();
+
+        /** Total partition evict in progress. */
+        private int taskInProgress;
+
+        /**
+         * @param grp Group context.
+         */
+        private GroupEvictionContext(CacheGroupContext grp) {
+            this.grp = grp;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean shouldStop() {
+            return stop || sharedEvictionContext.shouldStop();
+        }
+
+        /**
+         *
+         * @param part Grid local partition.
+         */
+        private PartitionEvictionTask createEvictPartitionTask(GridDhtLocalPartition part){
+            if (shouldStop() || !partIds.add(part.id()))
+                return null;
+
+            totalTasks.incrementAndGet();
+
+            return new PartitionEvictionTask(part, this);
+        }
+
+        /**
+         *
+         * @param task Partition eviction task.
+         */
+        private synchronized void taskScheduled(PartitionEvictionTask task) {
+            if (shouldStop())
+                return;
+
+            taskInProgress++;
+
+            GridFutureAdapter<?> fut = task.finishFut;
+
+            int partId = task.part.id();
+
+            partsEvictFutures.put(partId, fut);
+
+            fut.listen(f -> {
+                synchronized (this) {
+                    taskInProgress--;
+
+                    partsEvictFutures.remove(partId, f);
+
+                    if (totalTasks.decrementAndGet() == 0)
+                        evictionGroupsMap.remove(grp.groupId());
+                }
+            });
+        }
+
+        /**
+         * Stop eviction for group.
+         */
+        private void stop() {
+            stop = true;
+        }
+
+        /**
+         * Await evict finish.
+         */
+        private void awaitFinishAll(){
+            partsEvictFutures.forEach(this::awaitFinish);
+
+            evictionGroupsMap.remove(grp.groupId());
+        }
+
+        /**
+         * Await evict finish partition.
+         */
+        private void awaitFinish(Integer part, IgniteInternalFuture<?> fut) {
+            // Wait for last offered partition eviction completion
+            try {
+                log.info("Await partition evict, grpName=" + grp.cacheOrGroupName() +
+                    ", grpId=" + grp.groupId() + ", partId=" + part);
+
+                fut.get();
+            }
+            catch (IgniteCheckedException e) {
+                if (log.isDebugEnabled())
+                    log.warning("Failed to await partition eviction during stopping.", e);
+            }
+        }
+
+        /**
+         * Shows progress group of eviction.
+         */
+        private void showProgress() {
+            if (log.isInfoEnabled())
+                log.info("Group eviction in progress [grpName=" + grp.cacheOrGroupName()+
+                    ", grpId=" + grp.groupId() +
+                    ", remainingPartsToEvict=" + (totalTasks.get() - taskInProgress) +
+                    ", partsEvictInProgress=" + taskInProgress +
+                    ", totalParts= " + grp.topology().localPartitions().size() + "]");
+        }
+    }
+
+    /**
+     * Task for self-scheduled partition eviction / clearing.
+     */
+    private class PartitionEvictionTask implements Runnable {
+        /** Partition to evict. */
+        private final GridDhtLocalPartition part;
+
+        private final long size;
+
+        /** Eviction context. */
+        private final GroupEvictionContext groupEvictionContext;
+
+        /** */
+        private final GridFutureAdapter<?> finishFut = new GridFutureAdapter<>();
+
+        /**
+         * @param part Partition.
+         */
+        private PartitionEvictionTask(
+            GridDhtLocalPartition part,
+            GroupEvictionContext groupEvictionContext
+        ) {
+            this.part = part;
+            this.groupEvictionContext = groupEvictionContext;
+
+            size = part.fullSize();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            if (groupEvictionContext.shouldStop()) {
+                finishFut.onDone();
+
+                return;
+            }
+
+            try {
+                boolean success = part.tryClear(groupEvictionContext);
+
+                if (success) {
+                    if (part.state() == GridDhtPartitionState.EVICTED && part.markForDestroy())
+                        part.destroy();
+                }
+                else // Re-offer partition if clear was unsuccessful due to partition reservation.
+                    evictionQueue.offer(this);
+
+                // Complete eviction future before schedule new to prevent deadlock with
+                // simultaneous eviction stopping and scheduling new eviction.
+                finishFut.onDone();
+            }
+            catch (Throwable ex) {
+                finishFut.onDone(ex);
+
+                if (cctx.kernalContext().isStopping()) {
+                    LT.warn(log, ex, "Partition eviction failed (current node is stopping).",
+                        false,
+                        true);
+                }
+                else{
+                    LT.error(log, ex, "Partition eviction failed, this can cause grid hang.");
+                }
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private class BucketQueue {
+        /** Queues contains partitions scheduled for eviction. */
+        private final Queue<PartitionEvictionTask>[] buckets;
+
+        /** */
+        private final long[] bucketSizes;
+
+        /**
+         * @param buckets Number of buckets.
+         */
+        BucketQueue(int buckets) {
+            this.buckets = new Queue[buckets];
+
+            for (int i = 0; i < buckets; i++)
+                this.buckets[i] = createEvictPartitionQueue();
+
+            bucketSizes = new long[buckets];
+        }
+
+        /**
+         * Poll eviction task from queue for specific bucket.
+         *
+         * @param bucket Bucket index.
+         * @return Partition evict task, or {@code null} if bucket queue is empty.
+         */
+        PartitionEvictionTask poll(int bucket) {
+            PartitionEvictionTask task = buckets[bucket].poll();
+
+            if (task != null)
+                bucketSizes[bucket] -= task.size;
+
+            return task;
+        }
+
+        /**
+         * Poll eviction task from queue (bucket is not specific).
+         *
+         * @return Partition evict task.
+         */
+        PartitionEvictionTask pollAny() {
+            for (int bucket = 0; bucket < bucketSizes.length; bucket++){
+                if (!buckets[bucket].isEmpty())
+                    return poll(bucket);
+            }
+
+            return null;
+        }
+
+        /**
+         * Offer task to queue.
+         *
+         * @return Bucket index.
+         */
+        int offer(PartitionEvictionTask task) {
+            int bucket = calculateBucket();
+
+            buckets[bucket].offer(task);
+
+            bucketSizes[bucket] += task.size;
+
+            return bucket;
+        }
+
+
+        /**
+         * @return {@code True} if queue is empty, {@code} False if not empty.
+         */
+        boolean isEmpty(){
+            return size() == 0;
+        }
+
+        /**
+         * @return Queue size.
+         */
+        int size(){
+            int size = 0;
+
+            for (Queue<PartitionEvictionTask> queue : buckets) {
+                size += queue.size();
+            }
+
+            return size;
+        }
+
+        /***
+         * @return Bucket index.
+         */
+        private int calculateBucket() {
+            int min = 0;
+
+            for (int bucket = min; bucket < bucketSizes.length; bucket++) {
+                if (bucketSizes[min] > bucketSizes[bucket])
+                    min = bucket;
+            }
+
+            return min;
+        }
+
+        /**
+         * 0 - PRIORITY QUEUE (compare by partition size).
+         * default (any other values) - FIFO.
+         */
+        private static final byte QUEUE_TYPE = 1;
+
+        /**
+         *
+         * @return Queue for evict partitions.
+         */
+        private Queue<PartitionEvictionTask> createEvictPartitionQueue() {
+            switch (QUEUE_TYPE) {
+                case 1:
+                    return new PriorityBlockingQueue<>(
+                        1000, Comparator.comparingLong(p -> p.part.fullSize()));
+                default:
+                    return new LinkedBlockingQueue<>();
+            }
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/DhtAtomicUpdateResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/DhtAtomicUpdateResult.java
index e7d2b199..15db625 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/DhtAtomicUpdateResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/DhtAtomicUpdateResult.java
@@ -45,6 +45,12 @@
     private IgniteCacheExpiryPolicy expiry;
 
     /**
+     * If batch update was interrupted in the middle, it should be continued from processedEntriesCount to avoid
+     * extra update closure invocation.
+     */
+    private int processedEntriesCount;
+
+    /**
      *
      */
     DhtAtomicUpdateResult() {
@@ -97,11 +103,20 @@
     /**
      * @return Deleted entries.
      */
-    Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted() {
+    public Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted() {
         return deleted;
     }
 
     /**
+     * Sets deleted entries.
+     *
+     * @param deleted deleted entries.
+     */
+    void deleted(Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted) {
+        this.deleted = deleted;
+    }
+
+    /**
      * @return DHT future.
      */
     GridDhtAtomicAbstractUpdateFuture dhtFuture() {
@@ -128,4 +143,20 @@
     void dhtFuture(@Nullable GridDhtAtomicAbstractUpdateFuture dhtFut) {
         this.dhtFut = dhtFut;
     }
+
+    /**
+     * Sets processed entries count.
+     * @param idx processed entries count.
+     */
+    public void processedEntriesCount(int idx) {
+        processedEntriesCount = idx;
+    }
+
+    /**
+     * Returns processed entries count.
+     * @return processed entries count.
+     */
+    public int processedEntriesCount() {
+        return processedEntriesCount;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
index c39842e..7bc04dc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java
@@ -37,10 +37,11 @@
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.NodeStoppingException;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
 import org.apache.ignite.internal.UnregisteredClassException;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
 import org.apache.ignite.internal.mem.IgniteOutOfMemoryException;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.affinity.AffinityAssignment;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.CacheEntryPredicate;
@@ -64,7 +65,6 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl;
-import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtFuture;
@@ -82,6 +82,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetResponse;
 import org.apache.ignite.internal.processors.cache.dr.GridCacheDrExpirationInfo;
 import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext;
@@ -110,6 +111,7 @@
 import org.apache.ignite.lang.IgniteRunnable;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.security.SecurityPermission;
+import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.jetbrains.annotations.Nullable;
 
@@ -1420,7 +1422,8 @@
             skipVals,
             needVer,
             false,
-            recovery);
+            recovery,
+            null);
 
         fut.init();
 
@@ -1529,6 +1532,7 @@
                                             taskName,
                                             expiry,
                                             true,
+                                            null,
                                             null);
 
                                         if (getRes != null) {
@@ -1547,7 +1551,8 @@
                                             null,
                                             taskName,
                                             expiry,
-                                            !deserializeBinary);
+                                            !deserializeBinary,
+                                            null);
                                     }
 
                                     // Entry was not in memory or in swap, so we remove it from cache.
@@ -1587,7 +1592,7 @@
                             }
                             finally {
                                 if (entry != null)
-                                    ctx.evicts().touch(entry, topVer);
+                                    entry.touch(topVer);
                             }
                         }
                     }
@@ -1624,7 +1629,8 @@
             expiry,
             skipVals,
             needVer,
-            false);
+            false,
+            null);
 
         fut.init(topVer);
 
@@ -1741,6 +1747,8 @@
 
             Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
 
+            DhtAtomicUpdateResult  updDhtRes = new DhtAtomicUpdateResult();
+
             try {
                 while (true) {
                     try {
@@ -1769,11 +1777,11 @@
                             }
 
                             if (!remap) {
-                                DhtAtomicUpdateResult updRes = update(node, locked, req, res);
+                                update(node, locked, req, res, updDhtRes);
 
-                                dhtFut = updRes.dhtFuture();
-                                deleted = updRes.deleted();
-                                expiry = updRes.expiryPolicy();
+                                dhtFut = updDhtRes.dhtFuture();
+                                deleted = updDhtRes.deleted();
+                                expiry = updDhtRes.expiryPolicy();
                             }
                             else
                                 // Should remap all keys.
@@ -1783,6 +1791,10 @@
                             top.readUnlock();
                         }
 
+                        // This call will convert entry processor invocation results to cache object instances.
+                        // Must be done outside topology read lock to avoid deadlocks.
+                        res.returnValue().marshalResult(ctx);
+
                         break;
                     }
                     catch (UnregisteredClassException ex) {
@@ -1790,7 +1802,16 @@
 
                         assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;
 
-                        ((CacheObjectBinaryProcessorImpl)cacheObjProc).binaryContext().descriptorForClass(ex.cls(), false, false);
+                        ((CacheObjectBinaryProcessorImpl)cacheObjProc)
+                            .binaryContext().descriptorForClass(ex.cls(), false, false);
+                    }
+                    catch (UnregisteredBinaryTypeException ex) {
+                        IgniteCacheObjectProcessor cacheObjProc = ctx.cacheObjects();
+
+                        assert cacheObjProc instanceof CacheObjectBinaryProcessorImpl;
+
+                        ((CacheObjectBinaryProcessorImpl)cacheObjProc)
+                            .binaryContext().updateMetadata(ex.typeId(), ex.binaryMetadata(), false);
                     }
                 }
             }
@@ -1863,6 +1884,7 @@
      * @param locked Entries.
      * @param req Request.
      * @param res Response.
+     * @param dhtUpdRes DHT update result
      * @return Operation result.
      * @throws GridCacheEntryRemovedException If got obsolete entry.
      */
@@ -1870,7 +1892,8 @@
         ClusterNode node,
         List<GridDhtCacheEntry> locked,
         GridNearAtomicAbstractUpdateRequest req,
-        GridNearAtomicUpdateResponse res)
+        GridNearAtomicUpdateResponse res,
+        DhtAtomicUpdateResult dhtUpdRes)
         throws GridCacheEntryRemovedException
     {
         GridDhtPartitionTopology top = topology();
@@ -1894,14 +1917,13 @@
 
         boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
 
-        GridDhtAtomicAbstractUpdateFuture dhtFut = createDhtFuture(ver, req);
+        if (dhtUpdRes.dhtFuture() == null)
+            dhtUpdRes.dhtFuture(createDhtFuture(ver, req));
 
         IgniteCacheExpiryPolicy expiry = expiryPolicy(req.expiry());
 
         GridCacheReturn retVal = null;
 
-        DhtAtomicUpdateResult updRes;
-
         if (req.size() > 1 &&                    // Several keys ...
             writeThrough() && !req.skipStore() && // and store is enabled ...
             !ctx.store().isLocal() &&             // and this is not local store ...
@@ -1909,40 +1931,39 @@
             !ctx.dr().receiveEnabled()            // and no DR.
             ) {
             // This method can only be used when there are no replicated entries in the batch.
-            updRes = updateWithBatch(node,
+            updateWithBatch(node,
                 hasNear,
                 req,
                 res,
                 locked,
                 ver,
-                dhtFut,
                 ctx.isDrEnabled(),
                 taskName,
                 expiry,
-                sndPrevVal);
-
-            dhtFut = updRes.dhtFuture();
+                sndPrevVal,
+                dhtUpdRes);
 
             if (req.operation() == TRANSFORM)
-                retVal = updRes.returnValue();
+                retVal = dhtUpdRes.returnValue();
         }
         else {
-            updRes = updateSingle(node,
+            updateSingle(node,
                 hasNear,
                 req,
                 res,
                 locked,
                 ver,
-                dhtFut,
                 ctx.isDrEnabled(),
                 taskName,
                 expiry,
-                sndPrevVal);
+                sndPrevVal,
+                dhtUpdRes);
 
-            retVal = updRes.returnValue();
-            dhtFut = updRes.dhtFuture();
+            retVal = dhtUpdRes.returnValue();
         }
 
+        GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
+
         if (retVal == null)
             retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
 
@@ -1955,7 +1976,7 @@
                 && !dhtFut.isDone()) {
                 final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
 
-                if (tracker != null && tracker instanceof GridNioMessageTracker) {
+                if (tracker instanceof GridNioMessageTracker) {
                     ((GridNioMessageTracker)tracker).onMessageReceived();
 
                     dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
@@ -1969,9 +1990,9 @@
             ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
         }
 
-        updRes.expiryPolicy(expiry);
+        dhtUpdRes.expiryPolicy(expiry);
 
-        return updRes;
+        return dhtUpdRes;
     }
 
     /**
@@ -1983,27 +2004,26 @@
      * @param res Update response.
      * @param locked Locked entries.
      * @param ver Assigned version.
-     * @param dhtFut Optional DHT future.
      * @param replicate Whether replication is enabled.
      * @param taskName Task name.
      * @param expiry Expiry policy.
      * @param sndPrevVal If {@code true} sends previous value to backups.
-     * @return Deleted entries.
+     * @param dhtUpdRes DHT update result.
      * @throws GridCacheEntryRemovedException Should not be thrown.
      */
     @SuppressWarnings("unchecked")
-    private DhtAtomicUpdateResult updateWithBatch(
+    private void updateWithBatch(
         final ClusterNode node,
         final boolean hasNear,
         final GridNearAtomicAbstractUpdateRequest req,
         final GridNearAtomicUpdateResponse res,
         final List<GridDhtCacheEntry> locked,
         final GridCacheVersion ver,
-        @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
         final boolean replicate,
         final String taskName,
         @Nullable final IgniteCacheExpiryPolicy expiry,
-        final boolean sndPrevVal
+        final boolean sndPrevVal,
+        final DhtAtomicUpdateResult dhtUpdRes
     ) throws GridCacheEntryRemovedException {
         assert !ctx.dr().receiveEnabled(); // Cannot update in batches during DR due to possible conflicts.
         assert !req.returnValue() || req.operation() == TRANSFORM; // Should not request return values for putAll.
@@ -2015,7 +2035,7 @@
             catch (IgniteCheckedException e) {
                 res.addFailedKeys(req.keys(), e);
 
-                return new DhtAtomicUpdateResult();
+                return;
             }
         }
 
@@ -2029,8 +2049,6 @@
 
         List<CacheObject> writeVals = null;
 
-        DhtAtomicUpdateResult updRes = new DhtAtomicUpdateResult();
-
         List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
 
         GridCacheOperation op = req.operation();
@@ -2041,7 +2059,7 @@
 
         boolean intercept = ctx.config().getInterceptor() != null;
 
-        for (int i = 0; i < locked.size(); i++) {
+        for (int i = dhtUpdRes.processedEntriesCount(); i < locked.size(); i++) {
             GridDhtCacheEntry entry = locked.get(i);
 
             try {
@@ -2083,7 +2101,8 @@
                         entryProcessor,
                         taskName,
                         null,
-                        req.keepBinary());
+                        req.keepBinary(),
+                        null);
 
                     Object oldVal = null;
                     Object updatedVal = null;
@@ -2100,6 +2119,8 @@
 
                     boolean validation = false;
 
+                    IgniteThread.onEntryProcessorEntered(true);
+
                     try {
                         Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
 
@@ -2126,6 +2147,9 @@
                         }
                     }
                     catch (Exception e) {
+                        if (e instanceof UnregisteredClassException || e instanceof UnregisteredBinaryTypeException)
+                            throw (IgniteException) e;
+
                         curInvokeRes = CacheInvokeResult.fromError(e);
 
                         updated = old;
@@ -2137,6 +2161,8 @@
                         }
                     }
                     finally {
+                        IgniteThread.onEntryProcessorLeft();
+
                         if (curInvokeRes != null) {
                             invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), curInvokeRes.result(),
                                 curInvokeRes.error(), req.keepBinary());
@@ -2155,7 +2181,7 @@
 
                         // Update previous batch.
                         if (putMap != null) {
-                            dhtFut = updatePartialBatch(
+                            updatePartialBatch(
                                 hasNear,
                                 firstEntryIdx,
                                 filtered,
@@ -2165,11 +2191,10 @@
                                 putMap,
                                 null,
                                 entryProcessorMap,
-                                dhtFut,
                                 req,
                                 res,
                                 replicate,
-                                updRes,
+                                dhtUpdRes,
                                 taskName,
                                 expiry,
                                 sndPrevVal);
@@ -2203,7 +2228,7 @@
 
                         // Update previous batch.
                         if (rmvKeys != null) {
-                            dhtFut = updatePartialBatch(
+                            updatePartialBatch(
                                 hasNear,
                                 firstEntryIdx,
                                 filtered,
@@ -2213,11 +2238,10 @@
                                 null,
                                 rmvKeys,
                                 entryProcessorMap,
-                                dhtFut,
                                 req,
                                 res,
                                 replicate,
-                                updRes,
+                                dhtUpdRes,
                                 taskName,
                                 expiry,
                                 sndPrevVal);
@@ -2258,7 +2282,8 @@
                             null,
                             taskName,
                             null,
-                            req.keepBinary());
+                            req.keepBinary(),
+                            null);
 
                         Object val = ctx.config().getInterceptor().onBeforePut(
                             new CacheLazyEntry(
@@ -2303,7 +2328,8 @@
                             null,
                             taskName,
                             null,
-                            req.keepBinary());
+                            req.keepBinary(),
+                            null);
 
                         IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor()
                             .onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
@@ -2327,7 +2353,7 @@
 
         // Store final batch.
         if (putMap != null || rmvKeys != null) {
-            dhtFut = updatePartialBatch(
+            updatePartialBatch(
                 hasNear,
                 firstEntryIdx,
                 filtered,
@@ -2337,11 +2363,10 @@
                 putMap,
                 rmvKeys,
                 entryProcessorMap,
-                dhtFut,
                 req,
                 res,
                 replicate,
-                updRes,
+                dhtUpdRes,
                 taskName,
                 expiry,
                 sndPrevVal);
@@ -2349,11 +2374,7 @@
         else
             assert filtered.isEmpty();
 
-        updRes.dhtFuture(dhtFut);
-
-        updRes.returnValue(invokeRes);
-
-        return updRes;
+        dhtUpdRes.returnValue(invokeRes);
     }
 
     /**
@@ -2416,29 +2437,30 @@
      * @param res Update response.
      * @param locked Locked entries.
      * @param ver Assigned update version.
-     * @param dhtFut Optional DHT future.
      * @param replicate Whether DR is enabled for that cache.
      * @param taskName Task name.
      * @param expiry Expiry policy.
      * @param sndPrevVal If {@code true} sends previous value to backups.
-     * @return Return value.
+     * @param dhtUpdRes Dht update result
      * @throws GridCacheEntryRemovedException Should be never thrown.
      */
-    private DhtAtomicUpdateResult updateSingle(
+    private void updateSingle(
         ClusterNode nearNode,
         boolean hasNear,
         GridNearAtomicAbstractUpdateRequest req,
         GridNearAtomicUpdateResponse res,
         List<GridDhtCacheEntry> locked,
         GridCacheVersion ver,
-        @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
         boolean replicate,
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiry,
-        boolean sndPrevVal
+        boolean sndPrevVal,
+        DhtAtomicUpdateResult dhtUpdRes
     ) throws GridCacheEntryRemovedException {
-        GridCacheReturn retVal = null;
-        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
+        GridCacheReturn retVal = dhtUpdRes.returnValue();
+        GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
+        Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = dhtUpdRes.deleted();
+
 
         AffinityTopologyVersion topVer = req.topologyVersion();
 
@@ -2447,7 +2469,7 @@
         AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
 
         // Avoid iterator creation.
-        for (int i = 0; i < req.size(); i++) {
+        for (int i = dhtUpdRes.processedEntriesCount(); i < req.size(); i++) {
             KeyCacheObject k = req.key(i);
 
             GridCacheOperation op = req.operation();
@@ -2611,9 +2633,13 @@
             catch (IgniteCheckedException e) {
                 res.addFailedKey(k, e);
             }
+
+            dhtUpdRes.processedEntriesCount(i + 1);
         }
 
-        return new DhtAtomicUpdateResult(retVal, deleted, dhtFut);
+        dhtUpdRes.returnValue(retVal);
+        dhtUpdRes.deleted(deleted);
+        dhtUpdRes.dhtFuture(dhtFut);
     }
 
     /**
@@ -2626,18 +2652,16 @@
      * @param putMap Values to put.
      * @param rmvKeys Keys to remove.
      * @param entryProcessorMap Entry processors.
-     * @param dhtFut DHT update future if has backups.
      * @param req Request.
      * @param res Response.
      * @param replicate Whether replication is enabled.
-     * @param batchRes Batch update result.
+     * @param dhtUpdRes Batch update result.
      * @param taskName Task name.
      * @param expiry Expiry policy.
      * @param sndPrevVal If {@code true} sends previous value to backups.
-     * @return Deleted entries.
      */
     @SuppressWarnings("ForLoopReplaceableByForEach")
-    @Nullable private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
+    @Nullable private void updatePartialBatch(
         final boolean hasNear,
         final int firstEntryIdx,
         final List<GridDhtCacheEntry> entries,
@@ -2647,11 +2671,10 @@
         @Nullable final Map<KeyCacheObject, CacheObject> putMap,
         @Nullable final Collection<KeyCacheObject> rmvKeys,
         @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
-        @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
         final GridNearAtomicAbstractUpdateRequest req,
         final GridNearAtomicUpdateResponse res,
         final boolean replicate,
-        final DhtAtomicUpdateResult batchRes,
+        final DhtAtomicUpdateResult dhtUpdRes,
         final String taskName,
         @Nullable final IgniteCacheExpiryPolicy expiry,
         final boolean sndPrevVal
@@ -2699,6 +2722,8 @@
 
             AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
 
+            final GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
+
             // Avoid iterator creation.
             for (int i = 0; i < entries.size(); i++) {
                 GridDhtCacheEntry entry = entries.get(i);
@@ -2778,7 +2803,7 @@
                         }
                     }
 
-                    batchRes.addDeleted(entry, updRes, entries);
+                    dhtUpdRes.addDeleted(entry, updRes, entries);
 
                     if (dhtFut != null) {
 
@@ -2838,7 +2863,10 @@
 
                     e.printStackTrace();
                 }
+
+                dhtUpdRes.processedEntriesCount(firstEntryIdx + i + 1);
             }
+
         }
         catch (IgniteCheckedException e) {
             res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
@@ -2852,8 +2880,6 @@
 
             res.addFailedKeys(failed, storeErr.getCause());
         }
-
-        return dhtFut;
     }
 
     /**
@@ -2979,7 +3005,7 @@
         for (int i = 0; i < size; i++) {
             GridCacheMapEntry entry = locked.get(i);
             if (entry != null && (skip == null || !skip.contains(entry.key())))
-                ctx.evicts().touch(entry, topVer);
+                entry.touch(topVer);
         }
     }
 
@@ -3259,7 +3285,7 @@
                         }
                         finally {
                             if (entry != null)
-                                ctx.evicts().touch(entry, req.topologyVersion());
+                                entry.touch(req.topologyVersion());
                         }
                     }
                 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java
index b2f9218..82a7964 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicSingleUpdateFuture.java
@@ -682,7 +682,7 @@
     }
 
     /** {@inheritDoc} */
-    public synchronized String toString() {
+    @Override public synchronized String toString() {
         return S.toString(GridNearAtomicSingleUpdateFuture.class, this, super.toString());
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java
index 78725d6..fd6b63e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridNearAtomicUpdateFuture.java
@@ -1159,7 +1159,7 @@
     }
 
     /** {@inheritDoc} */
-    public synchronized String toString() {
+    @Override public synchronized String toString() {
         return S.toString(GridNearAtomicUpdateFuture.class, this, super.toString());
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java
index 14d3866..f6de594 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedCache.java
@@ -42,6 +42,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException;
@@ -241,7 +242,8 @@
             skipVals,
             needVer,
             /*keepCacheObjects*/false,
-            opCtx != null && opCtx.recovery());
+            opCtx != null && opCtx.recovery(),
+            null);
 
         fut.init();
 
@@ -319,7 +321,7 @@
      * @param needVer Need version.
      * @return Loaded values.
      */
-    public IgniteInternalFuture<Map<K, V>> loadAsync(
+    private IgniteInternalFuture<Map<K, V>> loadAsync(
         @Nullable Collection<KeyCacheObject> keys,
         boolean readThrough,
         boolean forcePrimary,
@@ -341,7 +343,8 @@
             expiryPlc,
             skipVals,
             needVer,
-            false);
+            false,
+            null);
     }
 
     /**
@@ -370,7 +373,8 @@
         boolean skipVals,
         boolean needVer,
         boolean keepCacheObj,
-        boolean recovery
+        boolean recovery,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
         GridPartitionedSingleGetFuture fut = new GridPartitionedSingleGetFuture(ctx,
             ctx.toCacheKeyObject(key),
@@ -384,7 +388,8 @@
             skipVals,
             needVer,
             keepCacheObj,
-            recovery);
+            recovery,
+            mvccSnapshot);
 
         fut.init();
 
@@ -403,6 +408,7 @@
      * @param skipVals Skip values flag.
      * @param needVer If {@code true} returns values as tuples containing value and version.
      * @param keepCacheObj Keep cache objects flag.
+     * @param mvccSnapshot Mvcc snapshot.
      * @return Load future.
      */
     public final IgniteInternalFuture<Map<K, V>> loadAsync(
@@ -417,8 +423,11 @@
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean skipVals,
         boolean needVer,
-        boolean keepCacheObj
+        boolean keepCacheObj,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
+        assert mvccSnapshot == null || ctx.mvccEnabled();
+
         if (keys == null || keys.isEmpty())
             return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
 
@@ -426,7 +435,7 @@
             expiryPlc = expiryPolicy(null);
 
         // Optimization: try to resolve value locally and escape 'get future' creation.
-        if (!forcePrimary && ctx.affinityNode()) {
+        if (!forcePrimary && ctx.affinityNode() && (!ctx.mvccEnabled() || mvccSnapshot != null)) {
             try {
                 Map<K, V> locVals = null;
 
@@ -499,6 +508,7 @@
                                             taskName,
                                             expiryPlc,
                                             !deserializeBinary,
+                                            mvccSnapshot,
                                             null);
 
                                         if (getRes != null) {
@@ -517,7 +527,8 @@
                                             null,
                                             taskName,
                                             expiryPlc,
-                                            !deserializeBinary);
+                                            !deserializeBinary,
+                                            mvccSnapshot);
                                     }
 
                                     // Entry was not in memory or in swap, so we remove it from cache.
@@ -562,7 +573,7 @@
                             }
                             finally {
                                 if (entry != null)
-                                    context().evicts().touch(entry, topVer);
+                                    entry.touch(topVer);
                             }
                         }
                     }
@@ -600,7 +611,8 @@
             expiryPlc,
             skipVals,
             needVer,
-            keepCacheObj);
+            keepCacheObj,
+            mvccSnapshot);
 
         fut.init(topVer);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
index da0858f..9dbb8be 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/colocated/GridDhtColocatedLockFuture.java
@@ -23,7 +23,6 @@
 import java.util.Deque;
 import java.util.HashSet;
 import java.util.Iterator;
-import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
@@ -134,7 +133,7 @@
 
     /** Timeout object. */
     @GridToStringExclude
-    private LockTimeoutObject timeoutObj;
+    private volatile LockTimeoutObject timeoutObj;
 
     /** Lock timeout. */
     private final long timeout;
@@ -180,6 +179,9 @@
     /** */
     private int miniId;
 
+    /** {@code True} when mappings are ready for processing. */
+    private boolean mappingsReady;
+
     /**
      * @param cctx Registry.
      * @param keys Keys to lock.
@@ -444,32 +446,59 @@
      * @param nodeId Sender.
      * @param res Result.
      */
+    @SuppressWarnings("SynchronizeOnNonFinalField")
     void onResult(UUID nodeId, GridNearLockResponse res) {
-        if (!isDone()) {
-            MiniFuture mini = miniFuture(res.miniId());
+        boolean done = isDone();
 
-            if (mini != null) {
-                assert mini.node().id().equals(nodeId);
-
-                mini.onResult(res);
+        if (!done) {
+            // onResult is always called after map() and timeoutObj is never reset to null, so this is
+            // a race-free null check.
+            if (timeoutObj == null) {
+                onResult0(nodeId, res);
 
                 return;
             }
 
-            //  This warning can be triggered by deadlock detection code which clears pending futures.
-            U.warn(msgLog, "Collocated lock fut, failed to find mini future [txId=" + lockVer +
-                ", tx=" + (inTx() ? CU.txString(tx) : "N/A") +
-                ", node=" + nodeId +
-                ", res=" + res +
-                ", fut=" + this + ']');
-        }
-        else {
-            if (msgLog.isDebugEnabled()) {
-                msgLog.debug("Collocated lock fut, response for finished future [txId=" + lockVer +
-                    ", inTx=" + inTx() +
-                    ", node=" + nodeId + ']');
+            synchronized (timeoutObj) {
+                if (!isDone()) {
+                    if (onResult0(nodeId, res))
+                        return;
+                }
+                else
+                    done = true;
             }
         }
+
+        if (done && msgLog.isDebugEnabled()) {
+            msgLog.debug("Collocated lock fut, response for finished future [txId=" + lockVer +
+                ", inTx=" + inTx() +
+                ", node=" + nodeId + ']');
+        }
+    }
+
+    /**
+     * @param nodeId Sender.
+     * @param res Result.
+     */
+    private boolean onResult0(UUID nodeId, GridNearLockResponse res) {
+        MiniFuture mini = miniFuture(res.miniId());
+
+        if (mini != null) {
+            assert mini.node().id().equals(nodeId);
+
+            mini.onResult(res);
+
+            return true;
+        }
+
+        //  This warning can be triggered by deadlock detection code which clears pending futures.
+        U.warn(msgLog, "Collocated lock fut, failed to find mini future [txId=" + lockVer +
+            ", tx=" + (inTx() ? CU.txString(tx) : "N/A") +
+            ", node=" + nodeId +
+            ", res=" + res +
+            ", fut=" + this + ']');
+
+        return false;
     }
 
     /**
@@ -547,9 +576,24 @@
      * Cancellation has special meaning for lock futures. It's called then lock must be released on rollback.
      */
     @Override public boolean cancel() {
-        if (inTx())
+        if (inTx()) {
             onError(tx.rollbackException());
 
+            /** Should wait until {@link mappings} are ready before continuing with async rollback
+             * or some primary nodes might not receive tx finish messages because of race.
+             * If prepare phase has not started waiting is not necessary.
+             */
+            synchronized (this) {
+                while (!mappingsReady)
+                    try {
+                        wait();
+                    }
+                    catch (InterruptedException e) {
+                        // Ignore interrupts.
+                    }
+            }
+        }
+
         return onComplete(false, true);
     }
 
@@ -610,6 +654,17 @@
             if (timeoutObj != null)
                 cctx.time().removeTimeoutObject(timeoutObj);
 
+            /** Ensures what waiters for ready {@link mappings} will be unblocked if error has occurred while mapping. */
+            if (tx != null) {
+                synchronized (this) {
+                    if (!mappingsReady) {
+                        mappingsReady = true;
+
+                        notifyAll();
+                    }
+                }
+            }
+
             return true;
         }
 
@@ -809,19 +864,15 @@
                 markInitialized();
             }
             else {
-                fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
-                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
-                        try {
-                            fut.get();
+                cctx.time().waitAsync(fut, tx == null ? 0 : tx.remainingTime(), (e, timedOut) -> {
+                    if (errorOrTimeoutOnTopologyVersion(e, timedOut))
+                        return;
 
-                            mapOnTopology(remap, c);
-                        }
-                        catch (IgniteCheckedException e) {
-                            onDone(e);
-                        }
-                        finally {
-                            cctx.shared().txContextReset();
-                        }
+                    try {
+                        mapOnTopology(remap, c);
+                    }
+                    finally {
+                        cctx.shared().txContextReset();
                     }
                 });
             }
@@ -863,225 +914,235 @@
         boolean remap,
         boolean topLocked
     ) throws IgniteCheckedException {
-        AffinityTopologyVersion topVer = this.topVer;
+        try {
+            AffinityTopologyVersion topVer = this.topVer;
 
-        assert topVer != null;
+            assert topVer != null;
 
-        assert topVer.topologyVersion() > 0;
+            assert topVer.topologyVersion() > 0;
 
-        if (CU.affinityNodes(cctx, topVer).isEmpty()) {
-            onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
-                "(all partition nodes left the grid): " + cctx.name()));
+            if (CU.affinityNodes(cctx, topVer).isEmpty()) {
+                onDone(new ClusterTopologyServerNotFoundException("Failed to map keys for cache " +
+                    "(all partition nodes left the grid): " + cctx.name()));
 
-            return;
-        }
-
-        boolean clientNode = cctx.kernalContext().clientNode();
-
-        assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
-
-        // First assume this node is primary for all keys passed in.
-        if (!clientNode && mapAsPrimary(keys, topVer))
-            return;
-
-        mappings = new ArrayDeque<>();
-
-        // Assign keys to primary nodes.
-        GridNearLockMapping map = null;
-
-        for (KeyCacheObject key : keys) {
-            GridNearLockMapping updated = map(key, map, topVer);
-
-            // If new mapping was created, add to collection.
-            if (updated != map) {
-                mappings.add(updated);
-
-                if (tx != null && updated.node().isLocal())
-                    tx.colocatedLocallyMapped(true);
+                return;
             }
 
-            map = updated;
-        }
+            boolean clientNode = cctx.kernalContext().clientNode();
 
-        if (isDone()) {
-            if (log.isDebugEnabled())
-                log.debug("Abandoning (re)map because future is done: " + this);
+            assert !remap || (clientNode && (tx == null || !tx.hasRemoteLocks()));
 
-            return;
-        }
+            // First assume this node is primary for all keys passed in.
+            if (!clientNode && mapAsPrimary(keys, topVer))
+                return;
 
-        if (log.isDebugEnabled())
-            log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
+            mappings = new ArrayDeque<>();
 
-        boolean hasRmtNodes = false;
+            // Assign keys to primary nodes.
+            GridNearLockMapping map = null;
 
-        boolean first = true;
+            for (KeyCacheObject key : keys) {
+                GridNearLockMapping updated = map(key, map, topVer);
 
-        // Create mini futures.
-        for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
-            GridNearLockMapping mapping = iter.next();
+                // If new mapping was created, add to collection.
+                if (updated != map) {
+                    mappings.add(updated);
 
-            ClusterNode node = mapping.node();
-            Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
-
-            boolean loc = node.equals(cctx.localNode());
-
-            assert !mappedKeys.isEmpty();
-
-            GridNearLockRequest req = null;
-
-            Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
-
-            for (KeyCacheObject key : mappedKeys) {
-                IgniteTxKey txKey = cctx.txKey(key);
-
-                GridDistributedCacheEntry entry = null;
-
-                if (tx != null) {
-                    IgniteTxEntry txEntry = tx.entry(txKey);
-
-                    if (txEntry != null) {
-                        entry = (GridDistributedCacheEntry)txEntry.cached();
-
-                        if (entry != null && loc == entry.detached()) {
-                            entry = cctx.colocated().entryExx(key, topVer, true);
-
-                            txEntry.cached(entry);
-                        }
-                    }
+                    if (tx != null && updated.node().isLocal())
+                        tx.colocatedLocallyMapped(true);
                 }
 
-                boolean explicit;
+                map = updated;
+            }
 
-                while (true) {
-                    try {
-                        if (entry == null)
-                            entry = cctx.colocated().entryExx(key, topVer, true);
+            if (isDone()) {
+                if (log.isDebugEnabled())
+                    log.debug("Abandoning (re)map because future is done: " + this);
 
-                        if (!cctx.isAll(entry, filter)) {
-                            if (log.isDebugEnabled())
-                                log.debug("Entry being locked did not pass filter (will not lock): " + entry);
+                return;
+            }
 
-                            onComplete(false, false);
+            if (log.isDebugEnabled())
+                log.debug("Starting (re)map for mappings [mappings=" + mappings + ", fut=" + this + ']');
 
-                            return;
+            boolean hasRmtNodes = false;
+
+            boolean first = true;
+
+            // Create mini futures.
+            for (Iterator<GridNearLockMapping> iter = mappings.iterator(); iter.hasNext(); ) {
+                GridNearLockMapping mapping = iter.next();
+
+                ClusterNode node = mapping.node();
+                Collection<KeyCacheObject> mappedKeys = mapping.mappedKeys();
+
+                boolean loc = node.equals(cctx.localNode());
+
+                assert !mappedKeys.isEmpty();
+
+                GridNearLockRequest req = null;
+
+                Collection<KeyCacheObject> distributedKeys = new ArrayList<>(mappedKeys.size());
+
+                for (KeyCacheObject key : mappedKeys) {
+                    IgniteTxKey txKey = cctx.txKey(key);
+
+                    GridDistributedCacheEntry entry = null;
+
+                    if (tx != null) {
+                        IgniteTxEntry txEntry = tx.entry(txKey);
+
+                        if (txEntry != null) {
+                            entry = (GridDistributedCacheEntry)txEntry.cached();
+
+                            if (entry != null && loc == entry.detached()) {
+                                entry = cctx.colocated().entryExx(key, topVer, true);
+
+                                txEntry.cached(entry);
+                            }
                         }
+                    }
 
-                        assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';
+                    boolean explicit;
 
-                        GridCacheMvccCandidate cand = addEntry(entry);
+                    while (true) {
+                        try {
+                            if (entry == null)
+                                entry = cctx.colocated().entryExx(key, topVer, true);
 
-                        // Will either return value from dht cache or null if this is a miss.
-                        IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
-                            ((GridDhtCacheEntry)entry).versionedValue(topVer);
+                            if (!cctx.isAll(entry, filter)) {
+                                if (log.isDebugEnabled())
+                                    log.debug("Entry being locked did not pass filter (will not lock): " + entry);
 
-                        GridCacheVersion dhtVer = null;
+                                onComplete(false, false);
 
-                        if (val != null) {
-                            dhtVer = val.get1();
-
-                            valMap.put(key, val);
-                        }
-
-                        if (cand != null && !cand.reentry()) {
-                            if (req == null) {
-                                boolean clientFirst = false;
-
-                                if (first) {
-                                    clientFirst = clientNode &&
-                                        !topLocked &&
-                                        (tx == null || !tx.hasRemoteLocks());
-
-                                    first = false;
-                                }
-
-                                assert !implicitTx() && !implicitSingleTx() : tx;
-
-                                req = new GridNearLockRequest(
-                                    cctx.cacheId(),
-                                    topVer,
-                                    cctx.nodeId(),
-                                    threadId,
-                                    futId,
-                                    lockVer,
-                                    inTx(),
-                                    read,
-                                    retval,
-                                    isolation(),
-                                    isInvalidate(),
-                                    timeout,
-                                    mappedKeys.size(),
-                                    inTx() ? tx.size() : mappedKeys.size(),
-                                    inTx() && tx.syncMode() == FULL_SYNC,
-                                    inTx() ? tx.subjectId() : null,
-                                    inTx() ? tx.taskNameHash() : 0,
-                                    read ? createTtl : -1L,
-                                    read ? accessTtl : -1L,
-                                    skipStore,
-                                    keepBinary,
-                                    clientFirst,
-                                    false,
-                                    cctx.deploymentEnabled());
-
-                                mapping.request(req);
+                                return;
                             }
 
-                            distributedKeys.add(key);
+                            assert loc ^ entry.detached() : "Invalid entry [loc=" + loc + ", entry=" + entry + ']';
 
-                            if (tx != null)
+                            GridCacheMvccCandidate cand = addEntry(entry);
+
+                            // Will either return value from dht cache or null if this is a miss.
+                            IgniteBiTuple<GridCacheVersion, CacheObject> val = entry.detached() ? null :
+                                ((GridDhtCacheEntry)entry).versionedValue(topVer);
+
+                            GridCacheVersion dhtVer = null;
+
+                            if (val != null) {
+                                dhtVer = val.get1();
+
+                                valMap.put(key, val);
+                            }
+
+                            if (cand != null && !cand.reentry()) {
+                                if (req == null) {
+                                    boolean clientFirst = false;
+
+                                    if (first) {
+                                        clientFirst = clientNode &&
+                                            !topLocked &&
+                                            (tx == null || !tx.hasRemoteLocks());
+
+                                        first = false;
+                                    }
+
+                                    assert !implicitTx() && !implicitSingleTx() : tx;
+
+                                    req = new GridNearLockRequest(
+                                        cctx.cacheId(),
+                                        topVer,
+                                        cctx.nodeId(),
+                                        threadId,
+                                        futId,
+                                        lockVer,
+                                        inTx(),
+                                        read,
+                                        retval,
+                                        isolation(),
+                                        isInvalidate(),
+                                        timeout,
+                                        mappedKeys.size(),
+                                        inTx() ? tx.size() : mappedKeys.size(),
+                                        inTx() && tx.syncMode() == FULL_SYNC,
+                                        inTx() ? tx.subjectId() : null,
+                                        inTx() ? tx.taskNameHash() : 0,
+                                        read ? createTtl : -1L,
+                                        read ? accessTtl : -1L,
+                                        skipStore,
+                                        keepBinary,
+                                        clientFirst,
+                                        false,
+                                        cctx.deploymentEnabled());
+
+                                    mapping.request(req);
+                                }
+
+                                distributedKeys.add(key);
+
+                                if (tx != null)
+                                    tx.addKeyMapping(txKey, mapping.node());
+
+                                req.addKeyBytes(
+                                    key,
+                                    retval,
+                                    dhtVer, // Include DHT version to match remote DHT entry.
+                                    cctx);
+                            }
+
+                            explicit = inTx() && cand == null;
+
+                            if (explicit)
                                 tx.addKeyMapping(txKey, mapping.node());
 
-                            req.addKeyBytes(
-                                key,
-                                retval,
-                                dhtVer, // Include DHT version to match remote DHT entry.
-                                cctx);
+                            break;
                         }
+                        catch (GridCacheEntryRemovedException ignored) {
+                            if (log.isDebugEnabled())
+                                log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
 
-                        explicit = inTx() && cand == null;
-
-                        if (explicit)
-                            tx.addKeyMapping(txKey, mapping.node());
-
-                        break;
+                            entry = null;
+                        }
                     }
-                    catch (GridCacheEntryRemovedException ignored) {
-                        if (log.isDebugEnabled())
-                            log.debug("Got removed entry in lockAsync(..) method (will retry): " + entry);
 
-                        entry = null;
+                    // Mark mapping explicit lock flag.
+                    if (explicit) {
+                        boolean marked = tx != null && tx.markExplicit(node.id());
+
+                        assert tx == null || marked;
                     }
                 }
 
-                // Mark mapping explicit lock flag.
-                if (explicit) {
-                    boolean marked = tx != null && tx.markExplicit(node.id());
+                if (!distributedKeys.isEmpty()) {
+                    mapping.distributedKeys(distributedKeys);
 
-                    assert tx == null || marked;
+                    hasRmtNodes |= !mapping.node().isLocal();
+                }
+                else {
+                    assert mapping.request() == null;
+
+                    iter.remove();
                 }
             }
 
-            if (!distributedKeys.isEmpty()) {
-                mapping.distributedKeys(distributedKeys);
+            if (hasRmtNodes) {
+                trackable = true;
 
-                hasRmtNodes |= !mapping.node().isLocal();
+                if (!remap && !cctx.mvcc().addFuture(this))
+                    throw new IllegalStateException("Duplicate future ID: " + this);
             }
-            else {
-                assert mapping.request() == null;
+            else
+                trackable = false;
+        }
+        finally {
+            /** Notify ready {@link mappings} waiters. See {@link #cancel()} */
+            if (tx != null) {
+                mappingsReady = true;
 
-                iter.remove();
+                notifyAll();
             }
         }
 
-        if (hasRmtNodes) {
-            trackable = true;
-
-            if (!remap && !cctx.mvcc().addFuture(this))
-                throw new IllegalStateException("Duplicate future ID: " + this);
-        }
-        else
-            trackable = false;
-
         proceedMapping();
     }
 
@@ -1110,11 +1171,27 @@
         throws IgniteCheckedException {
         GridNearLockMapping map;
 
+        // Fail fast if future is completed (in case of async rollback)
+        if (isDone()) {
+            clear();
+
+            return;
+        }
+
+        // Fail fast if the transaction is timed out.
+        if (tx != null && tx.remainingTime() == -1) {
+            GridDhtColocatedLockFuture.this.onDone(false, tx.timeoutException());
+
+            clear();
+
+            return;
+        }
+
         synchronized (this) {
             map = mappings.poll();
         }
 
-        // If there are no more mappings to process, complete the future.
+        // If there are no more mappings to process or prepare has timed out, complete the future.
         if (map == null)
             return;
 
@@ -1401,6 +1478,23 @@
     }
 
     /**
+     * @param e Exception.
+     * @param timedOut {@code True} if timed out.
+     */
+    private boolean errorOrTimeoutOnTopologyVersion(IgniteCheckedException e, boolean timedOut) {
+        if (e != null || timedOut) {
+            // Can timeout only if tx is not null.
+            assert e != null || tx != null : "Timeout is possible only in transaction";
+
+            onDone(e == null ? tx.timeoutException() : e);
+
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
      * Lock request timeout object.
      */
     private class LockTimeoutObject extends GridTimeoutObjectAdapter {
@@ -1452,15 +1546,20 @@
                                 U.warn(log, "Failed to detect deadlock.", e);
                             }
 
-                            onComplete(false, true);
+                            synchronized (LockTimeoutObject.this) {
+                                onComplete(false, true);
+                            }
                         }
                     });
                 }
                 else
                     err = tx.timeoutException();
             }
-            else
-                onComplete(false, true);
+            else {
+                synchronized (this) {
+                    onComplete(false, true);
+                }
+            }
         }
 
         /** {@inheritDoc} */
@@ -1593,25 +1692,17 @@
                 IgniteInternalFuture<?> affFut =
                     cctx.shared().exchange().affinityReadyFuture(res.clientRemapVersion());
 
-                if (affFut != null && !affFut.isDone()) {
-                    affFut.listen(new CI1<IgniteInternalFuture<?>>() {
-                        @Override public void apply(IgniteInternalFuture<?> fut) {
-                            try {
-                                fut.get();
+                cctx.time().waitAsync(affFut, tx == null ? 0 : tx.remainingTime(), (e, timedOut) -> {
+                    if (errorOrTimeoutOnTopologyVersion(e, timedOut))
+                        return;
 
-                                remap();
-                            }
-                            catch (IgniteCheckedException e) {
-                                onDone(e);
-                            }
-                            finally {
-                                cctx.shared().txContextReset();
-                            }
-                        }
-                    });
-                }
-                else
-                    remap();
+                    try {
+                        remap();
+                    }
+                    finally {
+                        cctx.shared().txContextReset();
+                    }
+                });
             }
             else {
                 int i = 0;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java
index 8a1ffb4..7da4051 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CacheGroupAffinityMessage.java
@@ -28,6 +28,7 @@
 import org.apache.ignite.internal.managers.discovery.DiscoCache;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.typedef.F;
@@ -144,7 +145,8 @@
         GridCacheSharedContext cctx,
         AffinityTopologyVersion topVer,
         Collection<Integer> affReq,
-        @Nullable Map<Integer, CacheGroupAffinityMessage> cachesAff) {
+        @Nullable Map<Integer, CacheGroupAffinityMessage> cachesAff
+    ) {
         assert !F.isEmpty(affReq) : affReq;
 
         if (cachesAff == null)
@@ -152,7 +154,18 @@
 
         for (Integer grpId : affReq) {
             if (!cachesAff.containsKey(grpId)) {
-                GridAffinityAssignmentCache aff = cctx.affinity().affinity(grpId);
+                GridAffinityAssignmentCache aff = cctx.affinity().groupAffinity(grpId);
+
+                // If no coordinator group holder on the node, try fetch affinity from existing cache group.
+                if (aff == null) {
+                    CacheGroupContext grp = cctx.cache().cacheGroup(grpId);
+
+                    assert grp != null : "No cache group holder or cache group to create AffinityMessage"
+                        + ". Requested group id: " + grpId
+                        + ". Topology version: " + topVer;
+
+                    aff = grp.affinity();
+                }
 
                 List<List<ClusterNode>> assign = aff.readyAssignments(topVer);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CachePartitionPartialCountersMap.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CachePartitionPartialCountersMap.java
index c8cf3f8..9fc7f94 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CachePartitionPartialCountersMap.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/CachePartitionPartialCountersMap.java
@@ -225,7 +225,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         StringBuilder sb = new StringBuilder("CachePartitionPartialCountersMap {");
 
         for (int i = 0; i < partIds.length; i++) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
index fe216a0..3b03958 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtForceKeysFuture.java
@@ -42,6 +42,9 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.util.F0;
 import org.apache.ignite.internal.util.GridLeanSet;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
@@ -271,8 +274,8 @@
 
                         assert !n.id().equals(loc.id());
 
-                        if (log.isDebugEnabled())
-                            log.debug("Sending force key request [cacheName=" + cctx.name() + "node=" + n.id() +
+                        if (log.isTraceEnabled())
+                            log.trace("Sending force key request [cacheName=" + cctx.name() + "node=" + n.id() +
                                 ", req=" + req + ']');
 
                         cctx.io().send(n, req, cctx.ioPolicy());
@@ -307,10 +310,10 @@
 
         try {
             if (e != null && !e.isNewLocked()) {
-                if (log.isDebugEnabled()) {
+                if (log.isTraceEnabled()) {
                     int part = cctx.affinity().partition(key);
 
-                    log.debug("Will not rebalance key (entry is not new) [cacheName=" + cctx.name() +
+                    log.trace("Will not rebalance key (entry is not new) [cacheName=" + cctx.name() +
                         ", key=" + key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
                 }
 
@@ -319,8 +322,8 @@
             }
         }
         catch (GridCacheEntryRemovedException ignore) {
-            if (log.isDebugEnabled())
-                log.debug("Received removed DHT entry for force keys request [entry=" + e +
+            if (log.isTraceEnabled())
+                log.trace("Received removed DHT entry for force keys request [entry=" + e +
                     ", locId=" + cctx.nodeId() + ']');
         }
 
@@ -330,8 +333,8 @@
             new ArrayList<>(F.view(top.owners(part, topVer), F.notIn(exc)));
 
         if (owners.isEmpty() || (owners.contains(loc) && cctx.rebalanceEnabled())) {
-            if (log.isDebugEnabled())
-                log.debug("Will not rebalance key (local node is owner) [key=" + key + ", part=" + part +
+            if (log.isTraceEnabled())
+                log.trace("Will not rebalance key (local node is owner) [key=" + key + ", part=" + part +
                     "topVer=" + topVer + ", locId=" + cctx.nodeId() + ']');
 
             // Key is already rebalanced.
@@ -341,8 +344,8 @@
         // Create partition.
         GridDhtLocalPartition locPart = top.localPartition(part, topVer, false);
 
-        if (log.isDebugEnabled())
-            log.debug("Mapping local partition [loc=" + cctx.localNodeId() + ", topVer" + topVer +
+        if (log.isTraceEnabled())
+            log.trace("Mapping local partition [loc=" + cctx.localNodeId() + ", topVer" + topVer +
                 ", part=" + locPart + ", owners=" + owners + ", allOwners=" + U.toShortString(top.owners(part)) + ']');
 
         if (locPart == null)
@@ -359,8 +362,8 @@
                 pick = F.first(F.view(owners, F.remoteNodes(loc.id())));
 
             if (pick == null) {
-                if (log.isDebugEnabled())
-                    log.debug("Will not rebalance key (no nodes to request from with rebalancing disabled) [key=" +
+                if (log.isTraceEnabled())
+                    log.trace("Will not rebalance key (no nodes to request from with rebalancing disabled) [key=" +
                         key + ", part=" + part + ", locId=" + cctx.nodeId() + ']');
 
                 return mappings;
@@ -375,15 +378,15 @@
 
             mappedKeys.add(key);
 
-            if (log.isDebugEnabled())
-                log.debug("Will rebalance key from node [cacheName=" + cctx.name() + ", key=" + key + ", part=" +
+            if (log.isTraceEnabled())
+                log.trace("Will rebalance key from node [cacheName=" + cctx.name() + ", key=" + key + ", part=" +
                     part + ", node=" + pick.id() + ", locId=" + cctx.nodeId() + ']');
         }
         else if (locPart.state() != OWNING)
             invalidParts.add(part);
         else {
-            if (log.isDebugEnabled())
-                log.debug("Will not rebalance key (local partition is not MOVING) [cacheName=" + cctx.name() +
+            if (log.isTraceEnabled())
+                log.trace("Will not rebalance key (local partition is not MOVING) [cacheName=" + cctx.name() +
                     ", key=" + key + ", part=" + locPart + ", locId=" + cctx.nodeId() + ']');
         }
 
@@ -537,6 +540,10 @@
                         if (entry.initialValue(
                             info.value(),
                             info.version(),
+                            cctx.mvccEnabled() ? ((MvccVersionAware)info).mvccVersion() : null,
+                            cctx.mvccEnabled() ? ((MvccUpdateVersionAware)info).newMvccVersion() : null,
+                            cctx.mvccEnabled() ? ((MvccVersionAware)entry).mvccTxState() : TxState.NA,
+                            cctx.mvccEnabled() ? ((MvccUpdateVersionAware)entry).newMvccTxState() : TxState.NA,
                             info.ttl(),
                             info.expireTime(),
                             true,
@@ -556,8 +563,8 @@
                         return;
                     }
                     catch (GridCacheEntryRemovedException ignore) {
-                        if (log.isDebugEnabled())
-                            log.debug("Trying to rebalance removed entry (will ignore) [cacheName=" +
+                        if (log.isTraceEnabled())
+                            log.trace("Trying to rebalance removed entry (will ignore) [cacheName=" +
                                 cctx.name() + ", entry=" + entry + ']');
                     }
                     finally {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java
index 54d3c93..29573cb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java
@@ -52,6 +52,9 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
 import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
@@ -685,10 +688,20 @@
         if (log.isDebugEnabled())
             log.debug("Received supply message [grp=" + grp.cacheOrGroupName() + ", msg=" + supply + ']');
 
-        // Check whether there were class loading errors on unmarshal
+        // Check whether there were error during supply message unmarshalling process.
         if (supply.classError() != null) {
             U.warn(log, "Rebalancing from node cancelled [grp=" + grp.cacheOrGroupName() + ", node=" + nodeId +
-                "]. Class got undeployed during preloading: " + supply.classError());
+                "]. Supply message couldn't be unmarshalled: " + supply.classError());
+
+            fut.cancel(nodeId);
+
+            return;
+        }
+
+        // Check whether there were error during supplying process.
+        if (supply.error() != null) {
+            U.warn(log, "Rebalancing from node cancelled [grp=" + grp.cacheOrGroupName() + ", node=" + nodeId +
+                "]. Supplier has failed with error: " + supply.error());
 
             fut.cancel(nodeId);
 
@@ -758,9 +771,9 @@
                                         GridCacheEntryInfo entry = infos.next();
 
                                         if (!preloadEntry(node, p, entry, topVer)) {
-                                            if (log.isDebugEnabled())
-                                                log.debug("Got entries for invalid partition during " +
-                                                        "preloading (will skip) [p=" + p + ", entry=" + entry + ']');
+                                            if (log.isTraceEnabled())
+                                                log.trace("Got entries for invalid partition during " +
+                                                    "preloading (will skip) [p=" + p + ", entry=" + entry + ']');
 
                                             break;
                                         }
@@ -869,15 +882,22 @@
             try {
                 GridCacheContext cctx = grp.sharedGroup() ? ctx.cacheContext(entry.cacheId()) : grp.singleCacheContext();
 
-                cached = cctx.dhtCache().entryEx(entry.key());
+                if (cctx.isNear())
+                    cctx = cctx.dhtCache().context();
 
-                if (log.isDebugEnabled())
-                    log.debug("Rebalancing key [key=" + entry.key() + ", part=" + p + ", node=" + from.id() + ']');
+                cached = cctx.cache().entryEx(entry.key());
+
+                if (log.isTraceEnabled())
+                    log.trace("Rebalancing key [key=" + entry.key() + ", part=" + p + ", node=" + from.id() + ']');
 
                 if (preloadPred == null || preloadPred.apply(entry)) {
                     if (cached.initialValue(
                         entry.value(),
                         entry.version(),
+                        cctx.mvccEnabled() ? ((MvccVersionAware)entry).mvccVersion() : null,
+                        cctx.mvccEnabled() ? ((MvccUpdateVersionAware)entry).newMvccVersion() : null,
+                        cctx.mvccEnabled() ? ((MvccVersionAware)entry).mvccTxState() : TxState.NA,
+                        cctx.mvccEnabled() ? ((MvccUpdateVersionAware)entry).newMvccTxState() : TxState.NA,
                         entry.ttl(),
                         entry.expireTime(),
                         true,
@@ -885,7 +905,7 @@
                         cctx.isDrEnabled() ? DR_PRELOAD : DR_NONE,
                         false
                     )) {
-                        cctx.evicts().touch(cached, topVer); // Start tracking.
+                        cached.touch(topVer); // Start tracking.
 
                         if (cctx.events().isRecordable(EVT_CACHE_REBALANCE_OBJECT_LOADED) && !cached.isInternal())
                             cctx.events().addEvent(cached.partition(), cached.key(), cctx.localNodeId(),
@@ -893,19 +913,19 @@
                                 false, null, null, null, true);
                     }
                     else {
-                        cctx.evicts().touch(cached, topVer); // Start tracking.
+                        cached.touch(topVer); // Start tracking.
 
-                        if (log.isDebugEnabled())
-                            log.debug("Rebalancing entry is already in cache (will ignore) [key=" + cached.key() +
+                        if (log.isTraceEnabled())
+                            log.trace("Rebalancing entry is already in cache (will ignore) [key=" + cached.key() +
                                 ", part=" + p + ']');
                     }
                 }
-                else if (log.isDebugEnabled())
-                    log.debug("Rebalance predicate evaluated to false for entry (will ignore): " + entry);
+                else if (log.isTraceEnabled())
+                    log.trace("Rebalance predicate evaluated to false for entry (will ignore): " + entry);
             }
             catch (GridCacheEntryRemovedException ignored) {
-                if (log.isDebugEnabled())
-                    log.debug("Entry has been concurrently removed while rebalancing (will ignore) [key=" +
+                if (log.isTraceEnabled())
+                    log.trace("Entry has been concurrently removed while rebalancing (will ignore) [key=" +
                         cached.key() + ", part=" + p + ']');
             }
             catch (GridDhtInvalidPartitionException ignored) {
@@ -1245,7 +1265,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(RebalanceFuture.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java
index ea7f4c9..9b099ae 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplier.java
@@ -33,10 +33,15 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.CacheGroupContext;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryInfo;
+import org.apache.ignite.internal.processors.cache.GridCacheMvccEntryInfo;
 import org.apache.ignite.internal.processors.cache.IgniteRebalanceIterator;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.typedef.T3;
@@ -213,9 +218,11 @@
         if (node == null)
             return;
 
-        try {
-            SupplyContext sctx;
+        IgniteRebalanceIterator iter = null;
 
+        SupplyContext sctx = null;
+
+        try {
             synchronized (scMap) {
                 sctx = scMap.remove(contextId);
 
@@ -224,7 +231,7 @@
                     scMap.put(contextId, sctx);
 
                     if (log.isDebugEnabled())
-                        log.debug("Stale demand message [grp=" + grp.cacheOrGroupName()
+                        log.debug("Stale demand message [cache=" + grp.cacheOrGroupName()
                             + ", actualContext=" + sctx
                             + ", from=" + nodeId
                             + ", demandMsg=" + d + "]");
@@ -236,7 +243,7 @@
             // Demand request should not contain empty partitions if no supply context is associated with it.
             if (sctx == null && (d.partitions() == null || d.partitions().isEmpty())) {
                 if (log.isDebugEnabled())
-                    log.debug("Empty demand message [grp=" + grp.cacheOrGroupName()
+                    log.debug("Empty demand message [cache=" + grp.cacheOrGroupName()
                         + ", from=" + nodeId
                         + ", topicId=" + topicId
                         + ", demandMsg=" + d + "]");
@@ -267,8 +274,6 @@
                     d.topologyVersion(),
                     grp.deploymentEnabled());
 
-            IgniteRebalanceIterator iter;
-
             Set<Integer> remainingParts;
 
             if (sctx == null || sctx.iterator == null) {
@@ -361,19 +366,44 @@
                 if (!remainingParts.contains(part))
                     continue;
 
-                GridCacheEntryInfo info = new GridCacheEntryInfo();
+                GridCacheEntryInfo info = grp.mvccEnabled() ?
+                    new GridCacheMvccEntryInfo() : new GridCacheEntryInfo();
 
                 info.key(row.key());
-                info.expireTime(row.expireTime());
-                info.version(row.version());
-                info.value(row.value());
                 info.cacheId(row.cacheId());
 
+                if (grp.mvccEnabled()) {
+                    byte txState = row.mvccTxState() != TxState.NA ? row.mvccTxState() :
+                        MvccUtils.state(grp, row.mvccCoordinatorVersion(), row.mvccCounter(),
+                        row.mvccOperationCounter());
+
+                    if (txState != TxState.COMMITTED)
+                        continue;
+
+                    ((MvccVersionAware)info).mvccVersion(row);
+                    ((GridCacheMvccEntryInfo)info).mvccTxState(TxState.COMMITTED);
+
+                    byte newTxState = row.newMvccTxState() != TxState.NA ? row.newMvccTxState() :
+                        MvccUtils.state(grp, row.newMvccCoordinatorVersion(), row.newMvccCounter(),
+                        row.newMvccOperationCounter());
+
+                    if (newTxState != TxState.ABORTED) {
+                        ((MvccUpdateVersionAware)info).newMvccVersion(row);
+
+                        if (newTxState == TxState.COMMITTED)
+                            ((GridCacheMvccEntryInfo)info).newMvccTxState(TxState.COMMITTED);
+                    }
+                }
+
+                info.value(row.value());
+                info.version(row.version());
+                info.expireTime(row.expireTime());
+
                 if (preloadPred == null || preloadPred.apply(info))
                     s.addEntry0(part, iter.historical(part), info, grp.shared(), grp.cacheObjectContext());
                 else {
-                    if (log.isDebugEnabled())
-                        log.debug("Rebalance predicate evaluated to false (will not send " +
+                    if (log.isTraceEnabled())
+                        log.trace("Rebalance predicate evaluated to false (will not send " +
                             "cache entry): " + info);
                 }
 
@@ -422,13 +452,56 @@
                     ", topology=" + demTop + ", rebalanceId=" + d.rebalanceId() +
                     ", topicId=" + topicId + "]");
         }
-        catch (IgniteCheckedException e) {
-            U.error(log, "Failed to send partition supply message to node: " + nodeId, e);
-        }
-        catch (IgniteSpiException e) {
-            if (log.isDebugEnabled())
-                log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() +
-                    ", msg=" + e.getMessage() + ']');
+        catch (Throwable t) {
+            if (grp.shared().kernalContext().isStopping())
+                return;
+
+            // Sending supply messages with error requires new protocol.
+            boolean sendErrMsg = node.version().compareTo(GridDhtPartitionSupplyMessageV2.AVAILABLE_SINCE) >= 0;
+
+            if (t instanceof IgniteSpiException) {
+                if (log.isDebugEnabled())
+                    log.debug("Failed to send message to node (current node is stopping?) [node=" + node.id() +
+                        ", msg=" + t.getMessage() + ']');
+
+                sendErrMsg = false;
+            }
+            else
+                U.error(log, "Failed to continue supplying process for " +
+                    "[cache=" + grp.cacheOrGroupName() + ", node=" + nodeId
+                    + ", topicId=" + contextId.get2() + ", topVer=" + contextId.get3() + "]", t);
+
+            try {
+                if (sctx != null)
+                    clearContext(sctx, log);
+                else if (iter != null)
+                    iter.close();
+            }
+            catch (Throwable t1) {
+                U.error(log, "Failed to cleanup supplying context " +
+                        "[cache=" + grp.cacheOrGroupName() + ", node=" + nodeId
+                        + ", topicId=" + contextId.get2() + ", topVer=" + contextId.get3() + "]", t1);
+            }
+
+            if (!sendErrMsg)
+                return;
+
+            try {
+                GridDhtPartitionSupplyMessageV2 errMsg = new GridDhtPartitionSupplyMessageV2(
+                    d.rebalanceId(),
+                    grp.groupId(),
+                    d.topologyVersion(),
+                    grp.deploymentEnabled(),
+                    t
+                );
+
+                reply(node, d, errMsg, contextId);
+            }
+            catch (Throwable t1) {
+                U.error(log, "Failed to send supply error message for " +
+                    "[cache=" + grp.cacheOrGroupName() + ", node=" + nodeId
+                        + ", topicId=" + contextId.get2() + ", topVer=" + contextId.get3() + "]", t1);
+            }
         }
     }
 
@@ -519,7 +592,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(SupplyContext.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessage.java
index 4ecffc4..284700a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessage.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessage.java
@@ -89,10 +89,12 @@
      * @param topVer Topology version.
      * @param addDepInfo Deployment info flag.
      */
-    GridDhtPartitionSupplyMessage(long rebalanceId,
+    GridDhtPartitionSupplyMessage(
+        long rebalanceId,
         int grpId,
         AffinityTopologyVersion topVer,
-        boolean addDepInfo) {
+        boolean addDepInfo
+    ) {
         this.grpId = grpId;
         this.rebalanceId = rebalanceId;
         this.topVer = topVer;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessageV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessageV2.java
new file mode 100644
index 0000000..b6bff0e
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionSupplyMessageV2.java
@@ -0,0 +1,153 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht.preloader;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteProductVersion;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Supply message with supplier error transfer support.
+ */
+public class GridDhtPartitionSupplyMessageV2 extends GridDhtPartitionSupplyMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Available since. */
+    public static final IgniteProductVersion AVAILABLE_SINCE = IgniteProductVersion.fromString("2.7.0");
+
+    /** Supplying process error. */
+    @GridDirectTransient
+    private Throwable err;
+
+    /** Supplying process error bytes. */
+    private byte[] errBytes;
+
+    /**
+     * Default constructor.
+     */
+    public GridDhtPartitionSupplyMessageV2() {
+    }
+
+    /**
+     * @param rebalanceId Rebalance id.
+     * @param grpId Group id.
+     * @param topVer Topology version.
+     * @param addDepInfo Add dep info.
+     * @param err Supply process error.
+     */
+    public GridDhtPartitionSupplyMessageV2(
+        long rebalanceId,
+        int grpId,
+        AffinityTopologyVersion topVer,
+        boolean addDepInfo,
+        Throwable err
+    ) {
+        super(rebalanceId, grpId, topVer, addDepInfo);
+
+        this.err = err;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        if (err != null && errBytes == null)
+            errBytes = U.marshal(ctx, err);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        if (errBytes != null && err == null)
+            err = U.unmarshal(ctx, errBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 12:
+                if (!writer.writeByteArray("errBytes", errBytes))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 12:
+                errBytes = reader.readByteArray("errBytes");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridDhtPartitionSupplyMessageV2.class);
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public Throwable error() {
+        return err;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 158;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 13;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java
index 4d0b583..265c48d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java
@@ -24,8 +24,10 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Optional;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.Callable;
@@ -37,6 +39,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
+import java.util.stream.Stream;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteSystemProperties;
@@ -87,6 +90,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionsStateValidator;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFutureAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.Latch;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
@@ -94,6 +98,7 @@
 import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateFinishMessage;
 import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateMessage;
 import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.IgniteUtils;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
@@ -340,6 +345,8 @@
         this.exchActions = exchActions;
         this.affChangeMsg = affChangeMsg;
         this.validator = new GridDhtPartitionsStateValidator(cctx);
+        if (exchActions != null && exchActions.deactivate())
+            this.clusterIsActive = false;
 
         log = cctx.logger(getClass());
         exchLog = cctx.logger(EXCHANGE_LOG);
@@ -502,7 +509,7 @@
         assert exchId.equals(this.exchId);
 
         this.exchId.discoveryEvent(discoEvt);
-        this.firstDiscoEvt= discoEvt;
+        this.firstDiscoEvt = discoEvt;
         this.firstEvtDiscoCache = discoCache;
 
         evtLatch.countDown();
@@ -644,7 +651,17 @@
 
             boolean crdNode = crd != null && crd.isLocal();
 
-            exchCtx = new ExchangeContext(crdNode, this);
+            MvccCoordinator mvccCrd = firstEvtDiscoCache.mvccCoordinator();
+
+            boolean mvccCrdChange = mvccCrd != null &&
+                (initialVersion().equals(mvccCrd.topologyVersion()) || activateCluster());
+
+            // Mvcc coordinator should has been initialized before exchange context is created.
+            cctx.kernalContext().coordinators().updateCoordinator(mvccCrd);
+
+            exchCtx = new ExchangeContext(crdNode, mvccCrdChange, this);
+
+            cctx.kernalContext().coordinators().onExchangeStart(mvccCrd, exchCtx, crd);
 
             assert state == null : state;
 
@@ -655,6 +672,8 @@
 
             if (exchLog.isInfoEnabled()) {
                 exchLog.info("Started exchange init [topVer=" + topVer +
+                    ", mvccCrd=" + mvccCrd +
+                    ", mvccCrdChange=" + mvccCrdChange +
                     ", crd=" + crdNode +
                     ", evt=" + IgniteUtils.gridEventName(firstDiscoEvt.type()) +
                     ", evtNode=" + firstDiscoEvt.eventNode().id() +
@@ -727,7 +746,7 @@
                         }
                     }
                     else {
-                        if (CU.clientNode(firstDiscoEvt.eventNode()))
+                        if (firstDiscoEvt.eventNode().isClient())
                             exchange = onClientNodeEvent(crdNode);
                         else
                             exchange = cctx.kernalContext().clientNode() ? ExchangeType.CLIENT : ExchangeType.ALL;
@@ -737,12 +756,12 @@
                         onLeft();
                 }
                 else {
-                    exchange = CU.clientNode(firstDiscoEvt.eventNode()) ? onClientNodeEvent(crdNode) :
+                    exchange = firstDiscoEvt.eventNode().isClient() ? onClientNodeEvent(crdNode) :
                         onServerNodeEvent(crdNode);
                 }
             }
 
-            updateTopologies(crdNode);
+            updateTopologies(crd, crdNode, cctx.coordinators().currentCoordinator());
 
             switch (exchange) {
                 case ALL: {
@@ -892,10 +911,12 @@
     /**
      * Updates topology versions and discovery caches on all topologies.
      *
+     * @param exchCrd Exchange coordinator node.
      * @param crd Coordinator flag.
+     * @param mvccCrd Mvcc coordinator.
      * @throws IgniteCheckedException If failed.
      */
-    private void updateTopologies(boolean crd) throws IgniteCheckedException {
+    private void updateTopologies(ClusterNode exchCrd, boolean crd, MvccCoordinator mvccCrd) throws IgniteCheckedException {
         for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
             if (grp.isLocal())
                 continue;
@@ -922,12 +943,18 @@
             top.updateTopologyVersion(
                 this,
                 events().discoveryCache(),
+                mvccCrd,
                 updSeq,
                 cacheGroupStopping(grp.groupId()));
         }
 
-        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies())
-            top.updateTopologyVersion(this, events().discoveryCache(), -1, cacheGroupStopping(top.groupId()));
+        for (GridClientPartitionTopology top : cctx.exchange().clientTopologies()) {
+            top.updateTopologyVersion(this,
+                events().discoveryCache(),
+                mvccCrd,
+                -1,
+                cacheGroupStopping(top.groupId()));
+        }
     }
 
     /**
@@ -1113,7 +1140,7 @@
      * @return Exchange type.
      */
     private ExchangeType onClientNodeEvent(boolean crd) throws IgniteCheckedException {
-        assert CU.clientNode(firstDiscoEvt.eventNode()) : this;
+        assert firstDiscoEvt.eventNode().isClient() : this;
 
         if (firstDiscoEvt.type() == EVT_NODE_LEFT || firstDiscoEvt.type() == EVT_NODE_FAILED) {
             onLeft();
@@ -1134,7 +1161,7 @@
      * @return Exchange type.
      */
     private ExchangeType onServerNodeEvent(boolean crd) throws IgniteCheckedException {
-        assert !CU.clientNode(firstDiscoEvt.eventNode()) : this;
+        assert !firstDiscoEvt.eventNode().isClient() : this;
 
         if (firstDiscoEvt.type() == EVT_NODE_LEFT || firstDiscoEvt.type() == EVT_NODE_FAILED) {
             onLeft();
@@ -1200,8 +1227,8 @@
 
         boolean distributed = true;
 
-        // Do not perform distributed partition release in case of cluster activation or caches start.
-        if (activateCluster() || hasCachesToStart())
+        // Do not perform distributed partition release in case of cluster activation.
+        if (activateCluster())
             distributed = false;
 
         // On first phase we wait for finishing all local tx updates, atomic updates and lock releases on all nodes.
@@ -1567,6 +1594,12 @@
                 msg.partitionHistoryCounters(partHistReserved0);
         }
 
+        if (exchCtx.newMvccCoordinator() && cctx.coordinators().currentCoordinatorId().equals(node.id())) {
+            Map<UUID, GridLongList> activeQueries = exchCtx.activeQueries();
+
+            msg.activeQueries(activeQueries != null ? activeQueries.get(cctx.localNodeId()) : null);
+        }
+
         if ((stateChangeExchange() || dynamicCacheStartExchange()) && exchangeLocE != null)
             msg.setError(exchangeLocE);
         else if (localJoinExchange())
@@ -1608,74 +1641,90 @@
     }
 
     /**
-     * @param msg Message to send.
+     * @param fullMsg Message to send.
      * @param nodes Nodes.
      * @param mergedJoinExchMsgs Messages received from merged 'join node' exchanges.
-     * @param joinedNodeAff Affinity if was requested by some nodes.
+     * @param affinityForJoinedNodes Affinity if was requested by some nodes.
      */
     private void sendAllPartitions(
-        GridDhtPartitionsFullMessage msg,
+        GridDhtPartitionsFullMessage fullMsg,
         Collection<ClusterNode> nodes,
         Map<UUID, GridDhtPartitionsSingleMessage> mergedJoinExchMsgs,
-        Map<Integer, CacheGroupAffinityMessage> joinedNodeAff) {
-        boolean singleNode = nodes.size() == 1;
-
-        GridDhtPartitionsFullMessage joinedNodeMsg = null;
-
+        Map<Integer, CacheGroupAffinityMessage> affinityForJoinedNodes
+    ) {
         assert !nodes.contains(cctx.localNode());
 
         if (log.isDebugEnabled()) {
             log.debug("Sending full partition map [nodeIds=" + F.viewReadOnly(nodes, F.node2id()) +
-                ", exchId=" + exchId + ", msg=" + msg + ']');
+                ", exchId=" + exchId + ", msg=" + fullMsg + ']');
         }
 
-        for (ClusterNode node : nodes) {
-            GridDhtPartitionsFullMessage sndMsg = msg;
+        // Find any single message with affinity request. This request exists only for newly joined nodes.
+        Optional<GridDhtPartitionsSingleMessage> singleMsgWithAffinityReq = nodes.stream()
+            .flatMap(node -> Optional.ofNullable(msgs.get(node.id()))
+                .filter(singleMsg -> singleMsg.cacheGroupsAffinityRequest() != null)
+                .map(Stream::of)
+                .orElse(Stream.empty()))
+            .findAny();
 
-            if (joinedNodeAff != null) {
-                if (singleNode)
-                    msg.joinedNodeAffinity(joinedNodeAff);
-                else {
-                    GridDhtPartitionsSingleMessage singleMsg = msgs.get(node.id());
+        // Prepare full message for newly joined nodes with affinity request.
+        final GridDhtPartitionsFullMessage fullMsgWithAffinity = singleMsgWithAffinityReq
+            .filter(singleMessage -> affinityForJoinedNodes != null)
+            .map(singleMessage -> fullMsg.copy().joinedNodeAffinity(affinityForJoinedNodes))
+            .orElse(null);
 
-                    if (singleMsg != null && singleMsg.cacheGroupsAffinityRequest() != null) {
-                        if (joinedNodeMsg == null) {
-                            joinedNodeMsg = msg.copy();
+        // Prepare and send full messages for given nodes.
+        nodes.stream()
+            .map(node -> {
+                // No joined nodes, just send a regular full message.
+                if (fullMsgWithAffinity == null)
+                    return new T2<>(node, fullMsg);
 
-                            joinedNodeMsg.joinedNodeAffinity(joinedNodeAff);
-                        }
+                return new T2<>(
+                    node,
+                    // If single message contains affinity request, use special full message for such single messages.
+                    Optional.ofNullable(msgs.get(node.id()))
+                        .filter(singleMsg -> singleMsg.cacheGroupsAffinityRequest() != null)
+                        .map(singleMsg -> fullMsgWithAffinity)
+                        .orElse(fullMsg)
+                );
+            })
+            .map(nodeAndMsg -> {
+                ClusterNode node = nodeAndMsg.get1();
+                GridDhtPartitionsFullMessage fullMsgToSend = nodeAndMsg.get2();
 
-                        sndMsg = joinedNodeMsg;
-                    }
-                }
-            }
-
-            try {
-                GridDhtPartitionExchangeId sndExchId = exchangeId();
-
-                if (mergedJoinExchMsgs != null) {
-                    GridDhtPartitionsSingleMessage mergedMsg = mergedJoinExchMsgs.get(node.id());
-
-                    if (mergedMsg != null)
-                        sndExchId = mergedMsg.exchangeId();
-                }
+                // If exchange has merged, use merged version of exchange id.
+                GridDhtPartitionExchangeId sndExchId = mergedJoinExchMsgs != null
+                    ? Optional.ofNullable(mergedJoinExchMsgs.get(node.id()))
+                        .map(GridDhtPartitionsAbstractMessage::exchangeId)
+                        .orElse(exchangeId())
+                    : exchangeId();
 
                 if (sndExchId != null && !sndExchId.equals(exchangeId())) {
-                    sndMsg = sndMsg.copy();
+                    GridDhtPartitionsFullMessage fullMsgWithUpdatedExchangeId = fullMsgToSend.copy();
 
-                    sndMsg.exchangeId(sndExchId);
+                    fullMsgWithUpdatedExchangeId.exchangeId(sndExchId);
+
+                    return new T2<>(node, fullMsgWithUpdatedExchangeId);
                 }
 
-                cctx.io().send(node, sndMsg, SYSTEM_POOL);
-            }
-            catch (ClusterTopologyCheckedException e) {
-                if (log.isDebugEnabled())
-                    log.debug("Failed to send partitions, node failed: " + node);
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to send partitions [node=" + node + ']', e);
-            }
-        }
+                return new T2<>(node, fullMsgToSend);
+            })
+            .forEach(nodeAndMsg -> {
+                ClusterNode node = nodeAndMsg.get1();
+                GridDhtPartitionsFullMessage fullMsgToSend = nodeAndMsg.get2();
+
+                try {
+                    cctx.io().send(node, fullMsgToSend, SYSTEM_POOL);
+                }
+                catch (ClusterTopologyCheckedException e) {
+                    if (log.isDebugEnabled())
+                        log.debug("Failed to send partitions, node failed: " + node);
+                }
+                catch (IgniteCheckedException e) {
+                    U.error(log, "Failed to send partitions [node=" + node + ']', e);
+                }
+            });
     }
 
     /**
@@ -1794,6 +1843,10 @@
         if (!cctx.localNode().isClient())
             tryToPerformLocalSnapshotOperation();
 
+        if (err == null)
+            cctx.coordinators().onExchangeDone(exchCtx.newMvccCoordinator(), exchCtx.events().discoveryCache(),
+                exchCtx.activeQueries());
+
         cctx.cache().onExchangeDone(initialVersion(), exchActions, err);
 
         cctx.exchange().onExchangeDone(res, initialVersion(), err);
@@ -1965,7 +2018,7 @@
 
         boolean wait = false;
 
-        if (CU.clientNode(node)) {
+        if (node.isClient()) {
             if (msg != null)
                 waitAndReplyToNode(nodeId, msg);
         }
@@ -2213,6 +2266,34 @@
     }
 
     /**
+     * Tries to fast reply with {@link GridDhtPartitionsFullMessage} on received single message
+     * in case of exchange future has already completed.
+     *
+     * @param node Cluster node which sent single message.
+     * @param msg Single message.
+     * @return {@code true} if fast reply succeed.
+     */
+    public boolean fastReplyOnSingleMessage(final ClusterNode node, final GridDhtPartitionsSingleMessage msg) {
+        GridDhtPartitionsExchangeFuture futToFastReply = this;
+
+        ExchangeLocalState currState;
+
+        synchronized (mux) {
+            currState = state;
+
+            if (currState == ExchangeLocalState.MERGED)
+                futToFastReply = mergedWith;
+        }
+
+        if (currState == ExchangeLocalState.DONE)
+            futToFastReply.processSingleMessage(node.id(), msg);
+        else if (currState == ExchangeLocalState.MERGED)
+            futToFastReply.processMergedMessage(node, msg);
+
+        return currState == ExchangeLocalState.MERGED || currState == ExchangeLocalState.DONE;
+    }
+
+    /**
      * @param nodeId Node ID.
      * @param msg Client's message.
      */
@@ -2237,7 +2318,7 @@
                 }
 
                 if (finishState0 == null) {
-                    assert firstDiscoEvt.type() == EVT_NODE_JOINED && CU.clientNode(firstDiscoEvt.eventNode()) : this;
+                    assert firstDiscoEvt.type() == EVT_NODE_JOINED && firstDiscoEvt.eventNode().isClient() : this;
 
                     ClusterNode node = cctx.node(nodeId);
 
@@ -2272,6 +2353,9 @@
      */
     private void processSingleMessage(UUID nodeId, GridDhtPartitionsSingleMessage msg) {
         if (msg.client()) {
+            if (msg.activeQueries() != null)
+                cctx.coordinators().processClientActiveQueries(nodeId, msg.activeQueries());
+
             waitAndReplyToNode(nodeId, msg);
 
             return;
@@ -2809,6 +2893,9 @@
             for (Map.Entry<UUID, GridDhtPartitionsSingleMessage> e : msgs.entrySet()) {
                 GridDhtPartitionsSingleMessage msg = e.getValue();
 
+                if (exchCtx.newMvccCoordinator())
+                    exchCtx.addActiveQueries(e.getKey(), msg.activeQueries());
+
                 // Apply update counters after all single messages are received.
                 for (Map.Entry<Integer, GridDhtPartitionMap> entry : msg.partitions().entrySet()) {
                     Integer grpId = entry.getKey();
@@ -2835,11 +2922,7 @@
                 }
             }
 
-            // Don't validate partitions state in case of caches start.
-            boolean skipValidation = hasCachesToStart();
-
-            if (!skipValidation)
-                validatePartitionsState();
+            validatePartitionsState();
 
             if (firstDiscoEvt.type() == EVT_DISCOVERY_CUSTOM_EVT) {
                 assert firstDiscoEvt instanceof DiscoveryCustomEvent;
@@ -2931,9 +3014,7 @@
                 synchronized (mux) {
                     srvNodes.remove(cctx.localNode());
 
-                    nodes = U.newHashSet(srvNodes.size());
-
-                    nodes.addAll(srvNodes);
+                    nodes = new LinkedHashSet<>(srvNodes);
 
                     mergedJoinExchMsgs0 = mergedJoinExchMsgs;
 
@@ -3094,48 +3175,50 @@
     private void sendAllPartitionsToNode(FinishState finishState, GridDhtPartitionsSingleMessage msg, UUID nodeId) {
         ClusterNode node = cctx.node(nodeId);
 
-        if (node != null) {
-            GridDhtPartitionsFullMessage fullMsg = finishState.msg.copy();
+        if (node == null) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send partitions, node failed: " + nodeId);
 
-            Collection<Integer> affReq = msg.cacheGroupsAffinityRequest();
+            return;
+        }
 
-            if (affReq != null) {
-                Map<Integer, CacheGroupAffinityMessage> aff = CacheGroupAffinityMessage.createAffinityMessages(
-                    cctx,
-                    finishState.resTopVer,
-                    affReq,
-                    null);
+        GridDhtPartitionsFullMessage fullMsg = finishState.msg.copy();
 
-                fullMsg.joinedNodeAffinity(aff);
-            }
+        Collection<Integer> affReq = msg.cacheGroupsAffinityRequest();
 
-            if (!fullMsg.exchangeId().equals(msg.exchangeId())) {
-                fullMsg = fullMsg.copy();
+        if (affReq != null) {
+            Map<Integer, CacheGroupAffinityMessage> aff = CacheGroupAffinityMessage.createAffinityMessages(
+                cctx,
+                finishState.resTopVer,
+                affReq,
+                null);
 
-                fullMsg.exchangeId(msg.exchangeId());
-            }
+            fullMsg.joinedNodeAffinity(aff);
+        }
 
-            try {
-                cctx.io().send(node, fullMsg, SYSTEM_POOL);
+        if (!fullMsg.exchangeId().equals(msg.exchangeId())) {
+            fullMsg = fullMsg.copy();
 
-                if (log.isDebugEnabled()) {
-                    log.debug("Full message was sent to node: " +
-                        node +
-                        ", fullMsg: " + fullMsg
-                    );
-                }
-            }
-            catch (ClusterTopologyCheckedException e) {
-                if (log.isDebugEnabled())
-                    log.debug("Failed to send partitions, node failed: " + node);
-            }
-            catch (IgniteCheckedException e) {
-                U.error(log, "Failed to send partitions [node=" + node + ']', e);
+            fullMsg.exchangeId(msg.exchangeId());
+        }
+
+        try {
+            cctx.io().send(node, fullMsg, SYSTEM_POOL);
+
+            if (log.isDebugEnabled()) {
+                log.debug("Full message was sent to node: " +
+                    node +
+                    ", fullMsg: " + fullMsg
+                );
             }
         }
-        else if (log.isDebugEnabled())
-            log.debug("Failed to send partitions, node failed: " + nodeId);
-
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send partitions, node failed: " + node);
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send partitions [node=" + node + ']', e);
+        }
     }
 
     /**
@@ -3170,7 +3253,7 @@
      */
     public void onReceivePartitionRequest(final ClusterNode node, final GridDhtPartitionsSingleRequest msg) {
         assert !cctx.kernalContext().clientNode() || msg.restoreState();
-        assert !node.isDaemon() && !CU.clientNode(node) : node;
+        assert !node.isDaemon() && !node.isClient() : node;
 
         initFut.listen(new CI1<IgniteInternalFuture<Boolean>>() {
             @Override public void apply(IgniteInternalFuture<Boolean> fut) {
@@ -3793,9 +3876,8 @@
 
                             crd0 = crd;
 
-                            if (crd0 == null) {
+                            if (crd0 == null)
                                 finishState = new FinishState(null, initialVersion(), null);
-                            }
                         }
 
                         if (crd0 == null) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsFullMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsFullMessage.java
index 5962468..ab45d8b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsFullMessage.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsFullMessage.java
@@ -218,8 +218,10 @@
     /**
      * @param joinedNodeAff Caches affinity for joining nodes.
      */
-    void joinedNodeAffinity(Map<Integer, CacheGroupAffinityMessage> joinedNodeAff) {
+    GridDhtPartitionsFullMessage joinedNodeAffinity(Map<Integer, CacheGroupAffinityMessage> joinedNodeAff) {
         this.joinedNodeAff = joinedNodeAff;
+
+        return this;
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsSingleMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsSingleMessage.java
index 804cc03..7998e07 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsSingleMessage.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsSingleMessage.java
@@ -30,6 +30,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.T2;
@@ -108,6 +109,9 @@
      */
     private GridDhtPartitionsFullMessage finishMsg;
 
+    /** */
+    private GridLongList activeQryTrackers;
+
     /**
      * Required by {@link Externalizable}.
      */
@@ -132,6 +136,20 @@
     }
 
     /**
+     * @return Active queries started with previous coordinator.
+     */
+    GridLongList activeQueries() {
+        return activeQryTrackers;
+    }
+
+    /**
+     * @param activeQrys Active queries started with previous coordinator.
+     */
+    void activeQueries(GridLongList activeQrys) {
+        this.activeQryTrackers = activeQrys;
+    }
+
+    /**
      * @param finishMsg Exchange finish message (used to restore exchange state on new coordinator).
      */
     void finishMessage(GridDhtPartitionsFullMessage finishMsg) {
@@ -331,7 +349,7 @@
             byte[] partsBytes0 = null;
             byte[] partCntrsBytes0 = null;
             byte[] partHistCntrsBytes0 = null;
-            byte[] partSizesBytes0 = null;
+            byte[] partsSizesBytes0 = null;
             byte[] errBytes0 = null;
 
             if (parts != null && partsBytes == null)
@@ -344,7 +362,7 @@
                 partHistCntrsBytes0 = U.marshal(ctx, partHistCntrs);
 
             if (partsSizes != null && partsSizesBytes == null)
-                partSizesBytes0 = U.marshal(ctx, partsSizes);
+                partsSizesBytes0 = U.marshal(ctx, partsSizes);
 
             if (err != null && errBytes == null)
                 errBytes0 = U.marshal(ctx, err);
@@ -356,13 +374,13 @@
                     byte[] partsBytesZip = U.zip(partsBytes0);
                     byte[] partCntrsBytesZip = U.zip(partCntrsBytes0);
                     byte[] partHistCntrsBytesZip = U.zip(partHistCntrsBytes0);
-                    byte[] partSizesBytesZip = U.zip(partSizesBytes0);
+                    byte[] partsSizesBytesZip = U.zip(partsSizesBytes0);
                     byte[] exBytesZip = U.zip(errBytes0);
 
                     partsBytes0 = partsBytesZip;
                     partCntrsBytes0 = partCntrsBytesZip;
                     partHistCntrsBytes0 = partHistCntrsBytesZip;
-                    partSizesBytes0 = partSizesBytesZip;
+                    partsSizesBytes0 = partsSizesBytesZip;
                     errBytes0 = exBytesZip;
 
                     compressed(true);
@@ -375,7 +393,7 @@
             partsBytes = partsBytes0;
             partCntrsBytes = partCntrsBytes0;
             partHistCntrsBytes = partHistCntrsBytes0;
-            partsSizesBytes = partSizesBytes0;
+            partsSizesBytes = partsSizesBytes0;
             errBytes = errBytes0;
         }
     }
@@ -508,6 +526,12 @@
                     return false;
 
                 writer.incrementState();
+
+            case 14:
+                if (!writer.writeMessage("activeQryTrackers", activeQryTrackers))
+                    return false;
+
+                writer.incrementState();
         }
 
         return true;
@@ -595,6 +619,14 @@
                     return false;
 
                 reader.incrementState();
+
+            case 14:
+                activeQryTrackers = reader.readMessage("activeQryTrackers");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
         }
 
         return reader.afterMessageRead(GridDhtPartitionsSingleMessage.class);
@@ -607,7 +639,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 14;
+        return 15;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java
index 7cf55a3..f886767 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java
@@ -313,7 +313,7 @@
                     msg.partitions().addHistorical(p, part.initialUpdateCounter(), countersMap.updateCounter(p), partCnt);
                 }
                 else {
-                    Collection<ClusterNode> picked = pickOwners(p, topVer);
+                    List<ClusterNode> picked = remoteOwners(p, topVer);
 
                     if (picked.isEmpty()) {
                         top.own(part);
@@ -330,7 +330,7 @@
                             log.debug("Owning partition as there are no other owners: " + part);
                     }
                     else {
-                        ClusterNode n = F.rand(picked);
+                        ClusterNode n = picked.get(0);
 
                         GridDhtPartitionDemandMessage msg = assignments.get(n);
 
@@ -359,42 +359,23 @@
     }
 
     /**
-     * Picks owners for specified partition {@code p} from affinity.
-     *
-     * @param p Partition.
-     * @param topVer Topology version.
-     * @return Picked owners.
-     */
-    private Collection<ClusterNode> pickOwners(int p, AffinityTopologyVersion topVer) {
-        Collection<ClusterNode> affNodes = grp.affinity().cachedAffinity(topVer).get(p);
-
-        int affCnt = affNodes.size();
-
-        Collection<ClusterNode> rmts = remoteOwners(p, topVer);
-
-        int rmtCnt = rmts.size();
-
-        if (rmtCnt <= affCnt)
-            return rmts;
-
-        List<ClusterNode> sorted = new ArrayList<>(rmts);
-
-        // Sort in descending order, so nodes with higher order will be first.
-        Collections.sort(sorted, CU.nodeComparator(false));
-
-        // Pick newest nodes.
-        return sorted.subList(0, affCnt);
-    }
-
-    /**
      * Returns remote owners (excluding local node) for specified partition {@code p}.
      *
      * @param p Partition.
      * @param topVer Topology version.
      * @return Nodes owning this partition.
      */
-    private Collection<ClusterNode> remoteOwners(int p, AffinityTopologyVersion topVer) {
-        return F.view(grp.topology().owners(p, topVer), F.remoteNodes(ctx.localNodeId()));
+    private List<ClusterNode> remoteOwners(int p, AffinityTopologyVersion topVer) {
+        List<ClusterNode> owners = grp.topology().owners(p, topVer);
+
+        List<ClusterNode> res = new ArrayList<>(owners.size());
+
+        for (ClusterNode owner : owners) {
+            if (!owner.id().equals(ctx.localNodeId()))
+                res.add(owner);
+        }
+
+        return res;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/latch/ExchangeLatchManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/latch/ExchangeLatchManager.java
index 0798d33..1ffe5af 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/latch/ExchangeLatchManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/latch/ExchangeLatchManager.java
@@ -231,7 +231,7 @@
             Collection<ClusterNode> histNodes = discovery.topology(topVer.topologyVersion());
 
             if (histNodes != null)
-                return histNodes.stream().filter(n -> !CU.clientNode(n) && !n.isDaemon() && discovery.alive(n))
+                return histNodes.stream().filter(n -> !n.isClient() && !n.isDaemon() && discovery.alive(n))
                         .collect(Collectors.toList());
             else
                 throw new IgniteException("Topology " + topVer + " not found in discovery history "
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/AckCoordinatorOnRollback.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/AckCoordinatorOnRollback.java
new file mode 100644
index 0000000..1648da9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/AckCoordinatorOnRollback.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.util.typedef.CIX1;
+
+/** */
+public class AckCoordinatorOnRollback extends CIX1<IgniteInternalFuture<IgniteInternalTx>> {
+    /** */
+    private static final long serialVersionUID = 8172699207968328284L;
+
+    /** */
+    private final GridNearTxLocal tx;
+
+    /**
+     * @param tx Transaction.
+     */
+    public AckCoordinatorOnRollback(GridNearTxLocal tx) {
+        this.tx = tx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void applyx(IgniteInternalFuture<IgniteInternalTx> fut) throws IgniteCheckedException {
+        assert fut.isDone();
+
+        MvccQueryTracker tracker = tx.mvccQueryTracker();
+        MvccSnapshot mvccSnapshot = tx.mvccSnapshot();
+
+        if (tracker != null) // Optimistic tx.
+            tracker.onDone(tx, false);
+        else if (mvccSnapshot != null)// Pessimistic tx.
+            tx.context().coordinators().ackTxRollback(mvccSnapshot);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearAtomicCache.java
index 23c2480..3651bad 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearAtomicCache.java
@@ -284,7 +284,7 @@
                 }
                 finally {
                     if (entry != null)
-                        ctx.evicts().touch(entry, topVer);
+                        entry.touch(topVer);
                 }
             }
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
index 672693c..3904781 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearCacheAdapter.java
@@ -396,16 +396,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public boolean isMongoDataCache() {
-        return dht().isMongoDataCache();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isMongoMetaCache() {
-        return dht().isMongoMetaCache();
-    }
-
-    /** {@inheritDoc} */
     @Override public List<GridCacheClearAllRunnable<K, V>> splitClearLocally(boolean srv, boolean near,
         boolean readers) {
         assert configuration().getNearConfiguration() != null;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
index a101e7f..95c952c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetFuture.java
@@ -202,7 +202,7 @@
      * @param nodeId Sender.
      * @param res Result.
      */
-    public void onResult(UUID nodeId, GridNearGetResponse res) {
+    @Override public void onResult(UUID nodeId, GridNearGetResponse res) {
         for (IgniteInternalFuture<Map<K, V>> fut : futures())
             if (isMini(fut)) {
                 MiniFuture f = (MiniFuture)fut;
@@ -319,7 +319,8 @@
                         taskName == null ? 0 : taskName.hashCode(),
                         expiryPlc,
                         skipVals,
-                        recovery);
+                        recovery,
+                        null); // TODO IGNITE-7371
 
                 final Collection<Integer> invalidParts = fut.invalidPartitions();
 
@@ -382,7 +383,8 @@
                     true,
                     skipVals,
                     cctx.deploymentEnabled(),
-                    recovery);
+                    recovery,
+                    null); // TODO IGNITE-7371
 
                 add(fut); // Append new future.
 
@@ -455,7 +457,8 @@
                             taskName,
                             expiryPlc,
                             !deserializeBinary,
-                            null);
+                            null,
+                            null); // TODO IGNITE-7371
 
                         if (res != null) {
                             v = res.value();
@@ -473,7 +476,8 @@
                             null,
                             taskName,
                             expiryPlc,
-                            !deserializeBinary);
+                            !deserializeBinary,
+                            null); // TODO IGNITE-7371
                     }
                 }
 
@@ -554,7 +558,7 @@
             }
             finally {
                 if (entry != null && tx == null)
-                    cctx.evicts().touch(entry, topVer);
+                    entry.touch(topVer);
             }
         }
 
@@ -599,7 +603,8 @@
                             taskName,
                             expiryPlc,
                             !deserializeBinary,
-                            null);
+                            null,
+                            null); // TODO IGNITE-7371
 
                         if (res != null) {
                             v = res.value();
@@ -617,7 +622,8 @@
                             null,
                             taskName,
                             expiryPlc,
-                            !deserializeBinary);
+                            !deserializeBinary,
+                            null); // TODO IGNITE-7371
                     }
 
                     // Entry was not in memory or in swap, so we remove it from cache.
@@ -655,7 +661,7 @@
                 if (dhtEntry != null)
                     // Near cache is enabled, so near entry will be enlisted in the transaction.
                     // Always touch DHT entry in this case.
-                    dht.context().evicts().touch(dhtEntry, topVer);
+                    dhtEntry.touch(topVer);
             }
         }
     }
@@ -799,7 +805,7 @@
                 entry.releaseEviction();
 
                 if (tx == null)
-                    cctx.evicts().touch(entry, topVer);
+                    entry.touch(topVer);
             }
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java
index dcb167d..f594e2b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearGetRequest.java
@@ -34,6 +34,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionable;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -45,6 +46,7 @@
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
 import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
 
 /**
  * Get request. Responsible for obtaining entry from primary node. 'Near' means 'Initiating node' here, not 'Near Cache'.
@@ -106,6 +108,9 @@
     /** TTL for read operation. */
     private long accessTtl;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -128,6 +133,7 @@
      * @param createTtl New TTL to set after entry is created, -1 to leave unchanged.
      * @param accessTtl New TTL to set after entry is accessed, -1 to leave unchanged.
      * @param addDepInfo Deployment info.
+     * @param mvccSnapshot Mvcc snapshot.
      */
     public GridNearGetRequest(
         int cacheId,
@@ -144,7 +150,8 @@
         boolean addReader,
         boolean skipVals,
         boolean addDepInfo,
-        boolean recovery
+        boolean recovery,
+        @Nullable MvccSnapshot mvccSnapshot
     ) {
         assert futId != null;
         assert miniId != null;
@@ -173,6 +180,7 @@
         this.createTtl = createTtl;
         this.accessTtl = accessTtl;
         this.addDepInfo = addDepInfo;
+        this.mvccSnapshot = mvccSnapshot;
 
         if (readThrough)
             flags |= READ_THROUGH_FLAG_MASK;
@@ -188,6 +196,13 @@
     }
 
     /**
+     * @return Mvcc version.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
      * @return Future ID.
      */
     public IgniteUuid futureId() {
@@ -411,6 +426,12 @@
 
                 writer.incrementState();
 
+            case 14:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -515,6 +536,14 @@
 
                 reader.incrementState();
 
+            case 14:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridNearGetRequest.class);
@@ -527,7 +556,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 14;
+        return 15;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
index 4b46bda..6cd4514 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockFuture.java
@@ -129,7 +129,7 @@
 
     /** Timeout object. */
     @GridToStringExclude
-    private LockTimeoutObject timeoutObj;
+    private volatile LockTimeoutObject timeoutObj;
 
     /** Lock timeout. */
     private final long timeout;
@@ -481,37 +481,66 @@
      * @param nodeId Sender.
      * @param res Result.
      */
+    @SuppressWarnings("SynchronizeOnNonFinalField")
     void onResult(UUID nodeId, GridNearLockResponse res) {
-        if (!isDone()) {
-            if (log.isDebugEnabled())
-                log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res + ", fut=" + this + ']');
+        boolean done = isDone();
 
-            MiniFuture mini = miniFuture(res.miniId());
-
-            if (mini != null) {
-                assert mini.node().id().equals(nodeId);
-
-                if (log.isDebugEnabled())
-                    log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');
-
-                mini.onResult(res);
-
-                if (log.isDebugEnabled())
-                    log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
-                        ", res=" + res + ']');
+        if (!done) {
+            // onResult is always called after map() and timeoutObj is never reset to null, so this is
+            // a race-free null check.
+            if (timeoutObj == null) {
+                onResult0(nodeId, res);
 
                 return;
             }
 
-            U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
-                ", fut=" + this + ']');
+            synchronized (timeoutObj) {
+                if (!isDone()) {
+                    if (onResult0(nodeId, res))
+                        return;
+                }
+                else
+                    done = true;
+            }
         }
-        else if (log.isDebugEnabled())
+
+        if (done && log.isDebugEnabled())
             log.debug("Ignoring lock response from node (future is done) [nodeId=" + nodeId + ", res=" + res +
                 ", fut=" + this + ']');
     }
 
     /**
+     * @param nodeId Sender.
+     * @param res Result.
+     */
+    private boolean onResult0(UUID nodeId, GridNearLockResponse res) {
+        if (log.isDebugEnabled())
+            log.debug("Received lock response from node [nodeId=" + nodeId + ", res=" + res + ", fut=" + this + ']');
+
+        MiniFuture mini = miniFuture(res.miniId());
+
+        if (mini != null) {
+            assert mini.node().id().equals(nodeId);
+
+            if (log.isDebugEnabled())
+                log.debug("Found mini future for response [mini=" + mini + ", res=" + res + ']');
+
+            mini.onResult(res);
+
+            if (log.isDebugEnabled())
+                log.debug("Future after processed lock response [fut=" + this + ", mini=" + mini +
+                    ", res=" + res + ']');
+
+            return true;
+        }
+
+        U.warn(log, "Failed to find mini future for response (perhaps due to stale message) [res=" + res +
+            ", fut=" + this + ']');
+
+        return false;
+    }
+
+    /**
      * @return Keys for which locks requested from remote nodes but response isn't received.
      */
     public synchronized Set<IgniteTxKey> requestedKeys() {
@@ -1496,15 +1525,20 @@
                                 U.warn(log, "Failed to detect deadlock.", e);
                             }
 
-                            onComplete(false, true);
+                            synchronized (LockTimeoutObject.this) {
+                                onComplete(false, true);
+                            }
                         }
                     });
                 }
                 else
                     err = tx.timeoutException();
             }
-            else
-                onComplete(false, true);
+            else {
+                synchronized (this) {
+                    onComplete(false, true);
+                }
+            }
         }
 
         /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockMapping.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockMapping.java
index 6c8e388..b21f6ad 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockMapping.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearLockMapping.java
@@ -112,7 +112,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(GridNearLockMapping.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
index 75a768f..140c1d5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java
@@ -38,12 +38,11 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxMapping;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException;
-import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
-import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -52,14 +51,15 @@
 import org.apache.ignite.internal.util.typedef.C1;
 import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.P1;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.lang.IgniteReducer;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.noCoordinatorError;
 import static org.apache.ignite.transactions.TransactionState.PREPARED;
 import static org.apache.ignite.transactions.TransactionState.PREPARING;
 
@@ -185,10 +185,20 @@
                 tx.removeMapping(m.primary().id());
         }
 
+        prepareError(e);
+    }
+
+    /**
+     * @param e Error.
+     */
+    private void prepareError(Throwable e) {
         ERR_UPD.compareAndSet(this, null, e);
 
         if (keyLockFut != null)
             keyLockFut.onDone(e);
+
+        if (mvccVerFut != null)
+            mvccVerFut.onDone();
     }
 
     /** {@inheritDoc} */
@@ -230,7 +240,7 @@
 
             // Avoid iterator creation.
             for (int i = 0; i < size; i++) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
+                IgniteInternalFuture fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -339,11 +349,25 @@
 
         boolean hasNearCache = false;
 
+        MvccCoordinator mvccCrd = null;
+
         for (IgniteTxEntry write : writes) {
             map(write, topVer, mappings, txMapping, remap, topLocked);
 
-            if (write.context().isNear())
+            GridCacheContext cctx = write.context();
+
+            if (cctx.isNear())
                 hasNearCache = true;
+
+            if (cctx.mvccEnabled() && mvccCrd == null) {
+                mvccCrd = cctx.affinity().mvccCoordinator(topVer);
+
+                if (mvccCrd == null) {
+                    onDone(noCoordinatorError(topVer));
+
+                    return;
+                }
+            }
         }
 
         for (IgniteTxEntry read : reads)
@@ -359,6 +383,8 @@
             return;
         }
 
+        assert !tx.txState().mvccEnabled(cctx) || mvccCrd != null || F.isEmpty(writes);
+
         tx.addEntryMapping(mappings.values());
 
         cctx.mvcc().recheckPendingLocks();
@@ -370,23 +396,32 @@
 
         MiniFuture locNearEntriesFut = null;
 
+        int lockCnt = keyLockFut != null ? 1 : 0;
+
         // Create futures in advance to have all futures when process {@link GridNearTxPrepareResponse#clientRemapVersion}.
         for (GridDistributedTxMapping m : mappings.values()) {
             assert !m.empty();
 
             MiniFuture fut = new MiniFuture(this, m, ++miniId);
 
-            add(fut);
+            lockCnt++;
+
+            add((IgniteInternalFuture)fut);
 
             if (m.primary().isLocal() && m.hasNearCacheEntries() && m.hasColocatedCacheEntries()) {
                 assert locNearEntriesFut == null;
 
                 locNearEntriesFut = fut;
 
-                add(new MiniFuture(this, m, ++miniId));
+                add((IgniteInternalFuture)new MiniFuture(this, m, ++miniId));
+
+                lockCnt++;
             }
         }
 
+        if (mvccCrd != null)
+            initMvccVersionFuture(lockCnt, remap);
+
         Collection<IgniteInternalFuture<?>> futs = (Collection)futures();
 
         Iterator<IgniteInternalFuture<?>> it = futs.iterator();
@@ -640,7 +675,7 @@
                 if (keyLockFut == null) {
                     keyLockFut = new KeyLockFuture();
 
-                    add(keyLockFut);
+                    add((IgniteInternalFuture)keyLockFut);
                 }
 
                 keyLockFut.addLockKey(entry.txKey());
@@ -697,20 +732,20 @@
         Collection<String> futs = F.viewReadOnly(futures(),
             new C1<IgniteInternalFuture<?>, String>() {
                 @Override public String apply(IgniteInternalFuture<?> f) {
-                    return "[node=" + ((MiniFuture)f).primary().id() +
-                        ", loc=" + ((MiniFuture)f).primary().isLocal() +
-                        ", done=" + f.isDone() + "]";
-                }
-            },
-            new P1<IgniteInternalFuture<?>>() {
-                @Override public boolean apply(IgniteInternalFuture<?> f) {
-                    return isMini(f);
+                    if (isMini(f)) {
+                        return "[node=" + ((MiniFuture)f).primary().id() +
+                            ", loc=" + ((MiniFuture)f).primary().isLocal() +
+                            ", done=" + f.isDone() +
+                            ", err=" + f.error() + "]";
+                    }
+                    else
+                        return f.toString();
                 }
             });
 
         return S.toString(GridNearOptimisticSerializableTxPrepareFuture.class, this,
             "innerFuts", futs,
-            "keyLockFut", keyLockFut,
+            "remap", remapFut != null,
             "tx", tx,
             "super", super.toString());
     }
@@ -761,7 +796,7 @@
      */
     private static class MiniFuture extends GridFutureAdapter<GridNearTxPrepareResponse> {
         /** Receive result flag updater. */
-        private static AtomicIntegerFieldUpdater<MiniFuture> RCV_RES_UPD =
+        private static final AtomicIntegerFieldUpdater<MiniFuture> RCV_RES_UPD =
             AtomicIntegerFieldUpdater.newUpdater(MiniFuture.class, "rcvRes");
 
         /** */
@@ -910,18 +945,12 @@
                                                 parent.remapFut = null;
                                             }
 
-                                            affFut.listen(new CI1<IgniteInternalFuture<?>>() {
-                                                @Override public void apply(IgniteInternalFuture<?> affFut) {
-                                                    try {
-                                                        affFut.get();
+                                            parent.cctx.time().waitAsync(affFut, parent.tx.remainingTime(), new IgniteBiInClosure<IgniteCheckedException, Boolean>() {
+                                                @Override public void apply(IgniteCheckedException e, Boolean timedOut) {
+                                                    if (parent.errorOrTimeoutOnTopologyVersion(e, timedOut))
+                                                        return;
 
-                                                        remap(res);
-                                                    }
-                                                    catch (IgniteCheckedException e) {
-                                                        ERR_UPD.compareAndSet(parent, null, e);
-
-                                                        onDone(e);
-                                                    }
+                                                    remap(res);
                                                 }
                                             });
                                         }
@@ -931,7 +960,7 @@
 
                                             err0.retryReadyFuture(affFut);
 
-                                            ERR_UPD.compareAndSet(parent, null, err0);
+                                            parent.prepareError(err0);
 
                                             onDone(err0);
                                         }
@@ -942,7 +971,7 @@
                                                 parent);
                                         }
 
-                                        ERR_UPD.compareAndSet(parent, null, e);
+                                        parent.prepareError(e);
 
                                         onDone(e);
                                     }
@@ -957,6 +986,9 @@
 
                         // Finish this mini future (need result only on client node).
                         onDone(parent.cctx.kernalContext().clientNode() ? res : null);
+
+                        if (parent.mvccVerFut != null)
+                            parent.mvccVerFut.onLockReceived();
                     }
                 }
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
index 36755b8..06d7a8c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java
@@ -43,6 +43,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxMapping;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
@@ -52,7 +53,6 @@
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.future.GridEmbeddedFuture;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.C1;
 import org.apache.ignite.internal.util.typedef.CI1;
@@ -69,6 +69,7 @@
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.noCoordinatorError;
 import static org.apache.ignite.transactions.TransactionState.PREPARED;
 import static org.apache.ignite.transactions.TransactionState.PREPARING;
 
@@ -221,7 +222,7 @@
             int size = futuresCountNoLock();
 
             for (int i = 0; i < size; i++) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
+                IgniteInternalFuture fut = future(i);
 
                 if (isMini(fut) && !fut.isDone()) {
                     MiniFuture miniFut = (MiniFuture)fut;
@@ -255,7 +256,7 @@
 
             // Avoid iterator creation.
             for (int i = size - 1; i >= 0; i--) {
-                IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
+                IgniteInternalFuture fut = future(i);
 
                 if (!isMini(fut))
                     continue;
@@ -381,6 +382,18 @@
                 tx.colocatedLocallyMapped(true);
         }
 
+        if (write.context().mvccEnabled()) {
+            MvccCoordinator mvccCrd = write.context().affinity().mvccCoordinator(topVer);
+
+            if (mvccCrd == null) {
+                onDone(noCoordinatorError(topVer));
+
+                return;
+            }
+
+            initMvccVersionFuture(keyLockFut != null ? 2 : 1, remap);
+        }
+
         if (keyLockFut != null)
             keyLockFut.onAllKeysAdded();
 
@@ -425,6 +438,8 @@
 
         boolean hasNearCache = false;
 
+        MvccCoordinator mvccCrd = null;
+
         for (IgniteTxEntry write : writes) {
             write.clearEntryReadVersion();
 
@@ -434,6 +449,16 @@
                 // an exception occurred while transaction mapping, stop further processing
                 break;
 
+            if (write.context().mvccEnabled() && mvccCrd == null) {
+                mvccCrd = write.context().affinity().mvccCoordinator(topVer);
+
+                if (mvccCrd == null) {
+                    onDone(noCoordinatorError(topVer));
+
+                    break;
+                }
+            }
+
             if (write.context().isNear())
                 hasNearCache = true;
 
@@ -473,6 +498,11 @@
             return;
         }
 
+        assert !tx.txState().mvccEnabled(cctx) || mvccCrd != null;
+
+        if (mvccCrd != null)
+            initMvccVersionFuture(keyLockFut != null ? 2 : 1, remap);
+
         if (keyLockFut != null)
             keyLockFut.onAllKeysAdded();
 
@@ -496,8 +526,12 @@
     private void proceedPrepare(final Queue<GridDistributedTxMapping> mappings) {
         final GridDistributedTxMapping m = mappings.poll();
 
-        if (m == null)
+        if (m == null) {
+            if (mvccVerFut != null)
+                mvccVerFut.onLockReceived();
+
             return;
+        }
 
         proceedPrepare(m, mappings);
     }
@@ -563,7 +597,7 @@
 
                 req.miniId(fut.futureId());
 
-                add(fut); // Append new future.
+                add((IgniteInternalFuture)fut); // Append new future.
 
                 if (n.isLocal()) {
                     assert !(m.hasColocatedCacheEntries() && m.hasNearCacheEntries()) : m;
@@ -680,7 +714,7 @@
                 if (keyLockFut == null) {
                     keyLockFut = new KeyLockFuture();
 
-                    add(keyLockFut);
+                    add((IgniteInternalFuture)keyLockFut);
                 }
 
                 keyLockFut.addLockKey(entry.txKey());
@@ -739,7 +773,7 @@
                     int size = futuresCountNoLock();
 
                     for (int i = 0; i < size; i++) {
-                        IgniteInternalFuture<GridNearTxPrepareResponse> fut = future(i);
+                        IgniteInternalFuture fut = future(i);
 
                         if (isMini(fut) && !fut.isDone()) {
                             MiniFuture miniFut = (MiniFuture)fut;
@@ -757,7 +791,7 @@
                 }
             }
 
-            add(new GridEmbeddedFuture<>(new IgniteBiClosure<TxDeadlock, Exception, GridNearTxPrepareResponse>() {
+            add(new GridEmbeddedFuture<>(new IgniteBiClosure<TxDeadlock, Exception, Object>() {
                 @Override public GridNearTxPrepareResponse apply(TxDeadlock deadlock, Exception e) {
                     if (e != null)
                         U.warn(log, "Failed to detect deadlock.", e);
@@ -788,7 +822,7 @@
     /** {@inheritDoc} */
     @Override public void addDiagnosticRequest(IgniteDiagnosticPrepareContext ctx) {
         if (!isDone()) {
-            for (IgniteInternalFuture<GridNearTxPrepareResponse> fut : futures()) {
+            for (IgniteInternalFuture fut : futures()) {
                 if (!fut.isDone()) {
                     if (fut instanceof MiniFuture) {
                         MiniFuture miniFut = (MiniFuture)fut;
@@ -844,19 +878,22 @@
     @Override public String toString() {
         Collection<String> futs = F.viewReadOnly(futures(), new C1<IgniteInternalFuture<?>, String>() {
             @Override public String apply(IgniteInternalFuture<?> f) {
-                return "[node=" + ((MiniFuture)f).node().id() +
-                    ", loc=" + ((MiniFuture)f).node().isLocal() +
-                    ", done=" + f.isDone() + "]";
+                if (isMini(f)) {
+                    return "[node=" + ((MiniFuture)f).node().id() +
+                        ", loc=" + ((MiniFuture)f).node().isLocal() +
+                        ", done=" + f.isDone() + "]";
+                }
+                else
+                    return f.toString();
             }
-        }, new P1<IgniteInternalFuture<GridNearTxPrepareResponse>>() {
-            @Override public boolean apply(IgniteInternalFuture<GridNearTxPrepareResponse> fut) {
+        }, new P1<IgniteInternalFuture<Object>>() {
+            @Override public boolean apply(IgniteInternalFuture<Object> fut) {
                 return isMini(fut);
             }
         });
 
         return S.toString(GridNearOptimisticTxPrepareFuture.class, this,
             "innerFuts", futs,
-            "keyLockFut", keyLockFut,
             "tx", tx,
             "super", super.toString());
     }
@@ -989,22 +1026,12 @@
                         IgniteInternalFuture<?> affFut =
                             parent.cctx.exchange().affinityReadyFuture(res.clientRemapVersion());
 
-                        if (affFut != null && !affFut.isDone()) {
-                            affFut.listen(new CI1<IgniteInternalFuture<?>>() {
-                                @Override public void apply(IgniteInternalFuture<?> fut) {
-                                    try {
-                                        fut.get();
+                        parent.cctx.time().waitAsync(affFut, parent.tx.remainingTime(), (e, timedOut) -> {
+                            if (parent.errorOrTimeoutOnTopologyVersion(e, timedOut))
+                                return;
 
-                                        remap();
-                                    }
-                                    catch (IgniteCheckedException e) {
-                                        onDone(e);
-                                    }
-                                }
-                            });
-                        }
-                        else
                             remap();
+                        });
                     }
                     else {
                         parent.onPrepareResponse(m, res, m.hasNearCacheEntries());
@@ -1012,6 +1039,8 @@
                         // Proceed prepare before finishing mini future.
                         if (mappings != null)
                             parent.proceedPrepare(mappings);
+                        else if (parent.mvccVerFut != null)
+                            parent.mvccVerFut.onLockReceived();
 
                         // Finish this mini future.
                         onDone((GridNearTxPrepareResponse)null);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java
index 317ada3d..6f541d3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java
@@ -18,21 +18,24 @@
 package org.apache.ignite.internal.processors.cache.distributed.near;
 
 import java.util.Collection;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotResponseListener;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.GridConcurrentHashSet;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
-import org.apache.ignite.internal.util.lang.GridPlainRunnable;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
-import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.jetbrains.annotations.Nullable;
 
 /**
@@ -40,9 +43,20 @@
  */
 public abstract class GridNearOptimisticTxPrepareFutureAdapter extends GridNearTxPrepareFutureAdapter {
     /** */
+    private static final long serialVersionUID = 7460376140787916619L;
+
+    /** */
+    private static final AtomicIntegerFieldUpdater<MvccSnapshotFutureExt> LOCK_CNT_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(MvccSnapshotFutureExt.class, "lockCnt");
+
+    /** */
     @GridToStringExclude
     protected KeyLockFuture keyLockFut;
 
+    /** */
+    @GridToStringExclude
+    protected MvccSnapshotFutureExt mvccVerFut;
+
     /**
      * @param cctx Context.
      * @param tx Transaction.
@@ -71,7 +85,7 @@
             }
 
             if (keyLockFut != null)
-                add(keyLockFut);
+                add((IgniteInternalFuture)keyLockFut);
         }
     }
 
@@ -179,23 +193,15 @@
                 c.run();
         }
         else {
-            topFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
-                @Override public void apply(final IgniteInternalFuture<AffinityTopologyVersion> fut) {
-                    cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() {
-                        @Override public void run() {
-                            try {
-                                fut.get();
+            cctx.time().waitAsync(topFut, tx.remainingTime(), (e, timedOut) -> {
+                if (errorOrTimeoutOnTopologyVersion(e, timedOut))
+                    return;
 
-                                prepareOnTopology(remap, c);
-                            }
-                            catch (IgniteCheckedException e) {
-                                onDone(e);
-                            }
-                            finally {
-                                cctx.txContextReset();
-                            }
-                        }
-                    });
+                try {
+                    prepareOnTopology(remap, c);
+                }
+                finally {
+                    cctx.txContextReset();
                 }
             });
         }
@@ -208,9 +214,51 @@
     protected abstract void prepare0(boolean remap, boolean topLocked);
 
     /**
+     * @param lockCnt Expected number of lock responses.
+     * @param remap Remap flag.
+     */
+    @SuppressWarnings("unchecked")
+    final void initMvccVersionFuture(int lockCnt, boolean remap) {
+        if (!remap) {
+            mvccVerFut = new MvccSnapshotFutureExt();
+
+            mvccVerFut.init(lockCnt);
+
+            if (keyLockFut != null)
+                keyLockFut.listen(mvccVerFut);
+
+            add((IgniteInternalFuture)mvccVerFut);
+        }
+        else {
+            assert mvccVerFut != null;
+
+            mvccVerFut.init(lockCnt);
+        }
+    }
+
+    /**
+     * @param e Exception.
+     * @param timedOut {@code True} if timed out.
+     */
+    protected boolean errorOrTimeoutOnTopologyVersion(IgniteCheckedException e, boolean timedOut) {
+        if (e != null || timedOut) {
+            if (timedOut)
+                e = tx.timeoutException();
+
+            ERR_UPD.compareAndSet(this, null, e);
+
+            onDone(e);
+
+            return true;
+        }
+
+        return false;
+    }
+
+    /**
      * Keys lock future.
      */
-    protected static class KeyLockFuture extends GridFutureAdapter<GridNearTxPrepareResponse> {
+    protected static class KeyLockFuture extends GridFutureAdapter<Void> {
         /** */
         @GridToStringInclude
         protected Collection<IgniteTxKey> lockKeys = new GridConcurrentHashSet<>();
@@ -245,24 +293,20 @@
             checkLocks();
         }
 
-        /**
-         * @return {@code True} if all locks are owned.
-         */
-        private boolean checkLocks() {
+        /** */
+        private void checkLocks() {
             boolean locked = lockKeys.isEmpty();
 
             if (locked && allKeysAdded) {
                 if (log.isDebugEnabled())
                     log.debug("All locks are acquired for near prepare future: " + this);
 
-                onDone((GridNearTxPrepareResponse)null);
+                onDone((Void)null);
             }
             else {
                 if (log.isDebugEnabled())
                     log.debug("Still waiting for locks [fut=" + this + ", keys=" + lockKeys + ']');
             }
-
-            return locked;
         }
 
         /** {@inheritDoc} */
@@ -270,4 +314,82 @@
             return S.toString(KeyLockFuture.class, this, super.toString());
         }
     }
+
+    /**
+     *
+     */
+    class MvccSnapshotFutureExt extends GridFutureAdapter<Void> implements MvccSnapshotResponseListener, IgniteInClosure<IgniteInternalFuture<Void>> {
+        /** */
+        private static final long serialVersionUID = 5883078648683911226L;
+
+        /** */
+        volatile int lockCnt;
+
+        /** {@inheritDoc} */
+        @Override public void apply(IgniteInternalFuture<Void> keyLockFut) {
+            try {
+                keyLockFut.get();
+
+                onLockReceived();
+            }
+            catch (IgniteCheckedException e) {
+                if (log.isDebugEnabled())
+                    log.debug("MvccSnapshotFutureExt ignores key lock future failure: " + e);
+            }
+        }
+
+        /**
+         * @param lockCnt Expected number of lock responses.
+         */
+        void init(int lockCnt) {
+            assert lockCnt > 0;
+
+            this.lockCnt = lockCnt;
+
+            assert !isDone();
+        }
+
+        /** */
+        void onLockReceived() {
+            int remaining = LOCK_CNT_UPD.decrementAndGet(this);
+
+            assert remaining >= 0 : remaining;
+
+            if (remaining == 0) {
+                try {
+                    MvccSnapshot snapshot = cctx.coordinators().tryRequestSnapshotLocal(tx);
+
+                    if (snapshot != null)
+                        onResponse(snapshot);
+                    else
+                        cctx.coordinators().requestSnapshotAsync(tx, this);
+                }
+                catch (ClusterTopologyCheckedException e) {
+                    onError(e);
+                }
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onResponse(MvccSnapshot res) {
+            tx.mvccSnapshot(res);
+
+            onDone();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onError(IgniteCheckedException e) {
+            if (e instanceof ClusterTopologyCheckedException)
+                ((ClusterTopologyCheckedException)e).retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
+
+            ERR_UPD.compareAndSet(GridNearOptimisticTxPrepareFutureAdapter.this, null, e);
+
+            onDone();
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(MvccSnapshotFutureExt.class, this, super.toString());
+        }
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
index c16a934..dbf54c2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearPessimisticTxPrepareFuture.java
@@ -36,19 +36,23 @@
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxMapping;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotResponseListener;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
-import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.C1;
 import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.noCoordinatorError;
 import static org.apache.ignite.transactions.TransactionState.PREPARED;
 import static org.apache.ignite.transactions.TransactionState.PREPARING;
 
@@ -56,6 +60,9 @@
  *
  */
 public class GridNearPessimisticTxPrepareFuture extends GridNearTxPrepareFutureAdapter {
+    /** */
+    private static final long serialVersionUID = 4014479758215810181L;
+
     /**
      * @param cctx Context.
      * @param tx Transaction.
@@ -81,17 +88,19 @@
         boolean found = false;
 
         for (IgniteInternalFuture<?> fut : futures()) {
-            MiniFuture f = (MiniFuture)fut;
+            if (fut instanceof MiniFuture) {
+                MiniFuture f = (MiniFuture)fut;
 
-            if (f.primary().id().equals(nodeId)) {
-                ClusterTopologyCheckedException e = new ClusterTopologyCheckedException("Remote node left grid: " +
-                    nodeId);
+                if (f.primary().id().equals(nodeId)) {
+                    ClusterTopologyCheckedException e = new ClusterTopologyCheckedException("Remote node left grid: " +
+                        nodeId);
 
-                e.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
+                    e.retryReadyFuture(cctx.nextAffinityReadyFuture(tx.topologyVersion()));
 
-                f.onNodeLeft(e);
+                    f.onNodeLeft(e);
 
-                found = true;
+                    found = true;
+                }
             }
         }
 
@@ -143,13 +152,17 @@
 
             // Avoid iterator creation.
             for (int i = 0; i < size; i++) {
-                MiniFuture mini = (MiniFuture)future(i);
+                IgniteInternalFuture fut = future(i);
 
-                if (mini.futureId() == miniId) {
-                    if (!mini.isDone())
-                        return mini;
-                    else
-                        return null;
+                if (fut instanceof MiniFuture) {
+                    MiniFuture mini = (MiniFuture)fut;
+
+                    if (mini.futureId() == miniId) {
+                        if (!mini.isDone())
+                            return mini;
+                        else
+                            return null;
+                    }
                 }
             }
         }
@@ -218,6 +231,8 @@
             true,
             tx.activeCachesDeploymentEnabled());
 
+        req.queryUpdate(m.queryUpdate());
+
         for (IgniteTxEntry txEntry : writes) {
             if (txEntry.op() == TRANSFORM)
                 req.addDhtVersion(txEntry.txKey(), null);
@@ -232,6 +247,7 @@
      * @param miniId Mini future ID.
      * @param nearEntries {@code True} if prepare near cache entries.
      */
+    @SuppressWarnings("unchecked")
     private void prepareLocal(GridNearTxPrepareRequest req,
         GridDistributedTxMapping m,
         int miniId,
@@ -240,7 +256,7 @@
 
         req.miniId(fut.futureId());
 
-        add(fut);
+        add((IgniteInternalFuture)fut);
 
         IgniteInternalFuture<GridNearTxPrepareResponse> prepFut = nearEntries ?
             cctx.tm().txHandler().prepareNearTxLocal(req) :
@@ -261,6 +277,7 @@
     /**
      *
      */
+    @SuppressWarnings("unchecked")
     private void preparePessimistic() {
         Map<UUID, GridDistributedTxMapping> mappings = new HashMap<>();
 
@@ -268,48 +285,77 @@
 
         GridDhtTxMapping txMapping = new GridDhtTxMapping();
 
+        boolean queryMapped = false;
+
+        for (GridDistributedTxMapping m : F.view(tx.mappings().mappings(), CU.FILTER_QUERY_MAPPING)) {
+            GridDistributedTxMapping nodeMapping = mappings.get(m.primary().id());
+
+            if(nodeMapping == null)
+                mappings.put(m.primary().id(), m);
+
+            txMapping.addMapping(F.asList(m.primary()));
+
+            queryMapped = true;
+        }
+
+        MvccCoordinator mvccCrd = null;
+
         boolean hasNearCache = false;
 
-        for (IgniteTxEntry txEntry : tx.allEntries()) {
-            txEntry.clearEntryReadVersion();
+        if (!queryMapped) {
+            for (IgniteTxEntry txEntry : tx.allEntries()) {
+                txEntry.clearEntryReadVersion();
 
-            GridCacheContext cacheCtx = txEntry.context();
+                GridCacheContext cacheCtx = txEntry.context();
 
-            if (cacheCtx.isNear())
-                hasNearCache = true;
+                if (cacheCtx.isNear())
+                    hasNearCache = true;
 
-            List<ClusterNode> nodes;
+                List<ClusterNode> nodes;
 
-            if (!cacheCtx.isLocal()) {
-                GridDhtPartitionTopology top = cacheCtx.topology();
+                if (!cacheCtx.isLocal()) {
+                    GridDhtPartitionTopology top = cacheCtx.topology();
 
-                nodes = top.nodes(cacheCtx.affinity().partition(txEntry.key()), topVer);
+                    nodes = top.nodes(cacheCtx.affinity().partition(txEntry.key()), topVer);
+                }
+                else
+                    nodes = cacheCtx.affinity().nodesByKey(txEntry.key(), topVer);
+
+                if (tx.mvccSnapshot() == null && mvccCrd == null && cacheCtx.mvccEnabled()) {
+                    mvccCrd = cacheCtx.affinity().mvccCoordinator(topVer);
+
+                    if (mvccCrd == null) {
+                        onDone(noCoordinatorError(topVer));
+
+                        return;
+                    }
+                }
+
+                if (F.isEmpty(nodes)) {
+                    onDone(new ClusterTopologyServerNotFoundException("Failed to map keys to nodes (partition " +
+                        "is not mapped to any node) [key=" + txEntry.key() +
+                        ", partition=" + cacheCtx.affinity().partition(txEntry.key()) + ", topVer=" + topVer + ']'));
+
+                    return;
+                }
+
+                ClusterNode primary = nodes.get(0);
+
+                GridDistributedTxMapping nodeMapping = mappings.get(primary.id());
+
+                if (nodeMapping == null)
+                    mappings.put(primary.id(), nodeMapping = new GridDistributedTxMapping(primary));
+
+                txEntry.nodeId(primary.id());
+
+                nodeMapping.add(txEntry);
+
+                txMapping.addMapping(nodes);
             }
-            else
-                nodes = cacheCtx.affinity().nodesByKey(txEntry.key(), topVer);
-
-            if (F.isEmpty(nodes)) {
-                onDone(new ClusterTopologyServerNotFoundException("Failed to map keys to nodes (partition " +
-                    "is not mapped to any node) [key=" + txEntry.key() +
-                    ", partition=" + cacheCtx.affinity().partition(txEntry.key()) + ", topVer=" + topVer + ']'));
-
-                return;
-            }
-
-            ClusterNode primary = nodes.get(0);
-
-            GridDistributedTxMapping nodeMapping = mappings.get(primary.id());
-
-            if (nodeMapping == null)
-                mappings.put(primary.id(), nodeMapping = new GridDistributedTxMapping(primary));
-
-            txEntry.nodeId(primary.id());
-
-            nodeMapping.add(txEntry);
-
-            txMapping.addMapping(nodes);
         }
 
+        assert !tx.txState().mvccEnabled(cctx) || tx.mvccSnapshot() != null || mvccCrd != null;
+
         tx.transactionNodes(txMapping.transactionNodes());
 
         if (!hasNearCache)
@@ -330,6 +376,16 @@
         for (final GridDistributedTxMapping m : mappings.values()) {
             final ClusterNode primary = m.primary();
 
+            boolean needCntr = false;
+
+            if (mvccCrd != null) {
+                if (tx.onePhaseCommit() || mvccCrd.nodeId().equals(primary.id())) {
+                    needCntr = true;
+
+                    mvccCrd = null;
+                }
+            }
+
             if (primary.isLocal()) {
                 if (m.hasNearCacheEntries() && m.hasColocatedCacheEntries()) {
                     GridNearTxPrepareRequest nearReq = createRequest(txMapping.transactionNodes(),
@@ -338,6 +394,8 @@
                         m.nearEntriesReads(),
                         m.nearEntriesWrites());
 
+                    nearReq.requestMvccCounter(needCntr);
+
                     prepareLocal(nearReq, m, ++miniId, true);
 
                     GridNearTxPrepareRequest colocatedReq = createRequest(txNodes,
@@ -351,6 +409,8 @@
                 else {
                     GridNearTxPrepareRequest req = createRequest(txNodes, m, timeout, m.reads(), m.writes());
 
+                    req.requestMvccCounter(needCntr);
+
                     prepareLocal(req, m, ++miniId, m.hasNearCacheEntries());
                 }
             }
@@ -361,11 +421,13 @@
                     m.reads(),
                     m.writes());
 
+                req.requestMvccCounter(needCntr);
+
                 final MiniFuture fut = new MiniFuture(m, ++miniId);
 
                 req.miniId(fut.futureId());
 
-                add(fut);
+                add((IgniteInternalFuture)fut);
 
                 try {
                     cctx.io().send(primary, req, tx.ioPolicy());
@@ -393,6 +455,16 @@
             }
         }
 
+        if (mvccCrd != null) {
+            assert !tx.onePhaseCommit();
+
+            MvccSnapshotFutureExt fut = new MvccSnapshotFutureExt();
+
+            cctx.coordinators().requestSnapshotAsync(tx, fut);
+
+            add((IgniteInternalFuture)fut);
+        }
+
         markInitialized();
     }
 
@@ -424,20 +496,52 @@
     @Override public String toString() {
         Collection<String> futs = F.viewReadOnly(futures(), new C1<IgniteInternalFuture<?>, String>() {
             @Override public String apply(IgniteInternalFuture<?> f) {
-                return "[node=" + ((MiniFuture)f).primary().id() +
-                    ", loc=" + ((MiniFuture)f).primary().isLocal() +
-                    ", done=" + f.isDone() + "]";
+                if (f instanceof MiniFuture) {
+                    return "[node=" + ((MiniFuture)f).primary().id() +
+                        ", loc=" + ((MiniFuture)f).primary().isLocal() +
+                        ", done=" + f.isDone() + "]";
+                }
+                else
+                    return f.toString();
             }
         });
 
         return S.toString(GridNearPessimisticTxPrepareFuture.class, this,
             "innerFuts", futs,
+            "txId", tx.nearXidVersion(),
             "super", super.toString());
     }
 
     /**
      *
      */
+    private class MvccSnapshotFutureExt extends GridFutureAdapter<Void> implements MvccSnapshotResponseListener {
+        /** {@inheritDoc} */
+        @Override public void onResponse(MvccSnapshot res) {
+            tx.mvccSnapshot(res);
+
+            onDone();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onError(IgniteCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Error on tx prepare [fut=" + this + ", err=" + e + ", tx=" + tx +  ']');
+
+            if (ERR_UPD.compareAndSet(GridNearPessimisticTxPrepareFuture.this, null, e))
+                tx.setRollbackOnly();
+
+            onDone(e);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(MvccSnapshotFutureExt.class, this, super.toString());
+        }
+    }
+
+
+    /** */
     private class MiniFuture extends GridFutureAdapter<GridNearTxPrepareResponse> {
         /** */
         private final int futId;
@@ -476,6 +580,9 @@
             if (res.error() != null)
                 onError(res.error());
             else {
+                if (res.mvccSnapshot() != null)
+                    tx.mvccSnapshot(res.mvccSnapshot());
+
                 onPrepareResponse(m, res, updateMapping);
 
                 onDone(res);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearSingleGetRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearSingleGetRequest.java
index 00ff4bb..cf885e2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearSingleGetRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearSingleGetRequest.java
@@ -26,11 +26,13 @@
 import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
 import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
 
 /**
  *
@@ -81,6 +83,9 @@
     /** TTL for read operation. */
     private long accessTtl;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * Empty constructor required for {@link Message}.
      */
@@ -103,6 +108,7 @@
      * @param addReader Add reader flag.
      * @param needVer {@code True} if entry version is needed.
      * @param addDepInfo Deployment info.
+     * @param mvccSnapshot MVCC snapshot.
      */
     public GridNearSingleGetRequest(
         int cacheId,
@@ -118,7 +124,8 @@
         boolean addReader,
         boolean needVer,
         boolean addDepInfo,
-        boolean recovery
+        boolean recovery,
+        MvccSnapshot mvccSnapshot
     ) {
         assert key != null;
 
@@ -131,6 +138,7 @@
         this.createTtl = createTtl;
         this.accessTtl = accessTtl;
         this.addDepInfo = addDepInfo;
+        this.mvccSnapshot = mvccSnapshot;
 
         if (readThrough)
             flags |= READ_THROUGH_FLAG_MASK;
@@ -149,6 +157,13 @@
     }
 
     /**
+     * @return Mvcc version.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
      * @return Key.
      */
     public KeyCacheObject key() {
@@ -345,6 +360,14 @@
 
                 reader.incrementState();
 
+            case 11:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridNearSingleGetRequest.class);
@@ -413,6 +436,12 @@
 
                 writer.incrementState();
 
+            case 11:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -430,7 +459,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 11;
+        return 12;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTransactionalCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTransactionalCache.java
index 0c5f43d..494f388 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTransactionalCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTransactionalCache.java
@@ -255,7 +255,7 @@
                                         "(added to cancelled locks set): " + req);
                             }
 
-                            ctx.evicts().touch(entry, topVer);
+                            entry.touch(topVer);
                         }
                         else if (log.isDebugEnabled())
                             log.debug("Received unlock request for entry that could not be found: " + req);
@@ -363,7 +363,7 @@
                             );
 
                             if (!req.inTx())
-                                ctx.evicts().touch(entry, req.topologyVersion());
+                                entry.touch(req.topologyVersion());
                         }
                         else {
                             if (evicted == null)
@@ -596,7 +596,7 @@
                         if (topVer.equals(AffinityTopologyVersion.NONE))
                             topVer = ctx.affinity().affinityTopologyVersion();
 
-                        ctx.evicts().touch(entry, topVer);
+                        entry.touch(topVer);
 
                         break;
                     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxAbstractEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxAbstractEnlistFuture.java
new file mode 100644
index 0000000..f484bd6
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxAbstractEnlistFuture.java
@@ -0,0 +1,499 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteFutureCancelledCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheStoppedException;
+import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
+import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
+import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
+import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
+import org.apache.ignite.internal.processors.cache.distributed.dht.CompoundLockFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxAbstractEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public abstract class GridNearTxAbstractEnlistFuture extends GridCacheCompoundIdentityFuture<Long> implements
+    GridCacheVersionedFuture<Long> {
+    /** */
+    private static final long serialVersionUID = -6069985059301497282L;
+
+    /** Done field updater. */
+    private static final AtomicIntegerFieldUpdater<GridNearTxAbstractEnlistFuture> DONE_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(GridNearTxAbstractEnlistFuture.class, "done");
+
+    /** Done field updater. */
+    private static final AtomicReferenceFieldUpdater<GridNearTxAbstractEnlistFuture, Throwable> EX_UPD =
+        AtomicReferenceFieldUpdater.newUpdater(GridNearTxAbstractEnlistFuture.class, Throwable.class, "ex");
+
+    /** Cache context. */
+    @GridToStringExclude
+    protected final GridCacheContext<?, ?> cctx;
+
+    /** Transaction. */
+    protected final GridNearTxLocal tx;
+
+    /** */
+    protected AffinityTopologyVersion topVer;
+
+    /** MVCC snapshot. */
+    protected MvccSnapshot mvccSnapshot;
+
+    /** Logger. */
+    @GridToStringExclude
+    protected final IgniteLogger log;
+
+    /** */
+    protected long timeout;
+
+    /** Initiated thread id. */
+    protected final long threadId;
+
+    /** Mvcc future id. */
+    protected final IgniteUuid futId;
+
+    /** Lock version. */
+    protected final GridCacheVersion lockVer;
+
+    /** */
+    @GridToStringExclude
+    private GridDhtTxAbstractEnlistFuture localEnlistFuture;
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    protected volatile Throwable ex;
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile int done;
+
+    /** Timeout object. */
+    @GridToStringExclude
+    protected LockTimeoutObject timeoutObj;
+
+    /**
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @param timeout Timeout.
+     */
+    public GridNearTxAbstractEnlistFuture(
+        GridCacheContext<?, ?> cctx, GridNearTxLocal tx, long timeout) {
+        super(CU.longReducer());
+
+        assert cctx != null;
+        assert tx != null;
+
+        this.cctx = cctx;
+        this.tx = tx;
+        this.timeout = timeout;
+
+        threadId = tx.threadId();
+        lockVer = tx.xidVersion();
+        futId = IgniteUuid.randomUuid();
+
+        mvccSnapshot = tx.mvccSnapshot();
+
+        assert mvccSnapshot != null;
+
+        log = cctx.logger(getClass());
+    }
+
+    /**
+     *
+     */
+    public void init() {
+        if (timeout < 0) {
+            // Time is out.
+            onDone(timeoutException());
+
+            return;
+        }
+        else if (timeout > 0)
+            timeoutObj = new LockTimeoutObject();
+
+        while(true) {
+            IgniteInternalFuture<?> fut = tx.lockFuture();
+
+            if (fut == GridDhtTxLocalAdapter.ROLLBACK_FUT) {
+                onDone(tx.timedOut() ? tx.timeoutException() : tx.rollbackException());
+
+                return;
+            }
+            else if (fut != null) {
+                // Wait for previous future.
+                assert fut instanceof GridNearTxAbstractEnlistFuture
+                    || fut instanceof GridDhtTxAbstractEnlistFuture
+                    || fut instanceof CompoundLockFuture
+                    || fut instanceof GridNearTxSelectForUpdateFuture : fut;
+
+                // Terminate this future if parent future is terminated by rollback.
+                if (!fut.isDone()) {
+                    fut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                        @Override public void apply(IgniteInternalFuture fut) {
+                            if (fut.error() != null)
+                                onDone(fut.error());
+                        }
+                    });
+                }
+                else if (fut.error() != null)
+                    onDone(fut.error());
+
+                break;
+            }
+            else if (tx.updateLockFuture(null, this))
+                break;
+        }
+
+        boolean added = cctx.mvcc().addFuture(this);
+
+        assert added : this;
+
+        if (isDone()) {
+            cctx.mvcc().removeFuture(futId);
+
+            return;
+        }
+
+        try {
+            tx.addActiveCache(cctx, false);
+        }
+        catch (IgniteCheckedException e) {
+            onDone(e);
+
+            return;
+        }
+
+        if (timeoutObj != null)
+            cctx.time().addTimeoutObject(timeoutObj);
+
+        // Obtain the topology version to use.
+        long threadId = Thread.currentThread().getId();
+
+        AffinityTopologyVersion topVer = cctx.mvcc().lastExplicitLockTopologyVersion(threadId);
+
+        // If there is another system transaction in progress, use it's topology version to prevent deadlock.
+        if (topVer == null && tx.system())
+            topVer = cctx.tm().lockedTopologyVersion(threadId, tx);
+
+        if (topVer != null)
+            tx.topologyVersion(topVer);
+
+        if (topVer == null)
+            topVer = tx.topologyVersionSnapshot();
+
+        if (topVer != null) {
+            for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) {
+                if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) {
+                    Throwable err = fut.validateCache(cctx, false, false, null, null);
+
+                    if (err != null) {
+                        onDone(err);
+
+                        return;
+                    }
+
+                    break;
+                }
+            }
+
+            if (this.topVer == null)
+                this.topVer = topVer;
+
+            map(true);
+
+            return;
+        }
+
+        mapOnTopology();
+    }
+
+    /**
+     * @param node Primary node.
+     * @throws IgniteCheckedException if future is already completed.
+     */
+    protected synchronized void updateMappings(ClusterNode node) throws IgniteCheckedException {
+        checkCompleted();
+
+        IgniteTxMappings m = tx.mappings();
+
+        GridDistributedTxMapping mapping = m.get(node.id());
+
+        if (mapping == null)
+            m.put(mapping = new GridDistributedTxMapping(node));
+
+        mapping.markQueryUpdate();
+
+        if (node.isLocal())
+            tx.colocatedLocallyMapped(true);
+    }
+
+    /**
+     * @param fut Local enlist future.
+     * @throws IgniteCheckedException if future is already completed.
+     */
+    protected synchronized void updateLocalFuture(GridDhtTxAbstractEnlistFuture fut) throws IgniteCheckedException {
+        checkCompleted();
+
+        assert localEnlistFuture == null;
+
+        localEnlistFuture = fut;
+    }
+
+    /**
+     * @param fut Local enlist future.
+     * @throws IgniteCheckedException if future is already completed.
+     */
+    protected synchronized void clearLocalFuture(GridDhtTxAbstractEnlistFuture fut) throws IgniteCheckedException {
+        checkCompleted();
+
+        if (localEnlistFuture == fut)
+            localEnlistFuture = null;
+    }
+
+    /**
+     * @throws IgniteCheckedException if future is already completed.
+     */
+    protected void checkCompleted() throws IgniteCheckedException {
+        if (isDone())
+            throw new IgniteCheckedException("Future is done.");
+    }
+
+
+
+    /**
+     */
+    private void mapOnTopology() {
+        cctx.topology().readLock();
+
+        try {
+            if (cctx.topology().stopping()) {
+                onDone(new CacheStoppedException(cctx.name()));
+
+                return;
+            }
+
+            GridDhtTopologyFuture fut = cctx.topologyVersionFuture();
+
+            if (fut.isDone()) {
+                Throwable err = fut.validateCache(cctx, false, false, null, null);
+
+                if (err != null) {
+                    onDone(err);
+
+                    return;
+                }
+
+                AffinityTopologyVersion topVer = fut.topologyVersion();
+
+                if (tx != null)
+                    tx.topologyVersion(topVer);
+
+                if (this.topVer == null)
+                    this.topVer = topVer;
+
+                map(false);
+            }
+            else {
+                fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
+                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
+                        try {
+                            fut.get();
+
+                            mapOnTopology();
+                        }
+                        catch (IgniteCheckedException e) {
+                            onDone(e);
+                        }
+                        finally {
+                            cctx.shared().txContextReset();
+                        }
+                    }
+                });
+            }
+        }
+        finally {
+            cctx.topology().readUnlock();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean processFailure(Throwable err, IgniteInternalFuture<Long> fut) {
+        if (ex != null || !EX_UPD.compareAndSet(this, null, err))
+            ex.addSuppressed(err);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onDone(@Nullable Long res, @Nullable Throwable err, boolean cancelled) {
+        if (!DONE_UPD.compareAndSet(this, 0, 1))
+            return false;
+
+        cctx.tm().txContext(tx);
+
+        Throwable ex0 = ex;
+
+        if (ex0 != null) {
+            if (err != null)
+                ex0.addSuppressed(err);
+
+            err = ex0;
+        }
+
+        if (!cancelled && err == null)
+            tx.clearLockFuture(this);
+        else
+            tx.setRollbackOnly();
+
+        synchronized (this) {
+            boolean done = super.onDone(res, err, cancelled);
+
+            assert done;
+
+            GridDhtTxAbstractEnlistFuture localFuture0 = localEnlistFuture;
+
+            if (localFuture0 != null && (err != null || cancelled))
+                localFuture0.onDone(cancelled ? new IgniteFutureCancelledCheckedException("Future was cancelled: " + localFuture0) : err);
+
+            // Clean up.
+            cctx.mvcc().removeVersionedFuture(this);
+
+            if (timeoutObj != null)
+                cctx.time().removeTimeoutObject(timeoutObj);
+
+            return true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void logError(IgniteLogger log, String msg, Throwable e) {
+        // no-op
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void logDebug(IgniteLogger log, String msg) {
+        // no-op
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean trackable() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void markNotTrackable() {
+        // No-op.
+    }
+
+    @Override public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    @Override public boolean onOwnerChanged(GridCacheEntryEx entry, GridCacheMvccCandidate owner) {
+        return false;
+    }
+
+    @Override public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * Gets remaining allowed time.
+     *
+     * @return Remaining time. {@code 0} if timeout isn't specified. {@code -1} if time is out.
+     * @throws IgniteTxTimeoutCheckedException If timed out.
+     */
+    protected long remainingTime() throws IgniteTxTimeoutCheckedException {
+        if (timeout <= 0)
+            return 0;
+
+        long timeLeft = timeout - (U.currentTimeMillis() - startTime());
+
+        if (timeLeft <= 0)
+            throw timeoutException();
+
+        return timeLeft;
+    }
+
+    /**
+     * @return Timeout exception.
+     */
+    @NotNull protected IgniteTxTimeoutCheckedException timeoutException() {
+        return new IgniteTxTimeoutCheckedException("Failed to acquire lock within provided timeout for " +
+            "transaction [timeout=" + timeout + ", tx=" + tx + ']');
+    }
+
+    /**
+     * Start iterating the data rows and form batches.
+     *
+     * @param topLocked Whether topology was already locked.
+     */
+    protected abstract void map(boolean topLocked);
+
+    /**
+     * Lock request timeout object.
+     */
+    private class LockTimeoutObject extends GridTimeoutObjectAdapter {
+        /**
+         * Default constructor.
+         */
+        LockTimeoutObject() {
+            super(timeout);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onTimeout() {
+            if (log.isDebugEnabled())
+                log.debug("Timed out waiting for lock response: " + this);
+
+            onDone(timeoutException());
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(LockTimeoutObject.class, this);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFastFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFastFinishFuture.java
index 53d901a..1369f39 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFastFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFastFinishFuture.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache.distributed.near;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 
@@ -51,10 +52,15 @@
         return commit;
     }
 
+    /** {@inheritDoc} */
+    @Override public GridNearTxLocal tx() {
+        return tx;
+    }
+
     /**
      * @param clearThreadMap {@code True} if need remove tx from thread map.
      */
-    public void finish(boolean clearThreadMap) {
+    @Override public void finish(boolean commit, boolean clearThreadMap, boolean onTimeout) {
         try {
             if (commit) {
                 tx.state(PREPARING);
@@ -79,4 +85,9 @@
             onDone(tx);
         }
     }
+
+    /** {@inheritDoc} */
+    @Override public void onNodeStop(IgniteCheckedException e) {
+        onDone(tx, e);
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishAndAckFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishAndAckFuture.java
new file mode 100644
index 0000000..a3a5cdb
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishAndAckFuture.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteInClosure;
+
+/**
+ *
+ */
+public class GridNearTxFinishAndAckFuture extends GridFutureAdapter<IgniteInternalTx> implements NearTxFinishFuture {
+    /** */
+    private final NearTxFinishFuture finishFut;
+
+    /**
+     * @param finishFut Finish future.
+     */
+    GridNearTxFinishAndAckFuture(NearTxFinishFuture finishFut) {
+        this.finishFut = finishFut;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean commit() {
+        return finishFut.commit();
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridNearTxLocal tx() {
+        return finishFut.tx();
+    }
+
+    /** {@inheritDoc} */
+    @Override @SuppressWarnings("unchecked")
+    public void finish(boolean commit, boolean clearThreadMap, boolean onTimeout) {
+        finishFut.finish(commit, clearThreadMap, onTimeout);
+
+        if (finishFut.commit()) {
+            finishFut.listen((IgniteInClosure)new IgniteInClosure<NearTxFinishFuture>() {
+                @Override public void apply(final NearTxFinishFuture fut) {
+                    GridNearTxLocal tx = fut.tx();
+
+                    IgniteInternalFuture<Void> ackFut = null;
+
+                    MvccQueryTracker tracker = tx.mvccQueryTracker();
+
+                    MvccSnapshot mvccSnapshot = tx.mvccSnapshot();
+
+                    if (tracker != null)
+                        ackFut = tracker.onDone(tx, commit);
+                    else if (mvccSnapshot != null) {
+                        if (commit)
+                            ackFut = tx.context().coordinators().ackTxCommit(mvccSnapshot);
+                        else
+                            tx.context().coordinators().ackTxRollback(mvccSnapshot);
+                    }
+
+                    if (ackFut != null) {
+                        ackFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
+                            @Override public void apply(IgniteInternalFuture<Void> ackFut) {
+                                Exception err = null;
+
+                                try {
+                                    fut.get();
+
+                                    ackFut.get();
+                                }
+                                catch (Exception e) {
+                                    err = e;
+                                }
+                                catch (Error e) {
+                                    onDone(e);
+
+                                    throw e;
+                                }
+
+                                if (err != null)
+                                    onDone(err);
+                                else
+                                    onDone(fut.tx());
+                            }
+                        });
+                    }
+                    else
+                        finishWithFutureResult(fut);
+                }
+            });
+        }
+        else {
+            finishFut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                @Override public void apply(IgniteInternalFuture fut) {
+                    finishWithFutureResult(fut);
+                }
+            });
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onNodeStop(IgniteCheckedException e) {
+        super.onDone(finishFut.tx(), e);
+    }
+
+    /**
+     * @param fut Future.
+     */
+    private void finishWithFutureResult(IgniteInternalFuture<IgniteInternalTx> fut) {
+        try {
+            onDone(fut.get());
+        }
+        catch (IgniteCheckedException | RuntimeException e) {
+            onDone(e);
+        }
+        catch (Error e) {
+            onDone(e);
+
+            throw e;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxFinishAndAckFuture.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
index ede8a4e..cea3030 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishFuture.java
@@ -43,11 +43,15 @@
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxFinishRequest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxFinishResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccCoordinator;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.C1;
@@ -136,6 +140,13 @@
         return commit;
     }
 
+    /**
+     * @return Cache context.
+     */
+    GridCacheSharedContext<K, V> context() {
+        return cctx;
+    }
+
     /** {@inheritDoc} */
     @Override public IgniteUuid futureId() {
         return futId;
@@ -165,7 +176,7 @@
     /**
      * @return Transaction.
      */
-    public GridNearTxLocal tx() {
+    @Override public GridNearTxLocal tx() {
         return tx;
     }
 
@@ -390,15 +401,12 @@
             fut.getClass() == CheckRemoteTxMiniFuture.class;
     }
 
-    /**
-     * Initializes future.
-     *
-     * @param commit Commit flag.
-     * @param clearThreadMap If {@code true} removes {@link GridNearTxLocal} from thread map.
-     * @param onTimeout If {@code true} called from timeout handler.
-     */
-    @SuppressWarnings("ForLoopReplaceableByForEach")
+    /** {@inheritDoc} */
+    @Override @SuppressWarnings("ForLoopReplaceableByForEach")
     public void finish(final boolean commit, final boolean clearThreadMap, final boolean onTimeout) {
+        if (!cctx.mvcc().addFuture(this, futureId()))
+            return;
+
         if (tx.onNeedCheckBackup()) {
             assert tx.onePhaseCommit();
 
@@ -412,19 +420,19 @@
         }
 
         if (!commit && !clearThreadMap)
-            tryRollbackAsync(onTimeout); // Asynchronous rollback.
+            rollbackAsyncSafe(onTimeout);
         else
             doFinish(commit, clearThreadMap);
     }
 
     /**
-     * Does async rollback when it's safe.
-     * If current future is not lock future (enlist future) waits until completion and tries again.
-     * Else terminates or waits for lock future depending on rollback mode.
+     * Rollback tx when it's safe.
+     * If current future is not lock future (enlist future) wait until completion and tries again.
+     * Else cancel lock future (onTimeout=false) or wait for completion due to deadlock detection (onTimeout=true).
      *
      * @param onTimeout If {@code true} called from timeout handler.
      */
-    private void tryRollbackAsync(boolean onTimeout) {
+    private void rollbackAsyncSafe(boolean onTimeout) {
         IgniteInternalFuture<?> curFut = tx.tryRollbackAsync();
 
         if (curFut == null) { // Safe to rollback.
@@ -447,7 +455,7 @@
                 try {
                     fut.get();
 
-                    tryRollbackAsync(onTimeout);
+                    rollbackAsyncSafe(onTimeout);
                 }
                 catch (IgniteCheckedException e) {
                     doFinish(false, false);
@@ -465,18 +473,38 @@
     private void doFinish(boolean commit, boolean clearThreadMap) {
         try {
             if (tx.localFinish(commit, clearThreadMap) || (!commit && tx.state() == UNKNOWN)) {
+                GridLongList waitTxs = tx.mvccWaitTransactions();
+
+                if (waitTxs != null) {
+                    MvccSnapshot snapshot = tx.mvccSnapshot();
+
+                    MvccCoordinator crd = cctx.coordinators().currentCoordinator();
+
+                    assert snapshot != null;
+
+                    if (snapshot.coordinatorVersion() == crd.coordinatorVersion()) {
+                        IgniteInternalFuture fut = cctx.coordinators()
+                            .waitTxsFuture(cctx.coordinators().currentCoordinatorId(), waitTxs);
+
+                        add(fut);
+                    }
+                }
+
                 if ((tx.onePhaseCommit() && needFinishOnePhase(commit)) || (!tx.onePhaseCommit() && mappings != null)) {
                     if (mappings.single()) {
                         GridDistributedTxMapping mapping = mappings.singleMapping();
 
                         if (mapping != null) {
-                            assert !hasFutures() : futures();
+                            assert !hasFutures() || waitTxs != null : futures();
 
                             finish(1, mapping, commit, !clearThreadMap);
                         }
                     }
-                    else
+                    else {
+                        assert !hasFutures() || waitTxs != null : futures();
+
                         finish(mappings.mappings(), commit, !clearThreadMap);
+                    }
                 }
 
                 markInitialized();
@@ -501,6 +529,11 @@
         }
     }
 
+    /** {@inheritDoc} */
+    @Override public void onNodeStop(IgniteCheckedException e) {
+        super.onDone(tx, e);
+    }
+
     /**
      *
      */
@@ -732,8 +765,6 @@
      * @param {@code true} If need to add completed version on finish.
      */
     private void finish(Iterable<GridDistributedTxMapping> mappings, boolean commit, boolean useCompletedVer) {
-        assert !hasFutures() : futures();
-
         int miniId = 0;
 
         // Create mini futures.
@@ -750,11 +781,11 @@
     private void finish(int miniId, GridDistributedTxMapping m, boolean commit, boolean useCompletedVer) {
         ClusterNode n = m.primary();
 
-        assert !m.empty() : m + " " + tx.state();
+        assert !m.empty() || m.queryUpdate() : m + " " + tx.state();
 
         CacheWriteSynchronizationMode syncMode = tx.syncMode();
 
-        if (m.explicitLock())
+        if (m.explicitLock() || m.queryUpdate())
             syncMode = FULL_SYNC;
 
         GridNearTxFinishRequest req = new GridNearTxFinishRequest(
@@ -775,6 +806,7 @@
             tx.size(),
             tx.subjectId(),
             tx.taskNameHash(),
+            tx.mvccSnapshot(),
             tx.activeCachesDeploymentEnabled()
         );
 
@@ -869,6 +901,11 @@
 
                     return "CheckRemoteTxMiniFuture[nodes=" + fut.nodes() + ", done=" + f.isDone() + "]";
                 }
+                else if (f instanceof MvccFuture) {
+                    MvccFuture fut = (MvccFuture)f;
+
+                    return "WaitPreviousTxsFut[mvccCrd=" + fut.coordinatorNodeId() + ", done=" + f.isDone() + "]";
+                }
                 else
                     return "[loc=true, done=" + f.isDone() + "]";
             }
@@ -909,7 +946,8 @@
             0,
             tx.activeCachesDeploymentEnabled(),
             !waitRemoteTxs && (tx.needReturnValue() && tx.implicit()),
-            waitRemoteTxs);
+            waitRemoteTxs,
+            null);
 
         finishReq.checkCommitted(true);
 
@@ -978,7 +1016,7 @@
         }
 
         /** {@inheritDoc} */
-        boolean onNodeLeft(UUID nodeId, boolean discoThread) {
+        @Override boolean onNodeLeft(UUID nodeId, boolean discoThread) {
             if (nodeId.equals(m.primary().id())) {
                 if (msgLog.isDebugEnabled()) {
                     msgLog.debug("Near finish fut, mini future node left [txId=" + tx.nearXidVersion() +
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishRequest.java
index 00c29e5..6b5aa90 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxFinishRequest.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxFinishRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.tostring.GridToStringBuilder;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -43,6 +44,9 @@
     /** Mini future ID. */
     private int miniId;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -88,6 +92,7 @@
         int txSize,
         @Nullable UUID subjId,
         int taskNameHash,
+        MvccSnapshot mvccSnapshot,
         boolean addDepInfo) {
         super(
             xidVer,
@@ -111,6 +116,15 @@
 
         explicitLock(explicitLock);
         storeEnabled(storeEnabled);
+
+        this.mvccSnapshot = mvccSnapshot;
+    }
+
+    /**
+     * @return Mvcc info.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
     }
 
     /**
@@ -178,6 +192,12 @@
 
                 writer.incrementState();
 
+            case 22:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -202,6 +222,14 @@
 
                 reader.incrementState();
 
+            case 22:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridNearTxFinishRequest.class);
@@ -214,7 +242,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 22;
+        return 23;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
index 65e1ea4..d03d0d3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java
@@ -47,10 +47,10 @@
 import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
 import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
-import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheOperation;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedCacheEntry;
@@ -61,6 +61,9 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtDetachedCacheEntry;
 import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTrackerImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
@@ -68,6 +71,7 @@
 import org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl;
 import org.apache.ignite.internal.processors.cache.transactions.TransactionProxyRollbackOnlyImpl;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
 import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
 import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
@@ -134,6 +138,10 @@
     private static final AtomicReferenceFieldUpdater<GridNearTxLocal, NearTxFinishFuture> FINISH_FUT_UPD =
         AtomicReferenceFieldUpdater.newUpdater(GridNearTxLocal.class, NearTxFinishFuture.class, "finishFut");
 
+    /** */
+    private static final String TX_TYPE_MISMATCH_ERR_MSG =
+        "SQL queries and cache operations may not be used in the same transaction.";
+
     /** DHT mappings. */
     private IgniteTxMappings mappings;
 
@@ -179,6 +187,15 @@
     /** Tx label. */
     private @Nullable String lb;
 
+    /** */
+    private MvccQueryTracker mvccTracker;
+
+    /** Whether this transaction is for SQL operations or not.<p>
+     * {@code null} means there haven't been any calls made on this transaction, and first operation will give this
+     * field actual value.
+     */
+    private Boolean sql;
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -196,6 +213,7 @@
      * @param isolation Isolation.
      * @param timeout Timeout.
      * @param storeEnabled Store enabled flag.
+     * @param sql Whether this transaction was started via SQL API or not, or {@code null} if unknown.
      * @param txSize Transaction size.
      * @param subjId Subject ID.
      * @param taskNameHash Task name hash code.
@@ -211,6 +229,7 @@
         TransactionIsolation isolation,
         long timeout,
         boolean storeEnabled,
+        Boolean sql,
         int txSize,
         @Nullable UUID subjId,
         int taskNameHash,
@@ -237,11 +256,20 @@
 
         mappings = implicitSingle ? new IgniteTxMappingsSingleImpl() : new IgniteTxMappingsImpl();
 
+        this.sql = sql;
+
         initResult();
 
         trackTimeout = timeout() > 0 && !implicit() && cctx.time().addTimeoutObject(this);
     }
 
+    /**
+     * @return Mvcc query version tracker.
+     */
+    public MvccQueryTracker mvccQueryTracker() {
+        return mvccTracker;
+    }
+
     /** {@inheritDoc} */
     @Override public boolean near() {
         return true;
@@ -546,7 +574,7 @@
         assert key != null;
 
         try {
-            beforePut(cacheCtx, retval);
+            beforePut(cacheCtx, retval, false);
 
             final GridCacheReturn ret = new GridCacheReturn(localResult(), false);
 
@@ -689,7 +717,7 @@
         final boolean retval
     ) {
         try {
-            beforePut(cacheCtx, retval);
+            beforePut(cacheCtx, retval, false);
         }
         catch (IgniteCheckedException e) {
             return new GridFinishedFuture(e);
@@ -855,6 +883,8 @@
      * @param ret Return value.
      * @param skipStore Skip store flag.
      * @param singleRmv {@code True} for single key remove operation ({@link Cache#remove(Object)}.
+     * @param recovery Recovery flag.
+     * @param dataCenterId Optional data center Id.
      * @return Future for entry values loading.
      */
     private <K, V> IgniteInternalFuture<Void> enlistWrite(
@@ -978,6 +1008,7 @@
      * @param skipStore Skip store flag.
      * @param singleRmv {@code True} for single key remove operation ({@link Cache#remove(Object)}.
      * @param keepBinary Keep binary flag.
+     * @param recovery Recovery flag.
      * @param dataCenterId Optional data center ID.
      * @return Future for enlisting writes.
      */
@@ -1245,6 +1276,7 @@
                                         resolveTaskName(),
                                         null,
                                         keepBinary,
+                                        null, // TODO IGNITE-7371
                                         null) : null;
 
                                 if (res != null) {
@@ -1263,11 +1295,12 @@
                                     entryProcessor,
                                     resolveTaskName(),
                                     null,
-                                    keepBinary);
+                                    keepBinary,
+                                    null); // TODO IGNITE-7371
                             }
                         }
                         catch (ClusterTopologyCheckedException e) {
-                            entry.context().evicts().touch(entry, topologyVersion());
+                            entry.touch(topologyVersion());
 
                             throw e;
                         }
@@ -1325,7 +1358,7 @@
                         }
 
                         if (readCommitted())
-                            cacheCtx.evicts().touch(entry, topologyVersion());
+                            entry.touch(topologyVersion());
 
                         break; // While.
                     }
@@ -1524,6 +1557,9 @@
 
         cacheCtx.checkSecurity(SecurityPermission.CACHE_REMOVE);
 
+        if (cacheCtx.mvccEnabled() && !isOperationAllowed(false))
+            return txTypeMismatchFinishFuture();
+
         if (retval)
             needReturnValue(true);
 
@@ -1732,6 +1768,109 @@
     }
 
     /**
+     * @param cctx Cache context.
+     * @return Mvcc snapshot for read inside tx (initialized once for OPTIMISTIC SERIALIZABLE and REPEATABLE_READ txs).
+     */
+    private MvccSnapshot mvccReadSnapshot(GridCacheContext cctx) {
+        if (!cctx.mvccEnabled() || mvccTracker == null)
+            return null;
+
+        return mvccTracker.snapshot();
+    }
+
+    /**
+     * @param cacheCtx Cache context.
+     * @param cacheIds Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Timeout.
+     * @return Operation future.
+     */
+    public IgniteInternalFuture<Long> updateAsync(GridCacheContext cacheCtx,
+        int[] cacheIds, int[] parts, String schema, String qry, Object[] params,
+        int flags, int pageSize, long timeout) {
+        try {
+            beforePut(cacheCtx, false, true);
+
+            return updateAsync(new GridNearTxQueryEnlistFuture(
+                cacheCtx,
+                this,
+                cacheIds,
+                parts,
+                schema,
+                qry,
+                params,
+                flags,
+                pageSize,
+                timeout));
+        }
+        catch (IgniteCheckedException e) {
+            return new GridFinishedFuture(e);
+        }
+        catch (RuntimeException e) {
+            onException();
+
+            throw e;
+        }
+    }
+
+    /**
+     * @param cacheCtx Cache context.
+     * @param it Entries iterator.
+     * @param pageSize Page size.
+     * @param timeout Timeout.
+     * @param sequential Sequential locking flag.
+     * @return Operation future.
+     */
+    public IgniteInternalFuture<Long> updateAsync(GridCacheContext cacheCtx,
+        UpdateSourceIterator<?> it, int pageSize, long timeout, boolean sequential) {
+        try {
+            beforePut(cacheCtx, false, true);
+
+            return updateAsync(new GridNearTxQueryResultsEnlistFuture(cacheCtx, this,
+                timeout, it, pageSize, sequential));
+        }
+        catch (IgniteCheckedException e) {
+            return new GridFinishedFuture(e);
+        }
+        catch (RuntimeException e) {
+            onException();
+
+            throw e;
+        }
+    }
+
+    /**
+     * @param fut Enlist future.
+     * @return Operation future.
+     */
+    public IgniteInternalFuture<Long> updateAsync(GridNearTxAbstractEnlistFuture fut) {
+        fut.init();
+
+        return nonInterruptable(new GridEmbeddedFuture<>(fut.chain(new CX1<IgniteInternalFuture<Long>, Boolean>() {
+            @Override public Boolean applyx(IgniteInternalFuture<Long> fut0) throws IgniteCheckedException {
+                return fut0.get() != null;
+            }
+        }), new PLC1<Long>(null) {
+            @Override protected Long postLock(Long val) throws IgniteCheckedException {
+                Long res = fut.get();
+
+                assert mvccSnapshot != null;
+                assert res != null;
+
+                if (res > 0)
+                    mvccSnapshot.incrementOperationCounter();
+
+                return res;
+            }
+        }));
+    }
+
+    /**
      * @param cacheCtx Cache context.
      * @param keys Keys to get.
      * @param deserializeBinary Deserialize binary flag.
@@ -1744,7 +1883,7 @@
     public <K, V> IgniteInternalFuture<Map<K, V>> getAllAsync(
         final GridCacheContext cacheCtx,
         @Nullable final AffinityTopologyVersion entryTopVer,
-        Collection<KeyCacheObject> keys,
+        final Collection<KeyCacheObject> keys,
         final boolean deserializeBinary,
         final boolean skipVals,
         final boolean keepCacheObjects,
@@ -1754,8 +1893,36 @@
         if (F.isEmpty(keys))
             return new GridFinishedFuture<>(Collections.<K, V>emptyMap());
 
+        if (cacheCtx.mvccEnabled() && !isOperationAllowed(false))
+            return txTypeMismatchFinishFuture();
+
         init();
 
+        if (cacheCtx.mvccEnabled() && (optimistic() && !readCommitted()) && mvccTracker == null) {
+            // TODO IGNITE-7388: support async tx rollback (e.g. on timeout).
+            boolean canRemap = cctx.lockedTopologyVersion(null) == null;
+
+            mvccTracker = new MvccQueryTrackerImpl(cacheCtx, canRemap);
+
+            return new GridEmbeddedFuture<>(mvccTracker.requestSnapshot(topologyVersion()),
+                new IgniteBiClosure<MvccSnapshot, Exception, IgniteInternalFuture<Map<K, V>>>() {
+                @Override public IgniteInternalFuture<Map<K, V>> apply(MvccSnapshot snapshot, Exception e) {
+                    if (e != null)
+                        return new GridFinishedFuture<>(e);
+
+                    return getAllAsync(cacheCtx,
+                        entryTopVer,
+                        keys,
+                        deserializeBinary,
+                        skipVals,
+                        keepCacheObjects,
+                        skipStore,
+                        recovery,
+                        needVer);
+                }
+            });
+        }
+
         int keysCnt = keys.size();
 
         boolean single = keysCnt == 1;
@@ -1878,6 +2045,7 @@
                                             resolveTaskName(),
                                             null,
                                             txEntry.keepBinary(),
+                                            null, // TODO IGNITE-7371
                                             null);
 
                                         if (getRes != null) {
@@ -1896,7 +2064,8 @@
                                             transformClo,
                                             resolveTaskName(),
                                             null,
-                                            txEntry.keepBinary());
+                                            txEntry.keepBinary(),
+                                            null); // TODO IGNITE-7371
                                     }
 
                                     // If value is in cache and passed the filter.
@@ -2057,6 +2226,7 @@
      * @param skipVals Skip values flag.
      * @param keepCacheObjects Keep cache objects flag.
      * @param skipStore Skip store flag.
+     * @param recovery Recovery flag..
      * @throws IgniteCheckedException If failed.
      * @return Enlisted keys.
      */
@@ -2175,7 +2345,8 @@
                                     resolveTaskName(),
                                     null,
                                     txEntry.keepBinary(),
-                                    null);
+                                    null,
+                                    null); // TODO IGNITE-7371
 
                                 if (getRes != null) {
                                     val = getRes.value();
@@ -2193,7 +2364,8 @@
                                     transformClo,
                                     resolveTaskName(),
                                     null,
-                                    txEntry.keepBinary());
+                                    txEntry.keepBinary(),
+                                    null); // TODO IGNITE-7371
                             }
 
                             if (val != null) {
@@ -2261,6 +2433,7 @@
                                         resolveTaskName(),
                                         accessPlc,
                                         !deserializeBinary,
+                                        mvccReadSnapshot(cacheCtx), // TODO IGNITE-7371
                                         null) : null;
 
                                 if (getRes != null) {
@@ -2279,7 +2452,8 @@
                                     null,
                                     resolveTaskName(),
                                     accessPlc,
-                                    !deserializeBinary);
+                                    !deserializeBinary,
+                                    mvccReadSnapshot(cacheCtx)); // TODO IGNITE-7371
                             }
 
                             if (val != null) {
@@ -2347,7 +2521,7 @@
                                 }
                             }
                             else
-                                entry.context().evicts().touch(entry, topVer);
+                                entry.touch(topVer);
                         }
                     }
                 }
@@ -2466,6 +2640,13 @@
     }
 
     /**
+     * @return Finished future with error message about tx type mismatch.
+     */
+    private static IgniteInternalFuture txTypeMismatchFinishFuture() {
+        return new GridFinishedFuture(new IgniteCheckedException(TX_TYPE_MISMATCH_ERR_MSG));
+    }
+
+    /**
      * @param cacheCtx Cache context.
      * @param loadFut Missing keys load future.
      * @param ret Future result.
@@ -2529,7 +2710,7 @@
             GridCacheEntryEx cached0 = txEntry.cached();
 
             if (cached0 != null)
-                txEntry.context().evicts().touch(cached0, topologyVersion());
+                cached0.touch(topologyVersion());
         }
     }
 
@@ -2551,7 +2732,7 @@
      * @param expiryPlc Expiry policy.
      * @return Future with {@code True} value if loading took place.
      */
-    public IgniteInternalFuture<Void> loadMissing(
+    private IgniteInternalFuture<Void> loadMissing(
         final GridCacheContext cacheCtx,
         AffinityTopologyVersion topVer,
         boolean readThrough,
@@ -2610,7 +2791,8 @@
                     skipVals,
                     needVer,
                     /*keepCacheObject*/true,
-                    recovery
+                    recovery,
+                    mvccReadSnapshot(cacheCtx)
                 ).chain(new C1<IgniteInternalFuture<Object>, Void>() {
                     @Override public Void apply(IgniteInternalFuture<Object> f) {
                         try {
@@ -2641,7 +2823,8 @@
                     expiryPlc0,
                     skipVals,
                     needVer,
-                    /*keepCacheObject*/true
+                    /*keepCacheObject*/true,
+                    mvccReadSnapshot(cacheCtx)
                 ).chain(new C1<IgniteInternalFuture<Map<Object, Object>>, Void>() {
                     @Override public Void apply(IgniteInternalFuture<Map<Object, Object>> f) {
                         try {
@@ -2738,7 +2921,8 @@
                             resolveTaskName(),
                             expiryPlc0,
                             txEntry == null ? keepBinary : txEntry.keepBinary(),
-                            null);
+                            null,
+                            null); // TODO IGNITE-7371
 
                         if (res == null) {
                             if (misses == null)
@@ -3158,7 +3342,7 @@
                 if (state != COMMITTING && state != COMMITTED)
                     throw isRollbackOnly() ? timedOut() ? timeoutException() : rollbackException() :
                         new IgniteCheckedException("Invalid transaction state for commit [state=" + state() +
-                        ", tx=" + this + ']');
+                            ", tx=" + this + ']');
                 else {
                     if (log.isDebugEnabled())
                         log.debug("Invalid transaction state for commit (another thread is committing): " + this);
@@ -3307,56 +3491,44 @@
         if (log.isDebugEnabled())
             log.debug("Committing near local tx: " + this);
 
-        NearTxFinishFuture fut = finishFut;
+        final NearTxFinishFuture fut, fut0 = finishFut; boolean fastFinish;
 
-        if (fut != null)
-            return chainFinishFuture(fut, true, true, false);
-
-        if (fastFinish()) {
-            GridNearTxFastFinishFuture fut0;
-
-            if (!FINISH_FUT_UPD.compareAndSet(this, null, fut0 = new GridNearTxFastFinishFuture(this, true)))
-                return chainFinishFuture(finishFut, true, true, false);
-
-            fut0.finish(false);
-
-            return fut0;
-        }
-
-        final GridNearTxFinishFuture fut0;
-
-        if (!FINISH_FUT_UPD.compareAndSet(this, null, fut0 = new GridNearTxFinishFuture<>(cctx, this, true)))
+        if (fut0 != null || !FINISH_FUT_UPD.compareAndSet(this, null, fut = finishFuture(fastFinish = fastFinish(), true)))
             return chainFinishFuture(finishFut, true, true, false);
 
-        cctx.mvcc().addFuture(fut0, fut0.futureId());
+        if (!fastFinish) {
+            final IgniteInternalFuture<?> prepareFut = prepareNearTxLocal();
 
-        final IgniteInternalFuture<?> prepareFut = prepareNearTxLocal();
+            prepareFut.listen(new CI1<IgniteInternalFuture<?>>() {
+                @Override public void apply(IgniteInternalFuture<?> f) {
+                    try {
+                        // Make sure that here are no exceptions.
+                        prepareFut.get();
 
-        prepareFut.listen(new CI1<IgniteInternalFuture<?>>() {
-            @Override public void apply(IgniteInternalFuture<?> f) {
-                try {
-                    // Make sure that here are no exceptions.
-                    prepareFut.get();
+                        fut.finish(true, true, false);
+                    }
+                    catch (Error | RuntimeException e) {
+                        COMMIT_ERR_UPD.compareAndSet(GridNearTxLocal.this, null, e);
 
-                    fut0.finish(true, true, false);
+                        fut.finish(false, true, false);
+
+                        throw e;
+                    }
+                    catch (IgniteCheckedException e) {
+                        COMMIT_ERR_UPD.compareAndSet(GridNearTxLocal.this, null, e);
+
+                        if (!(e instanceof NodeStoppingException))
+                            fut.finish(false, true, true);
+                        else
+                            fut.onNodeStop(e);
+                    }
                 }
-                catch (Error | RuntimeException e) {
-                    COMMIT_ERR_UPD.compareAndSet(GridNearTxLocal.this, null, e);
+            });
+        }
+        else
+            fut.finish(true, false, false);
 
-                    fut0.finish(false, true, false);
-
-                    throw e;
-                }
-                catch (IgniteCheckedException e) {
-                    COMMIT_ERR_UPD.compareAndSet(GridNearTxLocal.this, null, e);
-
-                    if (!(e instanceof NodeStoppingException))
-                        fut0.finish(false, true, true);
-                }
-            }
-        });
-
-        return fut0;
+        return fut;
     }
 
     /** {@inheritDoc} */
@@ -3400,61 +3572,70 @@
         if (onTimeout && prepFut instanceof GridNearTxPrepareFutureAdapter && !prepFut.isDone())
             ((GridNearTxPrepareFutureAdapter) prepFut).onNearTxLocalTimeout();
 
-        NearTxFinishFuture fut = finishFut;
+        final NearTxFinishFuture fut, fut0 = finishFut; boolean fastFinish;
 
-        if (fut != null)
+        if (fut0 != null)
             return chainFinishFuture(finishFut, false, clearThreadMap, onTimeout);
 
-        // Enable fast finish only from tx thread.
-        if (clearThreadMap && fastFinish()) {
-            GridNearTxFastFinishFuture fut0;
-
-            if (!FINISH_FUT_UPD.compareAndSet(this, null, fut0 = new GridNearTxFastFinishFuture(this, false)))
-                return chainFinishFuture(finishFut, false, true, onTimeout);
-
-            fut0.finish(true);
-
-            return fut0;
-        }
-
-        final GridNearTxFinishFuture fut0;
-
-        if (!FINISH_FUT_UPD.compareAndSet(this, null, fut0 = new GridNearTxFinishFuture<>(cctx, this, false)))
+        if (!FINISH_FUT_UPD.compareAndSet(this, null, fut = finishFuture(fastFinish = clearThreadMap && fastFinish(), false)))
             return chainFinishFuture(finishFut, false, clearThreadMap, onTimeout);
 
-        cctx.mvcc().addFuture(fut0, fut0.futureId());
+        rollbackFuture(fut);
 
-        if (prepFut == null || prepFut.isDone()) {
-            try {
-                // Check for errors in prepare future.
-                if (prepFut != null)
-                    prepFut.get();
-            }
-            catch (IgniteCheckedException e) {
-                if (log.isDebugEnabled())
-                    log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
-            }
-
-            fut0.finish(false, clearThreadMap, onTimeout);
-        }
-        else {
-            prepFut.listen(new CI1<IgniteInternalFuture<?>>() {
-                @Override public void apply(IgniteInternalFuture<?> f) {
-                    try {
-                        // Check for errors in prepare future.
-                        f.get();
-                    }
-                    catch (IgniteCheckedException e) {
-                        if (log.isDebugEnabled())
-                            log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
-                    }
-
-                    fut0.finish(false, clearThreadMap, onTimeout);
+        if (!fastFinish) {
+            if (prepFut == null || prepFut.isDone()) {
+                try {
+                    // Check for errors in prepare future.
+                    if (prepFut != null)
+                        prepFut.get();
                 }
-            });
+                catch (IgniteCheckedException e) {
+                    if (log.isDebugEnabled())
+                        log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
+                }
+
+                fut.finish(false, clearThreadMap, onTimeout);
+            }
+            else {
+                prepFut.listen(new CI1<IgniteInternalFuture<?>>() {
+                    @Override public void apply(IgniteInternalFuture<?> f) {
+                        try {
+                            // Check for errors in prepare future.
+                            f.get();
+                        }
+                        catch (IgniteCheckedException e) {
+                            if (log.isDebugEnabled())
+                                log.debug("Got optimistic tx failure [tx=" + this + ", err=" + e + ']');
+                        }
+
+                        fut.finish(false, clearThreadMap, onTimeout);
+                    }
+                });
+            }
+        }
+        else
+            fut.finish(false, true, onTimeout);
+
+        return fut;
+    }
+
+    /**
+     * @return Transaction commit future.
+     * @param fast {@code True} in case of fast finish.
+     * @param commit {@code True} if commit.
+     */
+    private NearTxFinishFuture finishFuture(boolean fast, boolean commit) {
+        NearTxFinishFuture fut = fast ? new GridNearTxFastFinishFuture(this, commit) :
+            new GridNearTxFinishFuture<>(cctx, this, commit);
+
+        if (mvccQueryTracker() != null || mvccSnapshot != null || txState.mvccEnabled(cctx)) {
+            if (commit)
+                fut = new GridNearTxFinishAndAckFuture(fut);
+            else
+                fut.listen(new AckCoordinatorOnRollback(this));
         }
 
-        return fut0;
+        return fut;
     }
 
     /** {@inheritDoc} */
@@ -3489,12 +3670,8 @@
 
                                 assert rollbackFut.isDone() : rollbackFut;
                             }
-                            else {
-                                if (!cctx.mvcc().addFuture(rollbackFut, rollbackFut.futureId()))
-                                    return;
-
+                            else
                                 rollbackFut.finish(false, clearThreadMap, onTimeout);
-                            }
                         }
                         else {
                             finishFut.listen(new IgniteInClosure<IgniteInternalFuture<IgniteInternalTx>>() {
@@ -3540,7 +3717,9 @@
      * @return {@code True} if 'fast finish' path can be used for transaction completion.
      */
     private boolean fastFinish() {
-        return writeMap().isEmpty() && ((optimistic() && !serializable()) || readMap().isEmpty());
+        return writeMap().isEmpty()
+            && ((optimistic() && !serializable()) || readMap().isEmpty())
+            && (mappings.single() || F.view(mappings.mappings(), CU.FILTER_QUERY_MAPPING).isEmpty());
     }
 
     /**
@@ -3985,6 +4164,20 @@
     }
 
     /**
+     * @return {@code true} if this transaction does not have type flag set or it matches invoking operation,
+     * {@code false} otherwise.
+     */
+    public boolean isOperationAllowed(boolean sqlOp) {
+        if (sql == null) {
+            sql = sqlOp;
+
+            return true;
+        }
+
+        return sql == sqlOp;
+    }
+
+    /**
      * @return Public API proxy.
      */
     public TransactionProxy proxy() {
@@ -4089,7 +4282,7 @@
                         GridCacheEntryEx e = txEntry == null ? entryEx(cacheCtx, txKey, topVer) : txEntry.cached();
 
                         if (readCommitted() || skipVals) {
-                            cacheCtx.evicts().touch(e, topologyVersion());
+                            e.touch(topologyVersion());
 
                             if (visibleVal != null) {
                                 cacheCtx.addResult(map,
@@ -4185,13 +4378,19 @@
     /**
      * @param cacheCtx Cache context.
      * @param retval Return value flag.
+     * @param sql SQL operation flag.
      * @throws IgniteCheckedException If failed.
      */
-    private void beforePut(GridCacheContext cacheCtx, boolean retval) throws IgniteCheckedException {
+    private void beforePut(GridCacheContext cacheCtx, boolean retval, boolean sql) throws IgniteCheckedException {
+        assert !sql || cacheCtx.mvccEnabled();
+
         checkUpdatesAllowed(cacheCtx);
 
         cacheCtx.checkSecurity(SecurityPermission.CACHE_PUT);
 
+        if (cacheCtx.mvccEnabled() && !isOperationAllowed(sql))
+            throw new IgniteCheckedException(TX_TYPE_MISMATCH_ERR_MSG);
+
         if (retval)
             needReturnValue(true);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareFutureAdapter.java
index 5c0fcec..a9b1848 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareFutureAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareFutureAdapter.java
@@ -49,7 +49,7 @@
  * Common code for tx prepare in optimistic and pessimistic modes.
  */
 public abstract class GridNearTxPrepareFutureAdapter extends
-    GridCacheCompoundFuture<GridNearTxPrepareResponse, IgniteInternalTx> implements GridCacheVersionedFuture<IgniteInternalTx> {
+    GridCacheCompoundFuture<Object, IgniteInternalTx> implements GridCacheVersionedFuture<IgniteInternalTx> {
     /** Logger reference. */
     protected static final AtomicReference<IgniteLogger> logRef = new AtomicReference<>();
 
@@ -58,9 +58,9 @@
         AtomicReferenceFieldUpdater.newUpdater(GridNearTxPrepareFutureAdapter.class, Throwable.class, "err");
 
     /** */
-    private static final IgniteReducer<GridNearTxPrepareResponse, IgniteInternalTx> REDUCER =
-        new IgniteReducer<GridNearTxPrepareResponse, IgniteInternalTx>() {
-            @Override public boolean collect(GridNearTxPrepareResponse e) {
+    private static final IgniteReducer<Object, IgniteInternalTx> REDUCER =
+        new IgniteReducer<Object, IgniteInternalTx>() {
+            @Override public boolean collect(Object e) {
                 return true;
             }
 
@@ -165,7 +165,7 @@
      * @param txMapping Transaction mapping.
      */
     final void checkOnePhase(GridDhtTxMapping txMapping) {
-        if (tx.storeWriteThrough())
+        if (tx.storeWriteThrough() || tx.txState().mvccEnabled(cctx)) // TODO IGNITE-3479 (onePhase + mvcc)
             return;
 
         Map<UUID, Collection<UUID>> map = txMapping.transactionNodes();
@@ -255,7 +255,7 @@
                 txEntry.ttl(CU.toTtl(expiry.getExpiryForAccess()));
         }
 
-        if (!m.empty()) {
+        if (m.queryUpdate() || !m.empty()) {
             // This step is very important as near and DHT versions grow separately.
             cctx.versions().onReceived(nodeId, res.dhtVersion());
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java
index a670609..55c809d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareRequest.java
@@ -58,6 +58,9 @@
     /** */
     private static final int ALLOW_WAIT_TOP_FUT_FLAG_MASK = 0x10;
 
+    /** */
+    private static final int REQUEST_MVCC_CNTR_FLAG_MASK = 0x20;
+
     /** Future ID. */
     private IgniteUuid futId;
 
@@ -150,6 +153,20 @@
     }
 
     /**
+     * @return {@code True} if need request MVCC counter on primary node on prepare step.
+     */
+    public boolean requestMvccCounter() {
+        return isFlag(REQUEST_MVCC_CNTR_FLAG_MASK);
+    }
+
+    /**
+     * @param val {@code True} if need request MVCC counter on primary node on prepare step.
+     */
+    public void requestMvccCounter(boolean val) {
+        setFlag(val, REQUEST_MVCC_CNTR_FLAG_MASK);
+    }
+
+    /**
      * @return {@code True} if it is safe for first client request to wait for topology future
      *      completion.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java
index 8162168..e9865df 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxPrepareResponse.java
@@ -33,6 +33,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
@@ -97,6 +98,9 @@
     /** Not {@code null} if client node should remap transaction. */
     private AffinityTopologyVersion clientRemapVer;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * Empty constructor required by {@link Externalizable}.
      */
@@ -146,6 +150,20 @@
     }
 
     /**
+     * @param mvccSnapshot Mvcc info.
+     */
+    public void mvccSnapshot(MvccSnapshot mvccSnapshot) {
+        this.mvccSnapshot = mvccSnapshot;
+    }
+
+    /**
+     * @return Mvcc info.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
      * @return One-phase commit state on primary node.
      */
     public boolean onePhaseCommit() {
@@ -418,6 +436,12 @@
 
                 writer.incrementState();
 
+            case 20:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -514,6 +538,14 @@
 
                 reader.incrementState();
 
+            case 20:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridNearTxPrepareResponse.class);
@@ -526,7 +558,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 20;
+        return 21;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistFuture.java
new file mode 100644
index 0000000..9a2dfa3
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistFuture.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.util.Collection;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.processors.affinity.AffinityAssignment;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheMessage;
+import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryEnlistFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+import static org.apache.ignite.internal.processors.cache.distributed.dht.NearTxQueryEnlistResultHandler.createResponse;
+
+/**
+ * Cache lock future.
+ */
+@SuppressWarnings("ForLoopReplaceableByForEach")
+public class GridNearTxQueryEnlistFuture extends GridNearTxAbstractEnlistFuture {
+    /** */
+    private static final long serialVersionUID = -2155104765461405820L;
+    /** Involved cache ids. */
+    private final int[] cacheIds;
+
+    /** Partitions. */
+    private final int[] parts;
+
+    /** Schema name. */
+    private final String schema;
+
+    /** Query string. */
+    private final String qry;
+
+    /** Query parameters. */
+    private final Object[] params;
+
+    /** Flags. */
+    private final int flags;
+
+    /** Fetch page size. */
+    private final int pageSize;
+
+    /**
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @param cacheIds Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Timeout.
+     */
+    protected GridNearTxQueryEnlistFuture(
+        GridCacheContext<?, ?> cctx, GridNearTxLocal tx, int[] cacheIds, int[] parts, String schema, String qry,
+        Object[] params, int flags, int pageSize, long timeout) {
+        super(cctx, tx, timeout);
+
+        this.cacheIds = cacheIds;
+        this.parts = parts;
+        this.schema = schema;
+        this.qry = qry;
+        this.params = params;
+        this.flags = flags;
+        this.pageSize = pageSize;
+    }
+
+    /**
+     * @param topLocked Topology locked flag.
+     */
+    @Override protected void map(final boolean topLocked) {
+        MiniFuture mini = null;
+
+        try {
+            final AffinityAssignment assignment = cctx.affinity().assignment(topVer);
+
+            Collection<ClusterNode> primary;
+
+            if (parts != null) {
+                primary = U.newHashSet(parts.length);
+
+                for (int i = 0; i < parts.length; i++) {
+                    ClusterNode pNode = assignment.get(parts[i]).get(0);
+
+                    primary.add(pNode);
+
+                    updateMappings(pNode);
+                }
+            }
+            else {
+                primary = assignment.primaryPartitionNodes();
+
+                for (ClusterNode pNode : primary) {
+                    updateMappings(pNode);
+                }
+            }
+
+            boolean locallyMapped = primary.contains(cctx.localNode());
+
+            if (locallyMapped)
+                add(new MiniFuture(cctx.localNode()));
+
+            int idx = locallyMapped ? 1 : 0;
+            boolean first = true;
+            boolean clientFirst = false;
+
+            for (ClusterNode node : F.view(primary, F.remoteNodes(cctx.localNodeId()))) {
+                add(mini = new MiniFuture(node));
+
+                if (first) {
+                    clientFirst = cctx.localNode().isClient() && !topLocked && !tx.hasRemoteLocks();
+
+                    first = false;
+                }
+
+                GridNearTxQueryEnlistRequest req = new GridNearTxQueryEnlistRequest(
+                    cctx.cacheId(),
+                    threadId,
+                    futId,
+                    ++idx,
+                    tx.subjectId(),
+                    topVer,
+                    lockVer,
+                    mvccSnapshot,
+                    cacheIds,
+                    parts,
+                    schema,
+                    qry,
+                    params,
+                    flags,
+                    pageSize,
+                    remainingTime(),
+                    tx.remainingTime(),
+                    tx.taskNameHash(),
+                    clientFirst
+                );
+
+                sendRequest(req, node.id(), mini);
+            }
+
+            if (locallyMapped) {
+                final MiniFuture localMini = mini = miniFuture(-1);
+
+                assert localMini != null;
+
+                GridDhtTxQueryEnlistFuture fut = new GridDhtTxQueryEnlistFuture(
+                    cctx.localNode().id(),
+                    lockVer,
+                    mvccSnapshot,
+                    threadId,
+                    futId,
+                    -1,
+                    tx,
+                    cacheIds,
+                    parts,
+                    schema,
+                    qry,
+                    params,
+                    flags,
+                    pageSize,
+                    remainingTime(),
+                    cctx);
+
+                updateLocalFuture(fut);
+
+                fut.listen(new CI1<IgniteInternalFuture<Long>>() {
+                    @Override public void apply(IgniteInternalFuture<Long> fut) {
+                        assert fut.error() != null || fut.result() != null : fut;
+
+                        try {
+                            clearLocalFuture((GridDhtTxQueryEnlistFuture)fut);
+
+                            GridNearTxQueryEnlistResponse res = fut.error() == null ? createResponse(fut) : null;
+
+                            localMini.onResult(res, fut.error());
+                        }
+                        catch (IgniteCheckedException e) {
+                            localMini.onResult(null, e);
+                        }
+                        finally {
+                            CU.unwindEvicts(cctx);
+                        }
+                    }
+                });
+
+                fut.init();
+            }
+        }
+        catch (Throwable e) {
+            if (mini != null)
+                mini.onResult(null, e);
+            else
+                onDone(e);
+
+            if (e instanceof Error)
+                throw (Error)e;
+        }
+
+        markInitialized();
+    }
+
+    /**
+     *
+     * @param req Request.
+     * @param nodeId Remote node ID.
+     * @param fut Result future.
+     * @throws IgniteCheckedException if failed to send.
+     */
+    private void sendRequest(GridCacheMessage req, UUID nodeId, MiniFuture fut) throws IgniteCheckedException {
+        IgniteInternalFuture<?> txSync = cctx.tm().awaitFinishAckAsync(nodeId, tx.threadId());
+
+        if (txSync == null || txSync.isDone())
+            cctx.io().send(nodeId, req, cctx.ioPolicy());
+        else
+            txSync.listen(new CI1<IgniteInternalFuture<?>>() {
+                @Override public void apply(IgniteInternalFuture<?> f) {
+                    try {
+                        cctx.io().send(nodeId, req, cctx.ioPolicy());
+                    }
+                    catch (IgniteCheckedException e) {
+                        fut.onResult(null, e);
+                    }
+                }
+            });
+    }
+
+    /**
+     * @param nodeId Left node ID
+     * @return {@code True} if node was in the list.
+     */
+    @Override public synchronized boolean onNodeLeft(UUID nodeId) {
+        for (IgniteInternalFuture<?> fut : futures()) {
+            MiniFuture f = (MiniFuture)fut;
+
+            if (f.node.id().equals(nodeId)) {
+                if (log.isDebugEnabled())
+                    log.debug("Found mini-future for left node [nodeId=" + nodeId + ", mini=" + f + ", fut=" +
+                        this + ']');
+
+                return f.onResult(null, newTopologyException(nodeId));
+            }
+        }
+
+        if (log.isDebugEnabled())
+            log.debug("Future does not have mapping for left node (ignoring) [nodeId=" + nodeId +
+                ", fut=" + this + ']');
+
+        return false;
+    }
+
+    /**
+     * Finds pending mini future by the given mini ID.
+     *
+     * @param miniId Mini ID to find.
+     * @return Mini future.
+     */
+    private MiniFuture miniFuture(int miniId) {
+         synchronized (this) {
+            int idx = Math.abs(miniId) - 1;
+
+            assert idx >= 0 && idx < futuresCountNoLock();
+
+            IgniteInternalFuture<Long> fut = future(idx);
+
+            if (!fut.isDone())
+                return (MiniFuture)fut;
+        }
+
+        return null;
+    }
+
+    /**
+     * Creates new topology exception for cases when primary node leaves grid during mapping.
+     *
+     * @param nodeId Node ID.
+     * @return Topology exception with user-friendly message.
+     */
+    private ClusterTopologyCheckedException newTopologyException(UUID nodeId) {
+        ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to enlist keys " +
+            "(primary node left grid, retry transaction if possible) [node=" + nodeId + ']');
+
+        topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));
+
+        return topEx;
+    }
+
+    /**
+     * @param nodeId Sender node id.
+     * @param res Response.
+     */
+    public void onResult(UUID nodeId, GridNearTxQueryEnlistResponse res) {
+        MiniFuture mini = miniFuture(res.miniId());
+
+        if (mini != null)
+            mini.onResult(res, null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryEnlistFuture.class, this, super.toString());
+    }
+
+    /** */
+    private class MiniFuture extends GridFutureAdapter<Long> {
+        /** */
+        private boolean completed;
+
+        /** Node ID. */
+        @GridToStringExclude
+        private final ClusterNode node;
+
+        /**
+         * @param node Cluster node.
+         */
+        private MiniFuture(ClusterNode node) {
+            this.node = node;
+        }
+
+        /**
+         * @param res Response.
+         * @param err Exception.
+         * @return {@code True} if future was completed by this call.
+         */
+        public boolean onResult(GridNearTxQueryEnlistResponse res, Throwable err) {
+            assert res != null || err != null : this;
+
+            if (err == null && res.error() != null)
+                err = res.error();
+
+            synchronized (this) {
+                if (completed)
+                    return false;
+
+                completed = true;
+            }
+
+            if (X.hasCause(err, ClusterTopologyCheckedException.class)
+                || (res != null && res.removeMapping())) {
+                GridDistributedTxMapping m = tx.mappings().get(node.id());
+
+                assert m != null && m.empty();
+
+                tx.removeMapping(node.id());
+
+                if (node.isLocal())
+                    tx.colocatedLocallyMapped(false);
+            }
+            else if (res != null && res.result() > 0 && !node.isLocal())
+                tx.hasRemoteLocks(true);
+
+            return err != null ? onDone(err) : onDone(res.result(), res.error());
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistRequest.java
new file mode 100644
index 0000000..472937b
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistRequest.java
@@ -0,0 +1,616 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class GridNearTxQueryEnlistRequest extends GridCacheIdMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long threadId;
+
+    /** */
+    private IgniteUuid futId;
+
+    /** */
+    private boolean clientFirst;
+
+    /** */
+    private int miniId;
+
+    /** */
+    private UUID subjId;
+
+    /** */
+    private AffinityTopologyVersion topVer;
+
+    /** */
+    private GridCacheVersion lockVer;
+
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
+    /** */
+    private int[] cacheIds;
+
+    /** */
+    private int[] parts;
+
+    /** */
+    private String schema;
+
+    /** */
+    private String qry;
+
+    /** */
+    @GridDirectTransient
+    private Object[] params;
+
+    /** */
+    private byte[] paramsBytes;
+
+    /** */
+    private int flags;
+
+    /** */
+    private long timeout;
+
+    /** */
+    private long txTimeout;
+
+    /** */
+    private int taskNameHash;
+
+    /** */
+    private int pageSize;
+
+    /** */
+    public GridNearTxQueryEnlistRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param threadId Thread id.
+     * @param futId Future id.
+     * @param miniId Mini fture id.
+     * @param subjId Subject id.
+     * @param topVer Topology version.
+     * @param lockVer Lock version.
+     * @param mvccSnapshot MVCC snspshot.
+     * @param cacheIds Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Timeout milliseconds.
+     * @param txTimeout Tx timeout milliseconds.
+     * @param taskNameHash Task name hash.
+     * @param clientFirst {@code True} if this is the first client request.
+     */
+    public GridNearTxQueryEnlistRequest(
+        int cacheId,
+        long threadId,
+        IgniteUuid futId,
+        int miniId,
+        UUID subjId,
+        AffinityTopologyVersion topVer,
+        GridCacheVersion lockVer,
+        MvccSnapshot mvccSnapshot,
+        int[] cacheIds,
+        int[] parts,
+        String schema,
+        String qry,
+        Object[] params,
+        int flags,
+        int pageSize,
+        long timeout,
+        long txTimeout,
+        int taskNameHash,
+        boolean clientFirst) {
+        this.cacheIds = cacheIds;
+        this.parts = parts;
+        this.schema = schema;
+        this.qry = qry;
+        this.params = params;
+        this.flags = flags;
+        this.pageSize = pageSize;
+        this.txTimeout = txTimeout;
+        this.cacheId = cacheId;
+        this.threadId = threadId;
+        this.futId = futId;
+        this.miniId = miniId;
+        this.subjId = subjId;
+        this.topVer = topVer;
+        this.lockVer = lockVer;
+        this.mvccSnapshot = mvccSnapshot;
+        this.timeout = timeout;
+        this.taskNameHash = taskNameHash;
+        this.clientFirst = clientFirst;
+    }
+
+    /**
+     * @return Thread id.
+     */
+    public long threadId() {
+        return threadId;
+    }
+
+    /**
+     * @return Future id.
+     */
+    public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Mini future ID.
+     */
+    public int miniId() {
+        return miniId;
+    }
+
+    /**
+     * @return Subject id.
+     */
+    public UUID subjectId() {
+        return subjId;
+    }
+
+    /**
+     * @return Topology version.
+     */
+    @Override public AffinityTopologyVersion topologyVersion() {
+        return topVer;
+    }
+
+    /**
+     * @return Lock version.
+     */
+    public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /**
+     * @return MVCC snapshot.
+     */
+    public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
+     * @return Involved cache ids.
+     */
+    public int[] cacheIds() {
+        return cacheIds;
+    }
+
+    /**
+     * @return Partitions.
+     */
+    public int[] partitions() {
+        return parts;
+    }
+
+    /**
+     * @return Schema name.
+     */
+    public String schemaName() {
+        return schema;
+    }
+
+    /**
+     * @return Query string.
+     */
+    public String query() {
+        return qry;
+    }
+
+    /**
+     * @return Query parameters.
+     */
+    public Object[] parameters() {
+        return params;
+    }
+
+    /**
+     * @return Flags.
+     */
+    public int flags() {
+        return flags;
+    }
+
+    /**
+     * @return Fetch page size.
+     */
+    public int pageSize() {
+        return pageSize;
+    }
+
+    /**
+     * @return Timeout milliseconds.
+     */
+    public long timeout() {
+        return timeout;
+    }
+
+    /**
+     * @return Tx timeout milliseconds.
+     */
+    public long txTimeout() {
+        return txTimeout;
+    }
+
+    /**
+     * @return Task name hash.
+     */
+    public int taskNameHash() {
+        return taskNameHash;
+    }
+
+    /**
+     * @return {@code True} if this is the first client request.
+     */
+    public boolean firstClientRequest() {
+        return clientFirst;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 21;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        if (params != null && paramsBytes == null)
+            paramsBytes = U.marshal(ctx, params);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        if (paramsBytes != null && params == null)
+            params = U.unmarshal(ctx, paramsBytes, ldr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeIntArray("cacheIds", cacheIds))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeBoolean("clientFirst", clientFirst))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeInt("flags", flags))
+                    return false;
+
+                writer.incrementState();
+
+            case 6:
+                if (!writer.writeIgniteUuid("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeMessage("lockVer", lockVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 8:
+                if (!writer.writeInt("miniId", miniId))
+                    return false;
+
+                writer.incrementState();
+
+            case 9:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeInt("pageSize", pageSize))
+                    return false;
+
+                writer.incrementState();
+
+            case 11:
+                if (!writer.writeByteArray("paramsBytes", paramsBytes))
+                    return false;
+
+                writer.incrementState();
+
+            case 12:
+                if (!writer.writeIntArray("parts", parts))
+                    return false;
+
+                writer.incrementState();
+
+            case 13:
+                if (!writer.writeString("qry", qry))
+                    return false;
+
+                writer.incrementState();
+
+            case 14:
+                if (!writer.writeString("schema", schema))
+                    return false;
+
+                writer.incrementState();
+
+            case 15:
+                if (!writer.writeUuid("subjId", subjId))
+                    return false;
+
+                writer.incrementState();
+
+            case 16:
+                if (!writer.writeInt("taskNameHash", taskNameHash))
+                    return false;
+
+                writer.incrementState();
+
+            case 17:
+                if (!writer.writeLong("threadId", threadId))
+                    return false;
+
+                writer.incrementState();
+
+            case 18:
+                if (!writer.writeLong("timeout", timeout))
+                    return false;
+
+                writer.incrementState();
+
+            case 19:
+                if (!writer.writeMessage("topVer", topVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 20:
+                if (!writer.writeLong("txTimeout", txTimeout))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                cacheIds = reader.readIntArray("cacheIds");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                clientFirst = reader.readBoolean("clientFirst");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                flags = reader.readInt("flags");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 6:
+                futId = reader.readIgniteUuid("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                lockVer = reader.readMessage("lockVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 8:
+                miniId = reader.readInt("miniId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 9:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 10:
+                pageSize = reader.readInt("pageSize");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 11:
+                paramsBytes = reader.readByteArray("paramsBytes");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 12:
+                parts = reader.readIntArray("parts");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 13:
+                qry = reader.readString("qry");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 14:
+                schema = reader.readString("schema");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 15:
+                subjId = reader.readUuid("subjId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 16:
+                taskNameHash = reader.readInt("taskNameHash");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 17:
+                threadId = reader.readLong("threadId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 18:
+                timeout = reader.readLong("timeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 19:
+                topVer = reader.readMessage("topVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 20:
+                txTimeout = reader.readLong("txTimeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridNearTxQueryEnlistRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 151;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryEnlistRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistResponse.java
new file mode 100644
index 0000000..39e33c4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryEnlistResponse.java
@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.ExceptionAware;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public class GridNearTxQueryEnlistResponse extends GridCacheIdMessage implements ExceptionAware {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Future ID. */
+    private IgniteUuid futId;
+
+    /** Error. */
+    @GridDirectTransient
+    private Throwable err;
+
+    /** Serialized error. */
+    private byte[] errBytes;
+
+    /** Mini future id. */
+    private int miniId;
+
+    /** Result. */
+    private long res;
+
+    /** Remove mapping flag. */
+    private boolean removeMapping;
+
+    /** */
+    private GridCacheVersion lockVer;
+
+    /**
+     * Default constructor.
+     */
+    public GridNearTxQueryEnlistResponse() {
+        // No-op.
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param futId Future id.
+     * @param miniId Mini future id.
+     * @param lockVer Lock version.
+     * @param err Error.
+     */
+    public GridNearTxQueryEnlistResponse(int cacheId, IgniteUuid futId, int miniId, GridCacheVersion lockVer, Throwable err) {
+        this.cacheId = cacheId;
+        this.futId = futId;
+        this.miniId = miniId;
+        this.lockVer = lockVer;
+        this.err = err;
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param futId Future id.
+     * @param miniId Mini future id.
+     * @param lockVer Lock version.
+     * @param res Result.
+     * @param removeMapping Remove mapping flag.
+     */
+    public GridNearTxQueryEnlistResponse(int cacheId, IgniteUuid futId, int miniId, GridCacheVersion lockVer, long res, boolean removeMapping) {
+        this.cacheId = cacheId;
+        this.futId = futId;
+        this.miniId = miniId;
+        this.lockVer = lockVer;
+        this.res = res;
+        this.removeMapping = removeMapping;
+    }
+
+    /**
+     * @return Loc version.
+     */
+    public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /**
+     * @return Future id.
+     */
+    public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Mini future id.
+     */
+    public int miniId() {
+        return miniId;
+    }
+
+    /**
+     * @return Result.
+     */
+    public long result() {
+        return res;
+    }
+
+    /**
+     * @return Remove mapping flag.
+     */
+    public boolean removeMapping() {
+        return removeMapping;
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public Throwable error() {
+        return err;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 9;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeByteArray("errBytes", errBytes))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeIgniteUuid("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeMessage("lockVer", lockVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 6:
+                if (!writer.writeInt("miniId", miniId))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeBoolean("removeMapping", removeMapping))
+                    return false;
+
+                writer.incrementState();
+
+            case 8:
+                if (!writer.writeLong("res", res))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                errBytes = reader.readByteArray("errBytes");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                futId = reader.readIgniteUuid("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                lockVer = reader.readMessage("lockVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 6:
+                miniId = reader.readInt("miniId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                removeMapping = reader.readBoolean("removeMapping");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 8:
+                res = reader.readLong("res");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridNearTxQueryEnlistResponse.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 152;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        if (err != null && errBytes == null)
+            errBytes = U.marshal(ctx.marshaller(), err);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        if (errBytes != null)
+            err = U.unmarshal(ctx, errBytes, U.resolveClassLoader(ldr, ctx.gridConfig()));
+    }
+
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryEnlistResponse.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistFuture.java
new file mode 100644
index 0000000..910deb9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistFuture.java
@@ -0,0 +1,670 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicLongFieldUpdater;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheMessage;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxAbstractEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryResultsEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxRemote;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotWithoutTxs;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.cache.distributed.dht.NearTxQueryEnlistResultHandler.createResponse;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * A future tracking requests for remote nodes transaction enlisting and locking
+ * of entries produced with complex DML queries requiring reduce step.
+ */
+public class GridNearTxQueryResultsEnlistFuture extends GridNearTxAbstractEnlistFuture {
+    /** */
+    private static final long serialVersionUID = 4339957209840477447L;
+
+    /** */
+    public static final int DFLT_BATCH_SIZE = 1024;
+
+    /** Res field updater. */
+    private static final AtomicLongFieldUpdater<GridNearTxQueryResultsEnlistFuture> RES_UPD =
+        AtomicLongFieldUpdater.newUpdater(GridNearTxQueryResultsEnlistFuture.class, "res");
+
+    /** SkipCntr field updater. */
+    private static final AtomicIntegerFieldUpdater<GridNearTxQueryResultsEnlistFuture> SKIP_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(GridNearTxQueryResultsEnlistFuture.class, "skipCntr");
+
+    /** Marker object. */
+    private static final Object FINISHED = new Object();
+
+    /** */
+    private final UpdateSourceIterator<?> it;
+
+    /** */
+    private int batchSize;
+
+    /** */
+    private AtomicInteger batchCntr = new AtomicInteger();
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile int skipCntr;
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile long res;
+
+    /** */
+    private final Map<UUID, Batch> batches = new ConcurrentHashMap<>();
+
+    /** Row extracted from iterator but not yet used. */
+    private Object peek;
+
+    /** Topology locked flag. */
+    private boolean topLocked;
+
+    /** */
+    private final boolean sequential;
+
+    /**
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @param timeout Timeout.
+     * @param it Rows iterator.
+     * @param batchSize Batch size.
+     * @param sequential Sequential locking flag.
+     */
+    public GridNearTxQueryResultsEnlistFuture(GridCacheContext<?, ?> cctx,
+        GridNearTxLocal tx,
+        long timeout,
+        UpdateSourceIterator<?> it,
+        int batchSize,
+        boolean sequential) {
+        super(cctx, tx, timeout);
+
+        this.it = it;
+        this.batchSize = batchSize > 0 ? batchSize : DFLT_BATCH_SIZE;
+        this.sequential = sequential;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void map(boolean topLocked) {
+        this.topLocked = topLocked;
+
+        sendNextBatches(null);
+    }
+
+    /**
+     * Continue iterating the data rows and form new batches.
+     *
+     * @param nodeId Node that is ready for a new batch.
+     */
+    private void sendNextBatches(@Nullable UUID nodeId) {
+        try {
+            Collection<Batch> next = continueLoop(nodeId);
+
+            if (next == null)
+                return;
+
+            boolean first = (nodeId != null);
+
+            for (Batch batch : next) {
+                ClusterNode node = batch.node();
+
+                sendBatch(node, batch, first);
+
+                if (!node.isLocal())
+                    first = false;
+            }
+        }
+        catch (Throwable e) {
+            onDone(e);
+
+            if (e instanceof Error)
+                throw (Error)e;
+        }
+    }
+
+    /**
+     * Iterate data rows and form batches.
+     *
+     * @param nodeId Id of node acknowledged the last batch.
+     * @return Collection of newly completed batches.
+     * @throws IgniteCheckedException If failed.
+     */
+    private Collection<Batch> continueLoop(@Nullable UUID nodeId) throws IgniteCheckedException {
+        if (nodeId != null)
+            batches.remove(nodeId);
+
+        // Accumulate number of batches released since we got here.
+        // Let only one thread do the looping.
+        if (isDone() || SKIP_UPD.getAndIncrement(this) != 0)
+            return null;
+
+        ArrayList<Batch> res = null; Batch batch = null;
+
+        boolean flush = false;
+
+        EnlistOperation op = it.operation();
+
+        while (true) {
+            while (hasNext0()) {
+                checkCompleted();
+
+                Object cur = next0();
+
+                KeyCacheObject key = cctx.toCacheKeyObject(op.isDeleteOrLock() ? cur : ((IgniteBiTuple)cur).getKey());
+
+                List<ClusterNode> nodes = cctx.affinity().nodesByKey(key, topVer);
+
+                ClusterNode node;
+
+                if (F.isEmpty(nodes) || ((node = nodes.get(0)) == null))
+                    throw new ClusterTopologyCheckedException("Failed to get primary node " +
+                        "[topVer=" + topVer + ", key=" + key + ']');
+
+                if (!sequential)
+                    batch = batches.get(node.id());
+                else if (batch != null && !batch.node().equals(node))
+                    res = markReady(res, batch);
+
+                if (batch == null)
+                    batches.put(node.id(), batch = new Batch(node));
+
+                if (batch.ready()) {
+                    // Can't advance further at the moment.
+                    batch = null;
+
+                    peek = cur;
+
+                    it.beforeDetach();
+
+                    flush = true;
+
+                    break;
+                }
+
+                batch.add(op.isDeleteOrLock() ? key : cur,
+                    op != EnlistOperation.LOCK && cctx.affinityNode() && (cctx.isReplicated() || nodes.indexOf(cctx.localNode()) > 0));
+
+                if (batch.size() == batchSize)
+                    res = markReady(res, batch);
+            }
+
+            if (SKIP_UPD.decrementAndGet(this) == 0)
+                break;
+
+            skipCntr = 1;
+        }
+
+        if (flush)
+            return res;
+
+        // No data left - flush incomplete batches.
+        for (Batch batch0 : batches.values()) {
+            if (!batch0.ready()) {
+                if (res == null)
+                    res = new ArrayList<>();
+
+                batch0.ready(true);
+
+                res.add(batch0);
+            }
+        }
+
+        if (batches.isEmpty())
+            onDone(this.res);
+
+        return res;
+    }
+
+    /** */
+    private Object next0() {
+        if (!hasNext0())
+            throw new NoSuchElementException();
+
+        Object cur;
+
+        if ((cur = peek) != null)
+            peek = null;
+        else
+            cur = it.next();
+
+        return cur;
+    }
+
+    /** */
+    private boolean hasNext0() {
+        if (peek == null && !it.hasNext())
+            peek = FINISHED;
+
+        return peek != FINISHED;
+    }
+
+    /** */
+    private ArrayList<Batch> markReady(ArrayList<Batch> batches, Batch batch) {
+        if (!batch.ready()) {
+            batch.ready(true);
+
+            if (batches == null)
+                batches = new ArrayList<>();
+
+            batches.add(batch);
+        }
+
+        return batches;
+    }
+
+    /**
+     *
+     * @param primaryId Primary node id.
+     * @param rows Rows.
+     * @param dhtVer Dht version assigned at primary node.
+     * @param dhtFutId Dht future id assigned at primary node.
+     * @param updCntrs Update counters.
+     */
+    private void processBatchLocalBackupKeys(UUID primaryId, List<Object> rows, GridCacheVersion dhtVer,
+        IgniteUuid dhtFutId, GridLongList updCntrs) {
+        assert dhtVer != null;
+        assert dhtFutId != null;
+
+        EnlistOperation op = it.operation();
+
+        assert op != EnlistOperation.LOCK;
+
+        boolean keysOnly = op.isDeleteOrLock();
+
+        final ArrayList<KeyCacheObject> keys = new ArrayList<>(rows.size());
+        final ArrayList<Message> vals = keysOnly ? null : new ArrayList<>(rows.size());
+
+        for (Object row : rows) {
+            if (keysOnly)
+                keys.add(cctx.toCacheKeyObject(row));
+            else {
+                keys.add(cctx.toCacheKeyObject(((IgniteBiTuple)row).getKey()));
+                vals.add(cctx.toCacheObject(((IgniteBiTuple)row).getValue()));
+            }
+        }
+
+        try {
+            GridDhtTxRemote dhtTx = cctx.tm().tx(dhtVer);
+
+            if (dhtTx == null) {
+                dhtTx = new GridDhtTxRemote(cctx.shared(),
+                    cctx.localNodeId(),
+                    dhtFutId,
+                    primaryId,
+                    lockVer,
+                    topVer,
+                    dhtVer,
+                    null,
+                    cctx.systemTx(),
+                    cctx.ioPolicy(),
+                    PESSIMISTIC,
+                    REPEATABLE_READ,
+                    false,
+                    tx.remainingTime(),
+                    -1,
+                    this.tx.subjectId(),
+                    this.tx.taskNameHash(),
+                    false);
+
+                dhtTx.mvccSnapshot(new MvccSnapshotWithoutTxs(mvccSnapshot.coordinatorVersion(),
+                    mvccSnapshot.counter(), MVCC_OP_COUNTER_NA, mvccSnapshot.cleanupVersion()));
+
+                dhtTx = cctx.tm().onCreated(null, dhtTx);
+
+                if (dhtTx == null || !cctx.tm().onStarted(dhtTx)) {
+                    throw new IgniteTxRollbackCheckedException("Failed to update backup " +
+                        "(transaction has been completed): " + dhtVer);
+                }
+            }
+
+            dhtTx.mvccEnlistBatch(cctx, it.operation(), keys, vals, mvccSnapshot.withoutActiveTransactions(), updCntrs);
+        }
+        catch (IgniteCheckedException e) {
+            onDone(e);
+
+            return;
+        }
+
+        sendNextBatches(primaryId);
+    }
+
+    /**
+     *
+     * @param node Node.
+     * @param batch Batch.
+     * @param first First mapping flag.
+     */
+    private void sendBatch(ClusterNode node, Batch batch, boolean first) throws IgniteCheckedException {
+        updateMappings(node);
+
+        boolean clientFirst = first && cctx.localNode().isClient() && !topLocked && !tx.hasRemoteLocks();
+
+        int batchId = batchCntr.incrementAndGet();
+
+        if (node.isLocal())
+            enlistLocal(batchId, node.id(), batch);
+        else
+            sendBatch(batchId, node.id(), batch, clientFirst);
+    }
+
+    /**
+     * Send batch request to remote data node.
+     *
+     * @param batchId Id of a batch mini-future.
+     * @param nodeId Node id.
+     * @param batchFut Mini-future for the batch.
+     * @param clientFirst {@code true} if originating node is client and it is a first request to any data node.
+     */
+    private void sendBatch(int batchId, UUID nodeId, Batch batchFut, boolean clientFirst) throws IgniteCheckedException {
+        assert batchFut != null;
+
+        GridNearTxQueryResultsEnlistRequest req = new GridNearTxQueryResultsEnlistRequest(cctx.cacheId(),
+            threadId,
+            futId,
+            batchId,
+            tx.subjectId(),
+            topVer,
+            lockVer,
+            mvccSnapshot,
+            clientFirst,
+            remainingTime(),
+            tx.remainingTime(),
+            tx.taskNameHash(),
+            batchFut.rows(),
+            it.operation());
+
+        sendRequest(req, nodeId);
+    }
+
+    /**
+     *
+     * @param req Request.
+     * @param nodeId Remote node ID
+     * @throws IgniteCheckedException if failed to send.
+     */
+    private void sendRequest(GridCacheMessage req, UUID nodeId) throws IgniteCheckedException {
+        IgniteInternalFuture<?> txSync = cctx.tm().awaitFinishAckAsync(nodeId, tx.threadId());
+
+        if (txSync == null || txSync.isDone())
+            cctx.io().send(nodeId, req, cctx.ioPolicy());
+        else
+            txSync.listen(new CI1<IgniteInternalFuture<?>>() {
+                @Override public void apply(IgniteInternalFuture<?> future) {
+                    try {
+                        cctx.io().send(nodeId, req, cctx.ioPolicy());
+                    }
+                    catch (IgniteCheckedException e) {
+                        GridNearTxQueryResultsEnlistFuture.this.onDone(e);
+                    }
+                }
+            });
+    }
+
+    /**
+     * Enlist batch of entries to the transaction on local node.
+     *
+     * @param batchId Id of a batch mini-future.
+     * @param nodeId Node id.
+     * @param batch Batch.
+     */
+    private void enlistLocal(int batchId, UUID nodeId, Batch batch) throws IgniteCheckedException {
+        Collection<Object> rows = batch.rows();
+
+        GridDhtTxQueryResultsEnlistFuture fut = new GridDhtTxQueryResultsEnlistFuture(nodeId,
+            lockVer,
+            mvccSnapshot,
+            threadId,
+            futId,
+            batchId,
+            tx,
+            remainingTime(),
+            cctx,
+            rows,
+            it.operation());
+
+        updateLocalFuture(fut);
+
+        fut.listen(new CI1<IgniteInternalFuture<Long>>() {
+            @Override public void apply(IgniteInternalFuture<Long> fut) {
+                assert fut.error() != null || fut.result() != null : fut;
+
+                try {
+                    clearLocalFuture((GridDhtTxAbstractEnlistFuture)fut);
+
+                    GridNearTxQueryResultsEnlistResponse res = fut.error() == null ? createResponse(fut) : null;
+
+                    if (checkResponse(nodeId, res, fut.error()))
+                        sendNextBatches(nodeId);
+                }
+                catch (IgniteCheckedException e) {
+                    checkResponse(nodeId, null, e);
+                }
+                finally {
+                    CU.unwindEvicts(cctx);
+                }
+            }
+        });
+
+        fut.init();
+    }
+
+    /**
+     * @param nodeId Sender node id.
+     * @param res Response.
+     */
+    public void onResult(UUID nodeId, GridNearTxQueryResultsEnlistResponse res) {
+        if (checkResponse(nodeId, res, res.error())) {
+
+            Batch batch = batches.get(nodeId);
+
+            if (batch != null && !F.isEmpty(batch.localBackupRows()) && res.dhtFutureId() != null)
+                processBatchLocalBackupKeys(nodeId, batch.localBackupRows(), res.dhtVersion(), res.dhtFutureId(),
+                    res.updateCounters());
+            else
+                sendNextBatches(nodeId);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onNodeLeft(UUID nodeId) {
+        if (batches.keySet().contains(nodeId)) {
+            if (log.isDebugEnabled())
+                log.debug("Found unacknowledged batch for left node [nodeId=" + nodeId + ", fut=" +
+                    this + ']');
+
+            ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to enlist keys " +
+                "(primary node left grid, retry transaction if possible) [node=" + nodeId + ']');
+
+            topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));
+
+            processFailure(topEx, null);
+
+            batches.remove(nodeId);
+
+            if (batches.isEmpty()) // Wait for all pending requests.
+                onDone();
+
+        }
+
+        if (log.isDebugEnabled())
+            log.debug("Future does not have mapping for left node (ignoring) [nodeId=" + nodeId +
+                ", fut=" + this + ']');
+
+        return false;
+    }
+
+    /**
+     * @param nodeId Originating node ID.
+     * @param res Response.
+     * @param err Exception.
+     * @return {@code True} if future was completed by this call.
+     */
+    public boolean checkResponse(UUID nodeId, GridNearTxQueryResultsEnlistResponse res, Throwable err) {
+        assert res != null || err != null : this;
+
+        if (err == null && res.error() != null)
+            err = res.error();
+
+        if (X.hasCause(err, ClusterTopologyCheckedException.class))
+            tx.removeMapping(nodeId);
+
+        if (err != null)
+            processFailure(err, null);
+
+        if (ex != null) {
+            batches.remove(nodeId);
+
+            if (batches.isEmpty()) // Wait for all pending requests.
+                onDone();
+
+            return false;
+        }
+
+        assert res != null;
+
+        RES_UPD.getAndAdd(this, res.result());
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryResultsEnlistFuture.class, this, super.toString());
+    }
+
+    /**
+     * A batch of rows
+     */
+    private class Batch {
+        /** Node ID. */
+        @GridToStringExclude
+        private final ClusterNode node;
+
+        /** Rows. */
+        private ArrayList<Object> rows = new ArrayList<>();
+
+        /** Local backup rows. */
+        private ArrayList<Object> locBkpRows;
+
+        /** Readiness flag. Set when batch is full or no new rows are expected. */
+        private boolean ready;
+
+        /**
+         * @param node Cluster node.
+         */
+        private Batch(ClusterNode node) {
+            this.node = node;
+        }
+
+        /**
+         * @return Node.
+         */
+        public ClusterNode node() {
+            return node;
+        }
+
+        /**
+         * Adds a row.
+         *
+         * @param row Row.
+         * @param localBackup {@code true}, when the row key has local backup.
+         */
+        public void add(Object row, boolean localBackup) {
+            rows.add(row);
+
+            if (localBackup) {
+                if (locBkpRows == null)
+                    locBkpRows = new ArrayList<>();
+
+                locBkpRows.add(row);
+            }
+        }
+
+        /**
+         * @return number of rows.
+         */
+        public int size() {
+            return rows.size();
+        }
+
+        /**
+         * @return Collection of rows.
+         */
+        public Collection<Object> rows() {
+            return rows;
+        }
+
+        /**
+         * @return Collection of local backup rows.
+         */
+        public List<Object> localBackupRows() {
+            return locBkpRows;
+        }
+
+        /**
+         * @return Readiness flag.
+         */
+        public boolean ready() {
+            return ready;
+        }
+
+        /**
+         * Sets readiness flag.
+         *
+         * @param ready Flag value.
+         */
+        public void ready(boolean ready) {
+            this.ready = ready;
+        }
+    }
+
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistRequest.java
new file mode 100644
index 0000000..f350d50
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistRequest.java
@@ -0,0 +1,578 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheObjectContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * Request to enlist into transaction and acquire locks for entries produced
+ * with complex DML queries with reducer step.
+ *
+ * One request per batch of entries is used.
+ */
+public class GridNearTxQueryResultsEnlistRequest extends GridCacheIdMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long threadId;
+
+    /** */
+    private IgniteUuid futId;
+
+    /** */
+    private boolean clientFirst;
+
+    /** */
+    private int miniId;
+
+    /** */
+    private UUID subjId;
+
+    /** */
+    private AffinityTopologyVersion topVer;
+
+    /** */
+    private GridCacheVersion lockVer;
+
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
+    /** */
+    private long timeout;
+
+    /** */
+    private long txTimeout;
+
+    /** */
+    private int taskNameHash;
+
+    /** */
+    @GridDirectTransient
+    private Collection<Object> rows;
+
+    /** */
+    @GridToStringExclude
+    private KeyCacheObject[] keys;
+
+    /**  */
+    @GridToStringExclude
+    private CacheObject[] values;
+
+    /** */
+    private EnlistOperation op;
+
+    /**
+     * Default constructor.
+     */
+    public GridNearTxQueryResultsEnlistRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param threadId Thread id.
+     * @param futId Future id.
+     * @param miniId Mini-future id.
+     * @param subjId Transaction subject id.
+     * @param topVer Topology version.
+     * @param lockVer Lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param clientFirst First client request flag.
+     * @param timeout Timeout.
+     * @param txTimeout Tx timeout.
+     * @param taskNameHash Task name hash.
+     * @param rows Rows.
+     * @param op Operation.
+     */
+    GridNearTxQueryResultsEnlistRequest(int cacheId,
+        long threadId,
+        IgniteUuid futId,
+        int miniId,
+        UUID subjId,
+        AffinityTopologyVersion topVer,
+        GridCacheVersion lockVer,
+        MvccSnapshot mvccSnapshot,
+        boolean clientFirst,
+        long timeout,
+        long txTimeout, int taskNameHash,
+        Collection<Object> rows,
+        EnlistOperation op) {
+        this.txTimeout = txTimeout;
+        this.cacheId = cacheId;
+        this.threadId = threadId;
+        this.futId = futId;
+        this.miniId = miniId;
+        this.subjId = subjId;
+        this.topVer = topVer;
+        this.lockVer = lockVer;
+        this.mvccSnapshot = mvccSnapshot;
+        this.clientFirst = clientFirst;
+        this.timeout = timeout;
+        this.taskNameHash = taskNameHash;
+        this.rows = rows;
+        this.op = op;
+    }
+
+    /**
+     * @return Thread id.
+     */
+    public long threadId() {
+        return threadId;
+    }
+
+    /**
+     * @return Future id.
+     */
+    public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Mini future ID.
+     */
+    public int miniId() {
+        return miniId;
+    }
+
+    /**
+     * @return Subject id.
+     */
+    public UUID subjectId() {
+        return subjId;
+    }
+
+    /**
+     * @return Topology version.
+     */
+    @Override public AffinityTopologyVersion topologyVersion() {
+        return topVer;
+    }
+
+    /**
+     * @return Lock version.
+     */
+    public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /**
+     * @return MVCC snapshot.
+     */
+    public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
+     * @return Timeout milliseconds.
+     */
+    public long timeout() {
+        return timeout;
+    }
+
+    /**
+     * @return Tx timeout milliseconds.
+     */
+    public long txTimeout() {
+        return txTimeout;
+    }
+
+    /**
+     * @return Task name hash.
+     */
+    public int taskNameHash() {
+        return taskNameHash;
+    }
+
+    /**
+     * @return {@code True} if this is the first client request.
+     */
+    public boolean firstClientRequest() {
+        return clientFirst;
+    }
+
+    /**
+     * @return Collection of rows.
+     */
+    public Collection<Object> rows() {
+        return rows;
+    }
+
+    /**
+     * @return Operation.
+     */
+    public EnlistOperation operation() {
+        return op;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
+        super.prepareMarshal(ctx);
+
+        GridCacheContext cctx = ctx.cacheContext(cacheId);
+        CacheObjectContext objCtx = cctx.cacheObjectContext();
+
+        if (rows != null && keys == null) {
+            keys = new KeyCacheObject[rows.size()];
+
+            int i = 0;
+
+            boolean keysOnly = op.isDeleteOrLock();
+
+            values = keysOnly ? null : new CacheObject[keys.length];
+
+            for (Object row : rows) {
+                Object key, val = null;
+
+                if (keysOnly)
+                    key = row;
+                else {
+                    key = ((IgniteBiTuple)row).getKey();
+                    val = ((IgniteBiTuple)row).getValue();
+                }
+
+                assert key != null && (keysOnly || val != null): "key=" + key + ", val=" + val;
+
+                KeyCacheObject key0 = cctx.toCacheKeyObject(key);
+
+                assert key0 != null;
+
+                key0.prepareMarshal(objCtx);
+
+                keys[i] = key0;
+
+                if (!keysOnly) {
+                    CacheObject val0 = cctx.toCacheObject(val);
+
+                    assert val0 != null;
+
+                    val0.prepareMarshal(objCtx);
+
+                    values[i] = val0;
+                }
+
+                i++;
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void finishUnmarshal(GridCacheSharedContext ctx, ClassLoader ldr) throws IgniteCheckedException {
+        super.finishUnmarshal(ctx, ldr);
+
+        if (keys != null) {
+            rows = new ArrayList<>(keys.length);
+
+            CacheObjectContext objCtx = ctx.cacheContext(cacheId).cacheObjectContext();
+
+            for (int i = 0; i < keys.length; i++) {
+                keys[i].finishUnmarshal(objCtx, ldr);
+
+                if (op.isDeleteOrLock())
+                    rows.add(keys[i]);
+                else {
+                    if (values[i] != null)
+                        values[i].finishUnmarshal(objCtx, ldr);
+
+                    rows.add(new IgniteBiTuple<>(keys[i], values[i]));
+                }
+            }
+
+            keys = null;
+            values = null;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeBoolean("clientFirst", clientFirst))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeIgniteUuid("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeObjectArray("keys", keys, MessageCollectionItemType.MSG))
+                    return false;
+
+                writer.incrementState();
+
+            case 6:
+                if (!writer.writeMessage("lockVer", lockVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeInt("miniId", miniId))
+                    return false;
+
+                writer.incrementState();
+
+            case 8:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
+            case 9:
+                if (!writer.writeByte("op", op != null ? (byte)op.ordinal() : -1))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeUuid("subjId", subjId))
+                    return false;
+
+                writer.incrementState();
+
+            case 11:
+                if (!writer.writeInt("taskNameHash", taskNameHash))
+                    return false;
+
+                writer.incrementState();
+
+            case 12:
+                if (!writer.writeLong("threadId", threadId))
+                    return false;
+
+                writer.incrementState();
+
+            case 13:
+                if (!writer.writeLong("timeout", timeout))
+                    return false;
+
+                writer.incrementState();
+
+            case 14:
+                if (!writer.writeMessage("topVer", topVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 15:
+                if (!writer.writeLong("txTimeout", txTimeout))
+                    return false;
+
+                writer.incrementState();
+
+            case 16:
+                if (!writer.writeObjectArray("values", values, MessageCollectionItemType.MSG))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                clientFirst = reader.readBoolean("clientFirst");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                futId = reader.readIgniteUuid("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                keys = reader.readObjectArray("keys", MessageCollectionItemType.MSG, KeyCacheObject.class);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 6:
+                lockVer = reader.readMessage("lockVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                miniId = reader.readInt("miniId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 8:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 9:
+                byte opOrd;
+
+                opOrd = reader.readByte("op");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                op = EnlistOperation.fromOrdinal(opOrd);
+
+                reader.incrementState();
+
+            case 10:
+                subjId = reader.readUuid("subjId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 11:
+                taskNameHash = reader.readInt("taskNameHash");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 12:
+                threadId = reader.readLong("threadId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 13:
+                timeout = reader.readLong("timeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 14:
+                topVer = reader.readMessage("topVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 15:
+                txTimeout = reader.readLong("txTimeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 16:
+                values = reader.readObjectArray("values", MessageCollectionItemType.MSG, CacheObject.class);
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridNearTxQueryResultsEnlistRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 17;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean addDeploymentInfo() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 153;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryResultsEnlistRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistResponse.java
new file mode 100644
index 0000000..749afd3
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxQueryResultsEnlistResponse.java
@@ -0,0 +1,202 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * A response to {@link GridNearTxQueryResultsEnlistRequest}.
+ */
+public class GridNearTxQueryResultsEnlistResponse extends GridNearTxQueryEnlistResponse {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private GridCacheVersion dhtVer;
+
+    /** */
+    private IgniteUuid dhtFutId;
+
+    /** */
+    private GridLongList updCntrs;
+
+    /**
+     * Default-constructor.
+     */
+    public GridNearTxQueryResultsEnlistResponse() {
+        // No-op.
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param futId Future id.
+     * @param miniId Mini future id.
+     * @param lockVer Lock version.
+     * @param res Result.
+     * @param dhtFutId Dht future id.
+     * @param dhtVer Dht version.
+     * @param updCntrs Update counters.
+     */
+    public GridNearTxQueryResultsEnlistResponse(int cacheId,
+        IgniteUuid futId,
+        int miniId,
+        GridCacheVersion lockVer,
+        long res,
+        GridCacheVersion dhtVer,
+        IgniteUuid dhtFutId,
+        GridLongList updCntrs) {
+        super(cacheId, futId, miniId, lockVer, res, false);
+
+        this.dhtVer = dhtVer;
+        this.dhtFutId = dhtFutId;
+        this.updCntrs = updCntrs;
+    }
+
+    /**
+     * @param cacheId Cache id.
+     * @param futId Future id.
+     * @param miniId Mini future id.
+     * @param lockVer Lock version.
+     * @param err Error.
+     */
+    public GridNearTxQueryResultsEnlistResponse(int cacheId,
+        IgniteUuid futId,
+        int miniId,
+        GridCacheVersion lockVer,
+        Throwable err) {
+        super(cacheId, futId, miniId, lockVer, err);
+    }
+
+    /**
+     * @return Dht version.
+     */
+    public GridCacheVersion dhtVersion() {
+        return dhtVer;
+    }
+
+    /**
+     * @return Dht future id.
+     */
+    public IgniteUuid dhtFutureId() {
+        return dhtFutId;
+    }
+
+    /**
+     * @return Update counters.
+     */
+    public GridLongList updateCounters() {
+        return updCntrs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 12;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 9:
+                if (!writer.writeIgniteUuid("dhtFutId", dhtFutId))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeMessage("dhtVer", dhtVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 11:
+                if (!writer.writeMessage("updCntrs", updCntrs))
+                    return false;
+
+                writer.incrementState();
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 9:
+                dhtFutId = reader.readIgniteUuid("dhtFutId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 10:
+                dhtVer = reader.readMessage("dhtVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 11:
+                updCntrs = reader.readMessage("updCntrs");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+        }
+
+        return reader.afterMessageRead(GridNearTxQueryResultsEnlistResponse.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 154;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxQueryResultsEnlistResponse.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxRemote.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxRemote.java
index 5477af9..879bf26 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxRemote.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxRemote.java
@@ -243,13 +243,16 @@
         return nearXidVer;
     }
 
-    /**
-     * @param cntrs Partition indexes.
-     */
+    /** {@inheritDoc} */
     @Override public void setPartitionUpdateCounters(long[] cntrs) {
         // No-op.
     }
 
+    /** {@inheritDoc} */
+    @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException {
+        throw new UnsupportedOperationException("Near tx doesn't track active caches.");
+    }
+
     /**
      * Adds owned versions to map.
      *
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxSelectForUpdateFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxSelectForUpdateFuture.java
new file mode 100644
index 0000000..a5ab7cd
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxSelectForUpdateFuture.java
@@ -0,0 +1,462 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheCompoundIdentityFuture;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
+import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
+import org.apache.ignite.internal.processors.cache.GridCacheVersionedFuture;
+import org.apache.ignite.internal.processors.cache.distributed.GridDistributedTxMapping;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * A future tracking requests for remote nodes transaction enlisting and locking
+ * of entries produced with complex DML queries requiring reduce step.
+ */
+public class GridNearTxSelectForUpdateFuture extends GridCacheCompoundIdentityFuture<Long>
+    implements GridCacheVersionedFuture<Long> {
+    /** */
+    private static final long serialVersionUID = 6931664882548658420L;
+
+    /** Done field updater. */
+    private static final AtomicIntegerFieldUpdater<GridNearTxSelectForUpdateFuture> DONE_UPD =
+        AtomicIntegerFieldUpdater.newUpdater(GridNearTxSelectForUpdateFuture.class, "done");
+
+    /** Done field updater. */
+    private static final AtomicReferenceFieldUpdater<GridNearTxSelectForUpdateFuture, Throwable> EX_UPD =
+        AtomicReferenceFieldUpdater.newUpdater(GridNearTxSelectForUpdateFuture.class, Throwable.class, "ex");
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile int done;
+
+    /** */
+    @SuppressWarnings("unused")
+    @GridToStringExclude
+    private volatile Throwable ex;
+
+    /** Cache context. */
+    @GridToStringExclude
+    private final GridCacheContext<?, ?> cctx;
+
+    /** Transaction. */
+    private final GridNearTxLocal tx;
+
+    /** Mvcc future id. */
+    private final IgniteUuid futId;
+
+    /** Lock version. */
+    private final GridCacheVersion lockVer;
+
+    /** */
+    private AffinityTopologyVersion topVer;
+
+    /** */
+    private final long timeout;
+
+    /** Logger. */
+    @GridToStringExclude
+    private final IgniteLogger log;
+
+    /** Timeout object. */
+    @GridToStringExclude
+    private LockTimeoutObject timeoutObj;
+
+    /** Ids of mini futures. */
+    private final Map<UUID, Integer> miniFutIds = new HashMap<>();
+
+    /**
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @param timeout Timeout.
+     */
+    public GridNearTxSelectForUpdateFuture(GridCacheContext<?, ?> cctx,
+        GridNearTxLocal tx,
+        long timeout) {
+        super(CU.longReducer());
+
+        this.cctx = cctx;
+        this.tx = tx;
+        this.timeout = timeout;
+
+        futId = IgniteUuid.randomUuid();
+        lockVer = tx.xidVersion();
+
+        log = cctx.logger(GridNearTxSelectForUpdateFuture.class);
+    }
+
+    /**
+     * @return Cache context.
+     */
+    public GridCacheContext<?, ?> cache() {
+        return cctx;
+    }
+
+    /**
+     * @param node Node.
+     */
+    private void map(ClusterNode node) {
+        GridDistributedTxMapping mapping = tx.mappings().get(node.id());
+
+        if (mapping == null)
+            tx.mappings().put(mapping = new GridDistributedTxMapping(node));
+
+        mapping.markQueryUpdate();
+
+        if (node.isLocal())
+            tx.colocatedLocallyMapped(true);
+
+        int futId = futuresCountNoLock();
+
+        miniFutIds.put(node.id(), futId);
+
+        add(new NodeFuture(node));
+    }
+
+    /**
+     * Process result of query execution on given
+     * @param nodeId Node id.
+     * @param cnt Total rows counter on given node.
+     * @param removeMapping Whether transaction mapping should be removed for node.
+     * @param err Error.
+     */
+    public void onResult(UUID nodeId, Long cnt, boolean removeMapping, @Nullable Throwable err) {
+        NodeFuture nodeFut = mapFuture(nodeId);
+
+        if (nodeFut != null)
+            nodeFut.onResult(cnt, removeMapping, err);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean processFailure(Throwable err, IgniteInternalFuture<Long> fut) {
+        if (ex != null || !EX_UPD.compareAndSet(this, null, err))
+            ex.addSuppressed(err);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onDone(@Nullable Long res, @Nullable Throwable err, boolean cancelled) {
+        if (!DONE_UPD.compareAndSet(this, 0, 1))
+            return false;
+
+        cctx.tm().txContext(tx);
+
+        Throwable ex0 = ex;
+
+        if (ex0 != null) {
+            if (err != null)
+                ex0.addSuppressed(err);
+
+            err = ex0;
+        }
+
+        if (!cancelled && err == null)
+            tx.clearLockFuture(this);
+        else
+            tx.setRollbackOnly();
+
+        boolean done = super.onDone(res, err, cancelled);
+
+        assert done;
+
+        // Clean up.
+        cctx.mvcc().removeVersionedFuture(this);
+
+        if (timeoutObj != null)
+            cctx.time().removeTimeoutObject(timeoutObj);
+
+        return true;
+    }
+
+    /**
+     * Finds pending map node future by the given ID.
+     *
+     * @param nodeId Node id.
+     * @return Map node future.
+     */
+    private NodeFuture mapFuture(UUID nodeId) {
+        synchronized (this) {
+            Integer idx = miniFutIds.get(nodeId);
+
+            if (idx == null)
+                throw new IllegalStateException("SELECT FOR UPDATE node future not found [nodeId=" + nodeId + "].");
+
+            assert idx >= 0 && idx < futuresCountNoLock();
+
+            IgniteInternalFuture<Long> fut = future(idx);
+
+            if (!fut.isDone())
+                return (NodeFuture)fut;
+        }
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onOwnerChanged(GridCacheEntryEx entry, GridCacheMvccCandidate owner) {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean onNodeLeft(UUID nodeId) {
+        if (topVer == null)
+            return false; // Local query, do nothing.
+
+        for (IgniteInternalFuture<?> fut : futures()) {
+            NodeFuture f = (NodeFuture)fut;
+
+            if (f.node.id().equals(nodeId)) {
+                if (log.isDebugEnabled())
+                    log.debug("Found mini-future for left node [nodeId=" + nodeId + ", mini=" + f + ", fut=" +
+                        this + ']');
+
+                ClusterTopologyCheckedException topEx = new ClusterTopologyCheckedException("Failed to enlist keys " +
+                    "(primary node left grid, retry transaction if possible) [node=" + nodeId + ']');
+
+                topEx.retryReadyFuture(cctx.shared().nextAffinityReadyFuture(topVer));
+
+                return f.onResult(0, false, topEx);
+            }
+        }
+
+        if (log.isDebugEnabled())
+            log.debug("Future does not have mapping for left node (ignoring) [nodeId=" + nodeId +
+                ", fut=" + this + ']');
+
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean trackable() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void markNotTrackable() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void logError(IgniteLogger log, String msg, Throwable e) {
+        // no-op
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void logDebug(IgniteLogger log, String msg) {
+        // no-op
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(GridNearTxSelectForUpdateFuture.class, this, super.toString());
+    }
+
+    /**
+     * Initialize this future for distributed execution.
+     * @param topVer Topology version.
+     * @param nodes Nodes to run query on.
+     */
+    public synchronized void init(AffinityTopologyVersion topVer, Collection<ClusterNode> nodes) {
+        doInit(topVer, nodes, false);
+    }
+
+    /**
+     * Initialize this future for local execution.
+     */
+    public synchronized void initLocal() {
+        doInit(null, Collections.singletonList(cctx.localNode()), true);
+    }
+
+    /**
+     * Initialize this future for distributed or local execution.
+     * @param topVer Topology version ({@code null} for local case).
+     * @param nodes Nodes to run query on.
+     * @param loc Local query flag.
+     */
+    private void doInit(@Nullable AffinityTopologyVersion topVer, Collection<ClusterNode> nodes, boolean loc) {
+        assert !loc || (topVer == null && nodes.size() == 1 && nodes.iterator().next().isLocal());
+        if (initialized())
+            throw new IllegalStateException("SELECT FOR UPDATE future has been initialized already.");
+
+        tx.init();
+
+        if (timeout < 0) {
+            // Time is out.
+            onDone(timeoutException());
+
+            return;
+        }
+        else if (timeout > 0)
+            timeoutObj = new LockTimeoutObject();
+
+        if (!tx.updateLockFuture(null, this)) {
+            onDone(tx.timedOut() ? tx.timeoutException() : tx.rollbackException());
+
+            return;
+        }
+
+        boolean added = cctx.mvcc().addFuture(this);
+
+        assert added : this;
+
+        try {
+            tx.addActiveCache(cctx, false);
+        }
+        catch (IgniteCheckedException e) {
+            onDone(e);
+
+            return;
+        }
+
+        if (timeoutObj != null)
+            cctx.time().addTimeoutObject(timeoutObj);
+
+        this.topVer = topVer;
+
+        for (ClusterNode n : nodes)
+            map(n);
+
+        markInitialized();
+    }
+
+    /**
+     * @return Timeout exception.
+     */
+    @NotNull private IgniteTxTimeoutCheckedException timeoutException() {
+        return new IgniteTxTimeoutCheckedException("Failed to acquire lock within provided timeout for " +
+            "transaction [timeout=" + timeout + ", tx=" + tx + ']');
+    }
+
+    /**
+     * A future tracking a single MAP request to be enlisted in transaction and locked on data node.
+     */
+    private class NodeFuture extends GridFutureAdapter<Long> {
+        /** */
+        private boolean completed;
+
+        /** Node ID. */
+        @GridToStringExclude
+        private final ClusterNode node;
+
+        /**
+         * @param node Cluster node.
+         *
+         */
+        private NodeFuture(ClusterNode node) {
+            this.node = node;
+        }
+
+        /**
+         * @return Node.
+         */
+        public ClusterNode node() {
+            return node;
+        }
+
+        /**
+         * @param cnt Total rows counter on given node.
+         * @param removeMapping Whether transaction mapping should be removed for node.
+         * @param err Exception.
+         * @return {@code True} if future was completed by this call.
+         */
+        public boolean onResult(long cnt, boolean removeMapping, Throwable err) {
+            synchronized (this) {
+                if (completed)
+                    return false;
+
+                completed = true;
+            }
+
+            if (X.hasCause(err, ClusterTopologyCheckedException.class) || removeMapping) {
+                GridDistributedTxMapping m = tx.mappings().get(node.id());
+
+                assert m != null && m.empty();
+
+                tx.removeMapping(node.id());
+
+                if (node.isLocal())
+                    tx.colocatedLocallyMapped(false);
+            }
+            else if (err == null && cnt > 0 && !node.isLocal())
+                tx.hasRemoteLocks(true);
+
+            return onDone(cnt, err);
+        }
+    }
+
+    /**
+     * Lock request timeout object.
+     */
+    private class LockTimeoutObject extends GridTimeoutObjectAdapter {
+        /**
+         * Default constructor.
+         */
+        LockTimeoutObject() {
+            super(timeout);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onTimeout() {
+            if (log.isDebugEnabled())
+                log.debug("Timed out waiting for lock response: " + this);
+
+            onDone(timeoutException());
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(LockTimeoutObject.class, this);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsImpl.java
index b12858e..03c71c4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsImpl.java
@@ -86,7 +86,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(IgniteTxMappingsImpl.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsSingleImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsSingleImpl.java
index b37f8d1..fcc7fe2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsSingleImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteTxMappingsSingleImpl.java
@@ -95,7 +95,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(IgniteTxMappingsSingleImpl.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/NearTxFinishFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/NearTxFinishFuture.java
index 132c754..44a87f2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/NearTxFinishFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/NearTxFinishFuture.java
@@ -17,15 +17,34 @@
 
 package org.apache.ignite.internal.processors.cache.distributed.near;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 
 /**
  *
  */
-public interface NearTxFinishFuture extends IgniteInternalFuture<IgniteInternalTx> {
+public interface NearTxFinishFuture extends IgniteInternalFuture<IgniteInternalTx>  {
     /**
      * @return Commit flag.
      */
     boolean commit();
+
+    /**
+     * @return Transaction.
+     */
+    GridNearTxLocal tx();
+
+    /**
+     *
+     * @param commit Commit flag.
+     * @param clearThreadMap If {@code true} removes {@link GridNearTxLocal} from thread map.
+     * @param onTimeout If {@code true} called from timeout handler.
+     */
+    public void finish(boolean commit, boolean clearThreadMap, boolean onTimeout);
+
+    /**
+     * @param e Error.
+     */
+    public void onNodeStop(IgniteCheckedException e);
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/TxTopologyVersionFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/TxTopologyVersionFuture.java
new file mode 100644
index 0000000..b5e3883
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/TxTopologyVersionFuture.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.near;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheStoppedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.CI1;
+
+/**
+ * Future to obtain/lock topology version for SELECT FOR UPDATE.
+ */
+public class TxTopologyVersionFuture extends GridFutureAdapter<AffinityTopologyVersion> {
+    /** Transaction. */
+    private final GridNearTxLocal tx;
+
+    /** Target cache context. */
+    private final GridCacheContext<?, ?> cctx;
+
+    /** Topology locked flag. */
+    private boolean topLocked;
+
+    /**
+     * @param tx Transaction.
+     * @param cctx Target cache context.
+     */
+    public TxTopologyVersionFuture(GridNearTxLocal tx, GridCacheContext cctx) {
+        this.tx = tx;
+        this.cctx = cctx;
+
+        init();
+    }
+
+    /** */
+    private void init() {
+        // Obtain the topology version to use.
+        long threadId = Thread.currentThread().getId();
+
+        AffinityTopologyVersion topVer = cctx.mvcc().lastExplicitLockTopologyVersion(threadId);
+
+        // If there is another system transaction in progress, use it's topology version to prevent deadlock.
+        if (topVer == null && tx.system())
+            topVer = cctx.tm().lockedTopologyVersion(threadId, tx);
+
+        if (topVer != null)
+            tx.topologyVersion(topVer);
+
+        if (topVer == null)
+            topVer = tx.topologyVersionSnapshot();
+
+        if (topVer != null) {
+            for (GridDhtTopologyFuture fut : cctx.shared().exchange().exchangeFutures()) {
+                if (fut.exchangeDone() && fut.topologyVersion().equals(topVer)) {
+                    Throwable err = fut.validateCache(cctx, false, false, null, null);
+
+                    if (err != null) {
+                        onDone(err);
+
+                        return;
+                    }
+
+                    break;
+                }
+            }
+
+            onDone(topVer);
+
+            topLocked = true;
+
+            return;
+        }
+
+        acquireTopologyVersion();
+    }
+
+    /**
+     * Acquire topology future and wait for its completion.
+     */
+    private void acquireTopologyVersion() {
+        cctx.topology().readLock();
+
+        try {
+            if (cctx.topology().stopping()) {
+                onDone(new CacheStoppedException(cctx.name()));
+
+                return;
+            }
+
+            GridDhtTopologyFuture fut = cctx.topologyVersionFuture();
+
+            if (fut.isDone()) {
+                Throwable err = fut.validateCache(cctx, false, false, null, null);
+
+                if (err != null) {
+                    onDone(err);
+
+                    return;
+                }
+
+                AffinityTopologyVersion topVer = fut.topologyVersion();
+
+                if (tx != null)
+                    tx.topologyVersion(topVer);
+
+                onDone(topVer);
+            }
+            else {
+                fut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
+                    @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
+                        try {
+                            fut.get();
+
+                            acquireTopologyVersion();
+                        }
+                        catch (IgniteCheckedException e) {
+                            onDone(e);
+                        }
+                        finally {
+                            cctx.shared().txContextReset();
+                        }
+                    }
+                });
+            }
+        }
+        finally {
+            cctx.topology().readUnlock();
+        }
+    }
+
+    /**
+     * @return Client first flag.
+     */
+    public boolean clientFirst() {
+        return cctx.localNode().isClient() && !topLocked && !tx.hasRemoteLocks();
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java
index f2a4b30..f362374 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java
@@ -22,6 +22,7 @@
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheManager;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.dr.GridDrType;
 import org.jetbrains.annotations.Nullable;
@@ -56,6 +57,36 @@
         AffinityTopologyVersion topVer)throws IgniteCheckedException;
 
     /**
+     * Enlist for DR.
+     *
+     * @param key Key.
+     * @param val Value.
+     * @param ttl TTL.
+     * @param expireTime Expire time.
+     * @param ver Version.
+     * @param drType Replication type.
+     * @param topVer Topology version.
+     * @param mvccVer Tx mvcc version.
+     * @throws IgniteCheckedException If failed.
+     */
+    void mvccReplicate(KeyCacheObject key,
+        @Nullable CacheObject val,
+        long ttl,
+        long expireTime,
+        GridCacheVersion ver,
+        GridDrType drType,
+        AffinityTopologyVersion topVer,
+        MvccVersion mvccVer) throws IgniteCheckedException;
+
+    /**
+     * @param mvccVer Tx mvcc version.
+     * @param commit {@code True} if tx committed, {@code False} otherwise.
+     * @param topVer Tx snapshot affinity version.
+     * @throws IgniteCheckedException If failed.
+     */
+    void onTxFinished(MvccVersion mvccVer, boolean commit, AffinityTopologyVersion topVer);
+
+    /**
      * Process partitions exchange event.
      *
      * @param topVer Topology version.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java
index f3c1b23..8d7e4d8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java
@@ -22,6 +22,7 @@
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.dr.GridDrType;
 import org.apache.ignite.lang.IgniteFuture;
@@ -78,6 +79,18 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void mvccReplicate(KeyCacheObject key, @Nullable CacheObject val, long ttl, long expireTime,
+        GridCacheVersion ver, GridDrType drType, AffinityTopologyVersion topVer,
+        MvccVersion mvccVer) throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onTxFinished(MvccVersion mvccVer, boolean commit, AffinityTopologyVersion topVer) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
     @Override public void onExchange(AffinityTopologyVersion topVer, boolean left) throws IgniteCheckedException {
         // No-op.
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCache.java
index 0de53c0..7b7ac66 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCache.java
@@ -182,7 +182,7 @@
             if (entry != null && ctx.isAll(entry, CU.empty0())) {
                 entry.releaseLocal();
 
-                ctx.evicts().touch(entry, topVer);
+                entry.touch(topVer);
             }
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
index b96dbdc..b615952 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/atomic/GridLocalAtomicCache.java
@@ -54,8 +54,8 @@
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
-import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.local.GridLocalCache;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxLocalEx;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.resource.GridResourceIoc;
@@ -72,6 +72,7 @@
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.plugin.security.SecurityPermission;
+import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.jetbrains.annotations.Nullable;
 
@@ -460,6 +461,7 @@
                                     taskName,
                                     expiry,
                                     !deserializeBinary,
+                                    null,
                                     null);
 
                                 if (res != null) {
@@ -487,7 +489,8 @@
                                     null,
                                     taskName,
                                     expiry,
-                                    !deserializeBinary);
+                                    !deserializeBinary,
+                                    null);
 
                                 if (v != null) {
                                     ctx.addResult(vals,
@@ -513,7 +516,7 @@
                     }
                     finally {
                         if (entry != null)
-                            ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
+                            entry.touch(ctx.affinity().affinityTopologyVersion());
                     }
 
                     if (!success && storeEnabled)
@@ -979,7 +982,7 @@
                     }
                     finally {
                         if (entry != null)
-                            ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
+                            entry.touch(ctx.affinity().affinityTopologyVersion());
                     }
                 }
             }
@@ -1095,7 +1098,8 @@
                             entryProcessor,
                             taskName,
                             null,
-                            keepBinary);
+                            keepBinary,
+                            null);
 
                         Object oldVal = null;
 
@@ -1108,6 +1112,8 @@
 
                         boolean validation = false;
 
+                        IgniteThread.onEntryProcessorEntered(false);
+
                         try {
                             Object computed = entryProcessor.process(invokeEntry, invokeArgs);
 
@@ -1136,6 +1142,9 @@
                                 continue;
                             }
                         }
+                        finally {
+                            IgniteThread.onEntryProcessorLeft();
+                        }
 
                         if (invokeRes != null)
                             invokeResMap.put((K)entry.key().value(ctx.cacheObjectContext(), false), invokeRes);
@@ -1232,7 +1241,8 @@
                                 null,
                                 taskName,
                                 null,
-                                keepBinary);
+                                keepBinary,
+                                null);
 
                             Object interceptorVal = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(
                                 ctx, entry.key(), old, keepBinary), val);
@@ -1267,7 +1277,8 @@
                                 null,
                                 taskName,
                                 null,
-                                keepBinary);
+                                keepBinary,
+                                null);
 
                             IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor()
                                 .onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, keepBinary));
@@ -1502,7 +1513,7 @@
         AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
 
         for (GridCacheEntryEx entry : locked)
-            ctx.evicts().touch(entry, topVer);
+            entry.touch(topVer);
 
         throw new NullPointerException("Null key.");
     }
@@ -1519,7 +1530,7 @@
         AffinityTopologyVersion topVer = ctx.affinity().affinityTopologyVersion();
 
         for (GridCacheEntryEx entry : locked)
-            ctx.evicts().touch(entry, topVer);
+            entry.touch(topVer);
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccCoordinator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccCoordinator.java
new file mode 100644
index 0000000..045177a
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccCoordinator.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.Serializable;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ *
+ */
+public class MvccCoordinator implements Serializable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private final UUID nodeId;
+
+    /**
+     * Unique coordinator version, increases when new coordinator is assigned,
+     * can differ from topVer if we decide to assign coordinator manually.
+     */
+    private final long crdVer;
+
+    /** */
+    @GridToStringInclude
+    private final AffinityTopologyVersion topVer;
+
+    /**
+     * @param nodeId Coordinator node ID.
+     * @param crdVer Coordinator version.
+     * @param topVer Topology version when coordinator was assigned.
+     */
+    public MvccCoordinator(UUID nodeId, long crdVer, AffinityTopologyVersion topVer) {
+        assert nodeId != null;
+        assert crdVer > 0 : crdVer;
+        assert topVer != null;
+
+        this.nodeId = nodeId;
+        this.crdVer = crdVer;
+        this.topVer = topVer;
+    }
+
+    /**
+     * @return Unique coordinator version.
+     */
+    public long coordinatorVersion() {
+        return crdVer;
+    }
+
+    /**
+     * @return Coordinator node ID.
+     */
+    public UUID nodeId() {
+        return nodeId;
+    }
+
+    /**
+     * @return Topology version when coordinator was assigned.
+     */
+    public AffinityTopologyVersion topologyVersion() {
+        return topVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        MvccCoordinator that = (MvccCoordinator)o;
+
+        return crdVer == that.crdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return (int)(crdVer ^ (crdVer >>> 32));
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccCoordinator.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccDiscoveryData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccDiscoveryData.java
new file mode 100644
index 0000000..d2e936f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccDiscoveryData.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+import java.io.Serializable;
+
+/**
+ * MVCC discovery data to be shared between nodes on join.
+ */
+public class MvccDiscoveryData implements Serializable {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Current coordinator. */
+    private MvccCoordinator crd;
+
+    /**
+     * @param crd Coordinator.
+     */
+    public MvccDiscoveryData(MvccCoordinator crd) {
+        this.crd = crd;
+    }
+
+    /**
+     * @return Current coordinator.
+     */
+    public MvccCoordinator coordinator() {
+        return crd;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccDiscoveryData.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccEmptyLongList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccEmptyLongList.java
new file mode 100644
index 0000000..7963685
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccEmptyLongList.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class MvccEmptyLongList implements MvccLongList {
+    /** */
+    public static MvccEmptyLongList INSTANCE = new MvccEmptyLongList();
+
+    /**
+     *
+     */
+    private MvccEmptyLongList() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public int size() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long get(int i) {
+        throw new IndexOutOfBoundsException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean contains(long val) {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return "MvccEmptyLongList[]";
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccFuture.java
new file mode 100644
index 0000000..0ca82d3
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccFuture.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.UUID;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ *
+ */
+public class MvccFuture<T> extends GridFutureAdapter<T> {
+    /** */
+    protected UUID crdId;
+
+    /**
+     * Default constructor.
+     */
+    public MvccFuture() {
+    }
+
+    /**
+     * @param crdId MVCC coordinator node ID.
+     */
+    public MvccFuture(UUID crdId) {
+        assert crdId != null;
+
+        this.crdId = crdId;
+    }
+
+    /**
+     * @return MVCC coordinator node ID.
+     */
+    public UUID coordinatorNodeId() {
+        return crdId;
+    }
+
+    /**
+     * @param crdId MVCC coordinator node ID.
+     */
+    public void coordinatorNodeId(UUID crdId) {
+        assert crdId != null;
+
+        this.crdId = crdId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccFuture.class, this, super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccLongList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccLongList.java
new file mode 100644
index 0000000..8b580ed
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccLongList.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public interface MvccLongList {
+    public int size();
+
+    public long get(int i);
+
+    public boolean contains(long val);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccPreviousCoordinatorQueries.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccPreviousCoordinatorQueries.java
new file mode 100644
index 0000000..cd7560f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccPreviousCoordinatorQueries.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.F;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker.MVCC_TRACKER_ID_NA;
+
+/**
+ *
+ */
+class MvccPreviousCoordinatorQueries {
+    /** */
+    private volatile boolean prevQueriesDone;
+
+    /** Map of nodes to active {@link MvccQueryTracker} IDs list. */
+    private final ConcurrentHashMap<UUID, Set<Long>> activeQueries = new ConcurrentHashMap<>();
+
+    /** */
+    private final ConcurrentHashMap<UUID, Set<Long>> rcvdAcks = new ConcurrentHashMap<>();
+
+    /** */
+    private Set<UUID> rcvd;
+
+    /** */
+    private Set<UUID> waitNodes;
+
+    /** */
+    private boolean initDone;
+
+    /**
+     * @param nodeQueries Active queries map.
+     * @param nodes Cluster nodes.
+     * @param mgr Discovery manager.
+     */
+    void init(Map<UUID, GridLongList> nodeQueries, Collection<ClusterNode> nodes, GridDiscoveryManager mgr) {
+        synchronized (this) {
+            assert !initDone;
+            assert waitNodes == null;
+
+            waitNodes = new HashSet<>();
+
+            for (ClusterNode node : nodes) {
+                if ((nodeQueries == null || !nodeQueries.containsKey(node.id())) &&
+                    mgr.alive(node) &&
+                    !F.contains(rcvd, node.id()))
+                    waitNodes.add(node.id());
+            }
+
+            initDone = waitNodes.isEmpty();
+
+            if (nodeQueries != null) {
+                for (Map.Entry<UUID, GridLongList> e : nodeQueries.entrySet())
+                    mergeToActiveQueries(e.getKey(), e.getValue());
+            }
+
+            if (initDone && !prevQueriesDone)
+                prevQueriesDone = activeQueries.isEmpty() && rcvdAcks.isEmpty();
+        }
+    }
+
+    /**
+     * @return {@code True} if all queries on
+     */
+    boolean previousQueriesDone() {
+        return prevQueriesDone;
+    }
+
+    /**
+     * Merges current node active queries with the given ones.
+     *
+     * @param nodeId Node ID.
+     * @param nodeTrackers Active query trackers started on node.
+     */
+    private void mergeToActiveQueries(UUID nodeId, GridLongList nodeTrackers) {
+        if (nodeTrackers == null || nodeTrackers.isEmpty() || prevQueriesDone)
+            return;
+
+        Set<Long> currTrackers = activeQueries.get(nodeId);
+
+        if (currTrackers == null)
+            activeQueries.put(nodeId, currTrackers = addAll(nodeTrackers, null));
+        else
+            addAll(nodeTrackers, currTrackers);
+
+        // Check if there were any acks had been arrived before.
+        Set<Long> currAcks = rcvdAcks.get(nodeId);
+
+        if (!currTrackers.isEmpty() && currAcks != null && !currAcks.isEmpty()) {
+            Collection<Long> intersection =  new HashSet<>(currAcks);
+
+            intersection.retainAll(currTrackers);
+
+            currAcks.removeAll(intersection);
+            currTrackers.removeAll(intersection);
+
+            if (currTrackers.isEmpty())
+                activeQueries.remove(nodeId);
+
+            if (currAcks.isEmpty())
+                rcvdAcks.remove(nodeId);
+        }
+
+        if (initDone && !prevQueriesDone)
+            prevQueriesDone = activeQueries.isEmpty() && rcvdAcks.isEmpty();
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param nodeTrackers  Active query trackers started on node.
+     */
+    void addNodeActiveQueries(UUID nodeId, @Nullable GridLongList nodeTrackers) {
+        synchronized (this) {
+            if (initDone)
+                return;
+
+            if (waitNodes == null) {
+                if (rcvd == null)
+                    rcvd = new HashSet<>();
+
+                rcvd.add(nodeId);
+            }
+            else {
+                waitNodes.remove(nodeId);
+
+                initDone = waitNodes.isEmpty();
+            }
+
+            mergeToActiveQueries(nodeId, nodeTrackers);
+
+            if (initDone && !prevQueriesDone)
+                prevQueriesDone = activeQueries.isEmpty() && rcvdAcks.isEmpty();
+        }
+    }
+
+    /**
+     * @param nodeId Failed node ID.
+     */
+    void onNodeFailed(UUID nodeId) {
+        synchronized (this) {
+            if (waitNodes != null) {
+                waitNodes.remove(nodeId);
+
+                initDone = waitNodes.isEmpty();
+            }
+
+            if (initDone && !prevQueriesDone && activeQueries.remove(nodeId) != null)
+                prevQueriesDone = activeQueries.isEmpty() && rcvdAcks.isEmpty();
+        }
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param qryTrackerId Query tracker Id.
+     */
+    void onQueryDone(UUID nodeId, long qryTrackerId) {
+        if (qryTrackerId == MVCC_TRACKER_ID_NA)
+            return;
+
+        synchronized (this) {
+            Set<Long> nodeTrackers = activeQueries.get(nodeId);
+
+            if (nodeTrackers == null || !nodeTrackers.remove(qryTrackerId)) {
+                Set<Long> nodeAcks = rcvdAcks.get(nodeId);
+
+                if (nodeAcks == null)
+                    rcvdAcks.put(nodeId, nodeAcks = new HashSet<>());
+
+                // We received qry done ack before the active qry message. Need to save it.
+                nodeAcks.add(qryTrackerId);
+            }
+
+            if (nodeTrackers != null && nodeTrackers.isEmpty())
+                activeQueries.remove(nodeId);
+
+            if (initDone && !prevQueriesDone)
+                prevQueriesDone = activeQueries.isEmpty() && rcvdAcks.isEmpty();
+        }
+    }
+
+    /**
+     * @param from Long list.
+     * @param to Set.
+     */
+    private Set<Long> addAll(GridLongList from, Set<Long> to) {
+        assert from != null;
+
+        if (to == null)
+            to = new HashSet<>(from.size());
+
+        for (int i = 0; i < from.size(); i++)
+            to.add(from.get(i));
+
+        return to;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessor.java
new file mode 100644
index 0000000..a09468f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessor.java
@@ -0,0 +1,282 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.Collection;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteDiagnosticPrepareContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.managers.discovery.DiscoCache;
+import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
+import org.apache.ignite.internal.processors.GridProcessor;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.ExchangeContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.util.GridLongList;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public interface MvccProcessor extends GridProcessor {
+    /**
+     * @param evtType Event type.
+     * @param nodes Current nodes.
+     * @param topVer Topology version.
+     * @param customMsg Message
+     */
+    void onDiscoveryEvent(int evtType, Collection<ClusterNode> nodes, long topVer,
+        @Nullable DiscoveryCustomMessage customMsg);
+
+    /**
+     * Exchange start callback.
+     *
+     * @param mvccCrd Mvcc coordinator.
+     * @param exchCtx Exchange context.
+     * @param exchCrd Exchange coordinator.
+     */
+    void onExchangeStart(MvccCoordinator mvccCrd, ExchangeContext exchCtx, ClusterNode exchCrd);
+
+    /**
+     * Exchange done callback.
+     *
+     * @param newCoord New coordinator flag.
+     * @param discoCache Disco cache.
+     * @param activeQueries Active queries.
+     */
+    void onExchangeDone(boolean newCoord, DiscoCache discoCache, Map<UUID, GridLongList> activeQueries);
+
+    /**
+     * @param nodeId Node ID
+     * @param activeQueries Active queries.
+     */
+    void processClientActiveQueries(UUID nodeId, @Nullable GridLongList activeQueries);
+
+    /**
+     * @return Mvcc coordinator received from discovery event.
+     */
+    @Nullable MvccCoordinator assignedCoordinator();
+
+    /**
+     * @return Coordinator.
+     */
+    @Nullable MvccCoordinator currentCoordinator();
+
+    /**
+     * Check that the given topology is greater or equals to coordinator's one and returns current coordinator.
+     * @param topVer Topology version.
+     * @return Mvcc coordinator.
+     */
+    @Nullable MvccCoordinator currentCoordinator(AffinityTopologyVersion topVer);
+
+    /**
+     * @return Current coordinator node ID.
+     */
+    UUID currentCoordinatorId();
+
+    /**
+     * @param curCrd Coordinator.
+     */
+    void updateCoordinator(MvccCoordinator curCrd);
+
+    /**
+     * @param crdVer Mvcc coordinator version.
+     * @param cntr Mvcc counter.
+     * @return State for given mvcc version.
+     * @throws IgniteCheckedException If fails.
+     */
+    byte state(long crdVer, long cntr) throws IgniteCheckedException;
+
+    /**
+     * @param ver Version to check.
+     * @return State for given mvcc version.
+     * @throws IgniteCheckedException If fails.
+     */
+    byte state(MvccVersion ver) throws IgniteCheckedException;
+
+    /**
+     * @param ver Version.
+     * @param state State.
+     * @throws IgniteCheckedException If fails;
+     */
+    void updateState(MvccVersion ver, byte state) throws IgniteCheckedException;
+
+    /**
+     * @param ver Version.
+     * @param state State.
+     * @param primary Flag if this is primary node.
+     * @throws IgniteCheckedException If fails;
+     */
+    void updateState(MvccVersion ver, byte state, boolean primary) throws IgniteCheckedException;
+
+    /**
+     * @param crd Mvcc coordinator version.
+     * @param cntr Mvcc counter.
+     */
+    void registerLocalTransaction(long crd, long cntr);
+
+    /**
+     * @param crd Mvcc coordinator version.
+     * @param cntr Mvcc counter.
+     * @return {@code True} if there is an active local transaction with given version.
+     */
+    boolean hasLocalTransaction(long crd, long cntr);
+
+    /**
+     * @param cctx Cache context.
+     * @param locked Version the entry is locked by.
+     * @return Future, which is completed as soon as the lock is released.
+     * @throws IgniteCheckedException If failed.
+     */
+    IgniteInternalFuture<Void> waitFor(GridCacheContext cctx, MvccVersion locked) throws IgniteCheckedException;
+
+    /**
+     * @param tracker Query tracker.
+     */
+    void addQueryTracker(MvccQueryTracker tracker);
+
+    /**
+     * @param id Query tracker id.
+     */
+    void removeQueryTracker(Long id);
+
+    /**
+     * @return {@link MvccSnapshot} if this is a coordinator node and coordinator is initialized.
+     * {@code Null} in other cases.
+     * @throws ClusterTopologyCheckedException If coordinator doesn't match locked topology or not assigned.
+     */
+    MvccSnapshot tryRequestSnapshotLocal() throws ClusterTopologyCheckedException;
+
+    /**
+     * @param tx Transaction.
+     * @return {@link MvccSnapshot} if this is a coordinator node and coordinator is initialized.
+     * {@code Null} in other cases.
+     * @throws ClusterTopologyCheckedException If coordinator doesn't match locked topology or not assigned.
+     */
+    MvccSnapshot tryRequestSnapshotLocal(@Nullable IgniteInternalTx tx) throws ClusterTopologyCheckedException;
+
+    /**
+     * Requests snapshot on Mvcc coordinator.
+     *
+     * @return Snapshot future.
+     */
+    IgniteInternalFuture<MvccSnapshot> requestSnapshotAsync();
+
+    /**
+     * Requests snapshot on Mvcc coordinator.
+     *
+     * @param tx Transaction.
+     * @return Snapshot future.
+     */
+    IgniteInternalFuture<MvccSnapshot> requestSnapshotAsync(IgniteInternalTx tx);
+
+    /**
+     * Requests snapshot on Mvcc coordinator.
+     *
+     * @param lsnr Request listener.
+     */
+    void requestSnapshotAsync(MvccSnapshotResponseListener lsnr);
+
+    /**
+     * Requests snapshot on Mvcc coordinator.
+     *
+     * @param tx Transaction
+     * @param lsnr Request listener.
+     */
+    void requestSnapshotAsync(IgniteInternalTx tx, MvccSnapshotResponseListener lsnr);
+
+    /**
+     * @param updateVer Transaction update version.
+     * @return Acknowledge future.
+     */
+    IgniteInternalFuture<Void> ackTxCommit(MvccSnapshot updateVer);
+
+    /**
+     * @param updateVer Transaction update version.
+     * @param readSnapshot Transaction read version.
+     * @param qryId Query tracker id.
+     * @return Acknowledge future.
+     */
+    IgniteInternalFuture<Void> ackTxCommit(MvccVersion updateVer, MvccSnapshot readSnapshot, long qryId);
+
+    /**
+     * @param updateVer Transaction update version.
+     */
+    void ackTxRollback(MvccVersion updateVer);
+
+    /**
+     * @param updateVer Transaction update version.
+     * @param readSnapshot Transaction read version.
+     * @param qryTrackerId Query tracker id.
+     */
+    void ackTxRollback(MvccVersion updateVer, MvccSnapshot readSnapshot, long qryTrackerId);
+
+    /**
+     * @param snapshot Query version.
+     * @param qryId Query tracker ID.
+     */
+    void ackQueryDone(MvccSnapshot snapshot, long qryId);
+
+    /**
+     * @param crdId Coordinator ID.
+     * @param txs Transaction IDs.
+     * @return Future.
+     */
+    IgniteInternalFuture<Void> waitTxsFuture(UUID crdId, GridLongList txs);
+
+    /**
+     * @param log Logger.
+     * @param diagCtx Diagnostic request.
+     */
+    void dumpDebugInfo(IgniteLogger log, @Nullable IgniteDiagnosticPrepareContext diagCtx);
+
+    /**
+     * @return {@code True} if at least one cache with
+     * {@code CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT} mode is registered.
+     */
+    boolean mvccEnabled();
+
+    /**
+     * Pre-processes cache configuration before start.
+     *
+     * @param ccfg Cache configuration to pre-process.
+     */
+    void preProcessCacheConfiguration(CacheConfiguration ccfg);
+
+    /**
+     * Validates cache configuration before start.
+     *
+     * @param ccfg Cache configuration to validate.
+     * @throws IgniteCheckedException If validation failed.
+     */
+    void validateCacheConfiguration(CacheConfiguration ccfg) throws IgniteCheckedException;
+
+    /**
+     * Starts MVCC processor (i.e. initialises data structures and vacuum) if it has not been started yet.
+     *
+     * @throws IgniteCheckedException If failed to initialize.
+     */
+    void ensureStarted() throws IgniteCheckedException;
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java
new file mode 100644
index 0000000..d1e2dc5
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccProcessorImpl.java
@@ -0,0 +1,2295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.cache.expiry.EternalExpiryPolicy;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.events.DiscoveryEvent;
+import org.apache.ignite.events.Event;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteDiagnosticPrepareContext;
+import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.NodeStoppingException;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.managers.communication.GridMessageListener;
+import org.apache.ignite.internal.managers.discovery.DiscoCache;
+import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
+import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
+import org.apache.ignite.internal.processors.GridProcessorAdapter;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
+import org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch;
+import org.apache.ignite.internal.processors.cache.DynamicCacheChangeRequest;
+import org.apache.ignite.internal.processors.cache.ExchangeContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestQueryCntr;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestQueryId;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTx;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTxAndQueryCntr;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTxAndQueryId;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccActiveQueriesMessage;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccFutureResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccMessage;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccQuerySnapshotRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccSnapshotResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccTxSnapshotRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccWaitTxsRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxKey;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.DatabaseLifecycleListener;
+import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.util.GridAtomicLong;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.future.GridCompoundFuture;
+import org.apache.ignite.internal.util.future.GridCompoundIdentityFuture;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.lang.GridCursor;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.lang.IgniteClosure;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteProductVersion;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.spi.IgniteNodeValidationResult;
+import org.apache.ignite.thread.IgniteThread;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+import static org.apache.ignite.events.EventType.EVT_CLIENT_NODE_DISCONNECTED;
+import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
+import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
+import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED;
+import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED;
+import static org.apache.ignite.internal.GridTopic.TOPIC_CACHE_COORDINATOR;
+import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT;
+import static org.apache.ignite.internal.managers.communication.GridIoPolicy.SYSTEM_POOL;
+import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker.MVCC_TRACKER_ID_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_INITIAL_CNTR;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_READ_OP_CNTR;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_START_CNTR;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_START_OP_CNTR;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.compare;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.hasNewVersion;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.isVisible;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.noCoordinatorError;
+import static org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog.TX_LOG_CACHE_ID;
+import static org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog.TX_LOG_CACHE_NAME;
+import static org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter.RowData.KEY_ONLY;
+
+/**
+ * MVCC processor.
+ */
+@SuppressWarnings("serial")
+public class MvccProcessorImpl extends GridProcessorAdapter implements MvccProcessor, DatabaseLifecycleListener {
+    /** */
+    private static final IgniteProductVersion MVCC_SUPPORTED_SINCE = IgniteProductVersion.fromString("2.7.0");
+
+    /** */
+    private static final Waiter LOCAL_TRANSACTION_MARKER = new LocalTransactionMarker();
+
+    /** Dummy tx for vacuum. */
+    private static final IgniteInternalTx DUMMY_TX = new GridNearTxLocal();
+
+    /** For tests only. */
+    private static IgniteClosure<Collection<ClusterNode>, ClusterNode> crdC;
+
+    /**
+     * For testing only.
+     *
+     * @param crdC Closure assigning coordinator.
+     */
+    static void coordinatorAssignClosure(IgniteClosure<Collection<ClusterNode>, ClusterNode> crdC) {
+        MvccProcessorImpl.crdC = crdC;
+    }
+
+    /** Topology version when local node was assigned as coordinator. */
+    private long crdVer;
+
+    /** */
+    private volatile MvccCoordinator curCrd;
+
+    /** */
+    private volatile MvccCoordinator assignedCrd;
+
+    /** */
+    private TxLog txLog;
+
+    /** */
+    private List<GridWorker> vacuumWorkers;
+
+    /** */
+    private BlockingQueue<VacuumTask> cleanupQueue;
+
+    /**
+     * Vacuum mutex. Prevents concurrent vacuum while start/stop operations
+     */
+    private final Object mux = new Object();
+
+    /** For tests only. */
+    private volatile Throwable vacuumError;
+
+    /** */
+    private final GridAtomicLong futIdCntr = new GridAtomicLong(0);
+
+    /** */
+    private final GridAtomicLong mvccCntr = new GridAtomicLong(MVCC_START_CNTR);
+
+    /** */
+    private final GridAtomicLong committedCntr = new GridAtomicLong(MVCC_INITIAL_CNTR);
+
+    /** */
+    private final Map<Long, Long> activeTxs = new HashMap<>();
+
+    /** Active query trackers. */
+    private final Map<Long, MvccQueryTracker> activeTrackers = new ConcurrentHashMap<>();
+
+    /** */
+    private final Map<UUID, Map<Long, MvccSnapshotResponseListener>> snapLsnrs = new ConcurrentHashMap<>();
+
+    /** */
+    private final Map<Long, WaitAckFuture> ackFuts = new ConcurrentHashMap<>();
+
+    /** */
+    private final Map<Long, GridFutureAdapter> waitTxFuts = new ConcurrentHashMap<>();
+
+    /** */
+    private final Map<TxKey, Waiter> waitMap = new ConcurrentHashMap<>();
+
+    /** */
+    private final ActiveQueries activeQueries = new ActiveQueries();
+
+    /** */
+    private final MvccPreviousCoordinatorQueries prevCrdQueries = new MvccPreviousCoordinatorQueries();
+
+    /** */
+    private final GridFutureAdapter<Void> initFut = new GridFutureAdapter<>();
+
+    /** Flag whether at least one cache with {@code CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT} mode is registered. */
+    private volatile boolean mvccEnabled;
+
+    /** Flag whether all nodes in cluster support MVCC. */
+    private volatile boolean mvccSupported = true;
+
+    /**
+     * @param ctx Context.
+     */
+    public MvccProcessorImpl(GridKernalContext ctx) {
+        super(ctx);
+
+        ctx.internalSubscriptionProcessor().registerDatabaseListener(this);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void start() throws IgniteCheckedException {
+        ctx.event().addLocalEventListener(new CacheCoordinatorNodeFailListener(),
+            EVT_NODE_FAILED, EVT_NODE_LEFT);
+
+        ctx.io().addMessageListener(TOPIC_CACHE_COORDINATOR, new CoordinatorMessageListener());
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean mvccEnabled() {
+        return mvccEnabled;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void preProcessCacheConfiguration(CacheConfiguration ccfg) {
+        if (ccfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT) {
+            if (!mvccSupported)
+                throw new IgniteException("Cannot start MVCC transactional cache. " +
+                    "MVCC is unsupported by the cluster.");
+
+            mvccEnabled = true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void validateCacheConfiguration(CacheConfiguration ccfg) throws IgniteCheckedException {
+        if (ccfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT) {
+            if (!mvccSupported)
+                throw new IgniteException("Cannot start MVCC transactional cache. " +
+                    "MVCC is unsupported by the cluster.");
+
+            if (ccfg.getCacheStoreFactory() != null) {
+                throw new IgniteCheckedException("Transactional cache may not have a third party cache store when " +
+                    "MVCC is enabled.");
+            }
+
+            if (ccfg.getExpiryPolicyFactory() != null && !(ccfg.getExpiryPolicyFactory().create() instanceof
+                EternalExpiryPolicy)) {
+                throw new IgniteCheckedException("Transactional cache may not have expiry policy when " +
+                    "MVCC is enabled.");
+            }
+
+            if (ccfg.getInterceptor() != null) {
+                throw new IgniteCheckedException("Transactional cache may not have an interceptor when " +
+                    "MVCC is enabled.");
+            }
+
+            if (ccfg.getCacheMode() == CacheMode.LOCAL)
+                throw new IgniteCheckedException("Local caches are not supported by MVCC engine. Use " +
+                    "CacheAtomicityMode.TRANSACTIONAL for local caches.");
+
+            mvccEnabled = true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Nullable @Override public IgniteNodeValidationResult validateNode(ClusterNode node) {
+        if (mvccEnabled && node.version().compareToIgnoreTimestamp(MVCC_SUPPORTED_SINCE) < 0) {
+            String errMsg = "Failed to add node to topology. MVCC is enabled on the cluster, but " +
+                "the node doesn't support MVCC [nodeId=" + node.id() + ']';
+
+            return new IgniteNodeValidationResult(node.id(), errMsg, errMsg);
+        }
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void ensureStarted() throws IgniteCheckedException {
+        if (!ctx.clientNode() && txLog == null) {
+            assert mvccEnabled && mvccSupported;
+
+            txLog = new TxLog(ctx, ctx.cache().context().database());
+
+            startVacuumWorkers();
+
+            log.info("Mvcc processor started.");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void beforeStop(IgniteCacheDatabaseSharedManager mgr) {
+        stopVacuumWorkers();
+
+        txLog = null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onInitDataRegions(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
+        // We have to always init txLog data region.
+        DataStorageConfiguration dscfg = dataStorageConfiguration();
+
+        mgr.addDataRegion(
+            dscfg,
+            createTxLogRegion(dscfg),
+            CU.isPersistenceEnabled(ctx.config()));
+    }
+
+    /** {@inheritDoc} */
+    @Override public void afterInitialise(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ConstantConditions")
+    @Override public void beforeMemoryRestore(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
+        assert CU.isPersistenceEnabled(ctx.config());
+        assert txLog == null;
+
+        ctx.cache().context().pageStore().initialize(TX_LOG_CACHE_ID, 1,
+            TX_LOG_CACHE_NAME, mgr.dataRegion(TX_LOG_CACHE_NAME).memoryMetrics());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void afterMemoryRestore(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onDiscoveryEvent(int evtType, Collection<ClusterNode> nodes, long topVer,
+        @Nullable DiscoveryCustomMessage customMsg) {
+        if (evtType == EVT_NODE_METRICS_UPDATED)
+            return;
+
+        if (evtType == EVT_DISCOVERY_CUSTOM_EVT)
+            checkMvccCacheStarted(customMsg);
+        else
+            assignMvccCoordinator(evtType, nodes, topVer);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onExchangeStart(MvccCoordinator mvccCrd, ExchangeContext exchCtx, ClusterNode exchCrd) {
+        if (!exchCtx.newMvccCoordinator())
+            return;
+
+        GridLongList activeQryTrackers = collectActiveQueryTrackers();
+
+        exchCtx.addActiveQueries(ctx.localNodeId(), activeQryTrackers);
+
+        if (exchCrd == null || !mvccCrd.nodeId().equals(exchCrd.id())) {
+            try {
+                sendMessage(mvccCrd.nodeId(), new MvccActiveQueriesMessage(activeQryTrackers));
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to send active queries to mvcc coordinator: " + e);
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onExchangeDone(boolean newCrd, DiscoCache discoCache, Map<UUID, GridLongList> activeQueries) {
+        if (!newCrd)
+            return;
+
+        ctx.cache().context().tm().rollbackMvccTxOnCoordinatorChange();
+
+        if (ctx.localNodeId().equals(curCrd.nodeId())) {
+            assert ctx.localNodeId().equals(curCrd.nodeId());
+
+            MvccCoordinator crd = discoCache.mvccCoordinator();
+
+            assert crd != null;
+
+            // No need to re-initialize if coordinator version hasn't changed (e.g. it was cluster activation).
+            if (crdVer == crd.coordinatorVersion())
+                return;
+
+            crdVer = crd.coordinatorVersion();
+
+            log.info("Initialize local node as mvcc coordinator [node=" + ctx.localNodeId() +
+                ", crdVer=" + crdVer + ']');
+
+            prevCrdQueries.init(activeQueries, F.view(discoCache.allNodes(), this::supportsMvcc), ctx.discovery());
+
+            initFut.onDone();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void processClientActiveQueries(UUID nodeId, @Nullable GridLongList activeQueries) {
+        prevCrdQueries.addNodeActiveQueries(nodeId, activeQueries);
+    }
+
+    /** {@inheritDoc} */
+    @Override @Nullable public MvccCoordinator currentCoordinator() {
+        return currentCoordinator(AffinityTopologyVersion.NONE);
+    }
+
+    /** {@inheritDoc} */
+    @Override @Nullable public MvccCoordinator currentCoordinator(AffinityTopologyVersion topVer) {
+        MvccCoordinator crd = curCrd;
+
+        // Assert coordinator did not already change.
+        assert crd == null
+            || topVer == AffinityTopologyVersion.NONE
+            || crd.topologyVersion().compareTo(topVer) <= 0 : "Invalid coordinator [crd=" + crd + ", topVer=" + topVer + ']';
+
+        return crd;
+    }
+
+    /** {@inheritDoc} */
+    @Override @Nullable public MvccCoordinator assignedCoordinator() {
+        return assignedCrd;
+    }
+
+    /** {@inheritDoc} */
+    @Override public UUID currentCoordinatorId() {
+        MvccCoordinator curCrd = this.curCrd;
+
+        return curCrd != null ? curCrd.nodeId() : null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void updateCoordinator(MvccCoordinator curCrd) {
+        this.curCrd = curCrd;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte state(long crdVer, long cntr) throws IgniteCheckedException {
+        return txLog.get(crdVer, cntr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte state(MvccVersion ver) throws IgniteCheckedException {
+        assert txLog != null && mvccEnabled;
+
+        return txLog.get(ver.coordinatorVersion(), ver.counter());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void updateState(MvccVersion ver, byte state) throws IgniteCheckedException {
+        updateState(ver, state, true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void updateState(MvccVersion ver, byte state, boolean primary) throws IgniteCheckedException {
+        assert txLog != null && mvccEnabled;
+
+        TxKey key = new TxKey(ver.coordinatorVersion(), ver.counter());
+
+        txLog.put(key, state, primary);
+
+        Waiter waiter;
+
+        if (primary && (state == TxState.ABORTED || state == TxState.COMMITTED)
+            && (waiter = waitMap.remove(key)) != null)
+            waiter.run(ctx);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void registerLocalTransaction(long crd, long cntr) {
+        Waiter old = waitMap.putIfAbsent(new TxKey(crd, cntr), LOCAL_TRANSACTION_MARKER);
+
+        assert old == null || old.hasLocalTransaction();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean hasLocalTransaction(long crd, long cntr) {
+        Waiter waiter = waitMap.get(new TxKey(crd, cntr));
+
+        return waiter != null && waiter.hasLocalTransaction();
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> waitFor(GridCacheContext cctx, MvccVersion locked) throws IgniteCheckedException {
+        TxKey key = new TxKey(locked.coordinatorVersion(), locked.counter());
+
+        LockFuture fut = new LockFuture(cctx.ioPolicy());
+
+        Waiter waiter = waitMap.merge(key, fut, Waiter::concat);
+
+        byte state = txLog.get(key);
+
+        if ((state == TxState.ABORTED || state == TxState.COMMITTED)
+            && !waiter.hasLocalTransaction() && (waiter = waitMap.remove(key)) != null)
+            waiter.run(ctx);
+
+        return fut;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addQueryTracker(MvccQueryTracker tracker) {
+        assert tracker.id() != MVCC_TRACKER_ID_NA;
+
+        MvccQueryTracker tr = activeTrackers.put(tracker.id(), tracker);
+
+        assert tr == null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void removeQueryTracker(Long id) {
+        activeTrackers.remove(id);
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccSnapshot tryRequestSnapshotLocal() throws ClusterTopologyCheckedException {
+        return tryRequestSnapshotLocal(null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccSnapshot tryRequestSnapshotLocal(@Nullable IgniteInternalTx tx) throws ClusterTopologyCheckedException {
+        MvccCoordinator crd = currentCoordinator();
+
+        if (crd == null)
+            throw noCoordinatorError();
+
+        if (tx != null) {
+            AffinityTopologyVersion topVer = ctx.cache().context().lockedTopologyVersion(null);
+
+            if (topVer != null && topVer.compareTo(crd.topologyVersion()) < 0)
+                throw new ClusterTopologyCheckedException("Mvcc coordinator is outdated " +
+                    "for the locked topology version. [crd=" + crd + ", tx=" + tx + ']');
+        }
+
+        if (!ctx.localNodeId().equals(crd.nodeId()) || !initFut.isDone())
+            return null;
+        else if (tx != null)
+            return assignTxSnapshot(0L);
+        else
+            return activeQueries.assignQueryCounter(ctx.localNodeId(), 0L);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshotAsync() {
+        return requestSnapshotAsync((IgniteInternalTx)null);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshotAsync(IgniteInternalTx tx) {
+        MvccSnapshotFuture fut = new MvccSnapshotFuture();
+
+        requestSnapshotAsync(tx, fut);
+
+        return fut;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void requestSnapshotAsync(MvccSnapshotResponseListener lsnr) {
+        requestSnapshotAsync(null, lsnr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void requestSnapshotAsync(IgniteInternalTx tx, MvccSnapshotResponseListener lsnr) {
+        MvccCoordinator crd = currentCoordinator();
+
+        if (crd == null) {
+            lsnr.onError(noCoordinatorError());
+
+            return;
+        }
+
+        if (tx != null) {
+            AffinityTopologyVersion topVer = ctx.cache().context().lockedTopologyVersion(null);
+
+            if (topVer != null && topVer.compareTo(crd.topologyVersion()) < 0) {
+                lsnr.onError(new ClusterTopologyCheckedException("Mvcc coordinator is outdated " +
+                    "for the locked topology version. [crd=" + crd + ", tx=" + tx + ']'));
+
+                return;
+            }
+        }
+
+        if (ctx.localNodeId().equals(crd.nodeId())) {
+            if (!initFut.isDone()) {
+                // Wait for the local coordinator init.
+                initFut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                    @Override public void apply(IgniteInternalFuture fut) {
+                        requestSnapshotAsync(tx, lsnr);
+                    }
+                });
+            }
+            else if (tx != null)
+                lsnr.onResponse(assignTxSnapshot(0L));
+            else
+                lsnr.onResponse(activeQueries.assignQueryCounter(ctx.localNodeId(), 0L));
+
+            return;
+        }
+
+        // Send request to the remote coordinator.
+        UUID nodeId = crd.nodeId();
+
+        long id = futIdCntr.incrementAndGet();
+
+        Map<Long, MvccSnapshotResponseListener> map = snapLsnrs.get(nodeId), map0;
+
+        if (map == null && (map0 = snapLsnrs.putIfAbsent(nodeId, map = new ConcurrentHashMap<>())) != null)
+            map = map0;
+
+        map.put(id, lsnr);
+
+        try {
+            sendMessage(nodeId, tx != null ? new MvccTxSnapshotRequest(id) : new MvccQuerySnapshotRequest(id));
+        }
+        catch (IgniteCheckedException e) {
+            if (map.remove(id) != null)
+                lsnr.onError(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> ackTxCommit(MvccSnapshot updateVer) {
+        return ackTxCommit(updateVer, null, 0L);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> ackTxCommit(MvccVersion updateVer, MvccSnapshot readSnapshot,
+        long qryId) {
+        assert updateVer != null;
+
+        MvccCoordinator crd = curCrd;
+
+        if (updateVer.coordinatorVersion() == crd.coordinatorVersion())
+            return sendTxCommit(crd, createTxAckMessage(futIdCntr.incrementAndGet(), updateVer, readSnapshot, qryId));
+        else if (readSnapshot != null)
+            ackQueryDone(readSnapshot, qryId);
+
+        return new GridFinishedFuture<>();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void ackTxRollback(MvccVersion updateVer) {
+        assert updateVer != null;
+
+        MvccCoordinator crd = curCrd;
+
+        if (crd.coordinatorVersion() != updateVer.coordinatorVersion())
+            return;
+
+        MvccAckRequestTx msg = createTxAckMessage(-1, updateVer, null, 0L);
+
+        msg.skipResponse(true);
+
+        try {
+            sendMessage(crd.nodeId(), msg);
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send tx rollback ack, node left [msg=" + msg + ", node=" + crd.nodeId() + ']');
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send tx rollback ack [msg=" + msg + ", node=" + crd.nodeId() + ']', e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void ackTxRollback(MvccVersion updateVer, MvccSnapshot readSnapshot, long qryTrackerId) {
+        assert updateVer != null;
+
+        MvccCoordinator crd = curCrd;
+
+        if (crd.coordinatorVersion() != updateVer.coordinatorVersion()) {
+            if (readSnapshot != null)
+                ackQueryDone(readSnapshot, qryTrackerId);
+
+            return;
+        }
+
+        MvccAckRequestTx msg = createTxAckMessage(-1, updateVer, readSnapshot, qryTrackerId);
+
+        msg.skipResponse(true);
+
+        try {
+            sendMessage(crd.nodeId(), msg);
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send tx rollback ack, node left [msg=" + msg + ", node=" + crd.nodeId() + ']');
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send tx rollback ack [msg=" + msg + ", node=" + crd.nodeId() + ']', e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void ackQueryDone(MvccSnapshot snapshot, long qryId) {
+        assert snapshot != null;
+
+        MvccCoordinator crd = currentCoordinator();
+
+        if (crd == null || crd.coordinatorVersion() == snapshot.coordinatorVersion()
+            && sendQueryDone(crd, new MvccAckRequestQueryCntr(queryTrackCounter(snapshot))))
+            return;
+
+        Message msg = new MvccAckRequestQueryId(qryId);
+
+        do {
+            crd = currentCoordinator();
+        }
+        while (!sendQueryDone(crd, msg));
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> waitTxsFuture(UUID crdId, GridLongList txs) {
+        assert crdId != null;
+        assert txs != null && txs.size() > 0;
+
+        WaitAckFuture fut = new WaitAckFuture(futIdCntr.incrementAndGet(), crdId, false);
+
+        ackFuts.put(fut.id, fut);
+
+        try {
+            sendMessage(crdId, new MvccWaitTxsRequest(fut.id, txs));
+        }
+        catch (IgniteCheckedException e) {
+            if (ackFuts.remove(fut.id) != null) {
+                if (e instanceof ClusterTopologyCheckedException)
+                    fut.onDone(); // No need to wait, new coordinator will be assigned, finish without error.
+                else
+                    fut.onDone(e);
+            }
+        }
+
+        return fut;
+    }
+
+    /** {@inheritDoc} */
+    // TODO: Proper use of diagnostic context.
+    @Override public void dumpDebugInfo(IgniteLogger log, @Nullable IgniteDiagnosticPrepareContext diagCtx) {
+        boolean first = true;
+
+        for (Map<Long, MvccSnapshotResponseListener> map : snapLsnrs.values()) {
+            if (first) {
+                U.warn(log, "Pending mvcc listener: ");
+
+                first = false;
+            }
+
+            for (MvccSnapshotResponseListener lsnr : map.values()) {
+                U.warn(log, ">>> " + lsnr.toString());
+            }
+        }
+
+        first = true;
+
+        for (WaitAckFuture waitAckFut : ackFuts.values()) {
+            if (first) {
+                U.warn(log, "Pending mvcc wait ack futures: ");
+
+                first = false;
+            }
+
+            U.warn(log, ">>> " + waitAckFut.toString());
+        }
+    }
+
+    /**
+     * Removes all less or equals to the given one records from Tx log.
+     *
+     * @param ver Version.
+     * @throws IgniteCheckedException If fails.
+     */
+    void removeUntil(MvccVersion ver) throws IgniteCheckedException {
+        txLog.removeUntil(ver.coordinatorVersion(), ver.counter());
+    }
+
+    /**
+     * TODO IGNITE-7966
+     *
+     * @return Data region configuration.
+     */
+    private DataRegionConfiguration createTxLogRegion(DataStorageConfiguration dscfg) {
+        DataRegionConfiguration cfg = new DataRegionConfiguration();
+
+        cfg.setName(TX_LOG_CACHE_NAME);
+        cfg.setInitialSize(dscfg.getSystemRegionInitialSize());
+        cfg.setMaxSize(dscfg.getSystemRegionMaxSize());
+        cfg.setPersistenceEnabled(CU.isPersistenceEnabled(dscfg));
+        return cfg;
+    }
+
+    /**
+     * @return Data storage configuration.
+     */
+    private DataStorageConfiguration dataStorageConfiguration() {
+        return ctx.config().getDataStorageConfiguration();
+    }
+
+    /** */
+    private void assignMvccCoordinator(int evtType, Collection<ClusterNode> nodes, long topVer) {
+        checkMvccSupported(nodes);
+
+        MvccCoordinator crd;
+
+        if (evtType == EVT_NODE_SEGMENTED || evtType == EVT_CLIENT_NODE_DISCONNECTED)
+            crd = null;
+        else {
+            crd = assignedCrd;
+
+            if (crd == null ||
+                ((evtType == EVT_NODE_FAILED || evtType == EVT_NODE_LEFT) && !F.nodeIds(nodes).contains(crd.nodeId()))) {
+                ClusterNode crdNode = null;
+
+                if (crdC != null) {
+                    crdNode = crdC.apply(nodes);
+
+                    if (log.isInfoEnabled())
+                        log.info("Assigned coordinator using test closure: " + crd);
+                }
+                else {
+                    // Expect nodes are sorted by order.
+                    for (ClusterNode node : nodes) {
+                        if (!node.isClient() && supportsMvcc(node)) {
+                            crdNode = node;
+
+                            break;
+                        }
+                    }
+                }
+
+                crd = crdNode != null ? new MvccCoordinator(crdNode.id(), coordinatorVersion(crdNode),
+                    new AffinityTopologyVersion(topVer, 0)) : null;
+
+                if (log.isInfoEnabled() && crd != null)
+                    log.info("Assigned mvcc coordinator [crd=" + crd + ", crdNode=" + crdNode + ']');
+                else if (crd == null)
+                    U.warn(log, "New mvcc coordinator was not assigned [topVer=" + topVer + ']');
+            }
+        }
+
+        assignedCrd = crd;
+    }
+
+    /**
+     * @param crdNode Assigned coordinator node.
+     * @return Coordinator version.
+     */
+    private long coordinatorVersion(ClusterNode crdNode) {
+        return crdNode.order() + ctx.discovery().gridStartTime();
+    }
+
+    /** */
+    private void checkMvccSupported(Collection<ClusterNode> nodes) {
+        if (mvccEnabled) {
+            assert mvccSupported;
+
+            return;
+        }
+
+        boolean res = true, was = mvccSupported;
+
+        for (ClusterNode node : nodes) {
+            if (!supportsMvcc(node)) {
+                res = false;
+
+                break;
+            }
+        }
+
+        if (was != res)
+            mvccSupported = res;
+    }
+
+    /** */
+    private boolean supportsMvcc(ClusterNode node) {
+        return node.version().compareToIgnoreTimestamp(MVCC_SUPPORTED_SINCE) >= 0;
+    }
+
+    /** */
+    private void checkMvccCacheStarted(@Nullable DiscoveryCustomMessage customMsg) {
+        assert customMsg != null;
+
+        if (!mvccEnabled && customMsg instanceof DynamicCacheChangeBatch) {
+            for (DynamicCacheChangeRequest req : ((DynamicCacheChangeBatch)customMsg).requests()) {
+                CacheConfiguration ccfg = req.startCacheConfiguration();
+
+                if (ccfg == null)
+                    continue;
+
+                if (ccfg.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT) {
+                    assert mvccSupported;
+
+                    mvccEnabled = true;
+                }
+            }
+        }
+    }
+
+    /**
+     * @return Active queries list.
+     */
+    private GridLongList collectActiveQueryTrackers() {
+        assert curCrd != null;
+
+        GridLongList activeQryTrackers = new GridLongList();
+
+        for (MvccQueryTracker tracker : activeTrackers.values()) {
+            long trackerId = tracker.onMvccCoordinatorChange(curCrd);
+
+            if (trackerId != MVCC_TRACKER_ID_NA)
+                activeQryTrackers.add(trackerId);
+        }
+
+        return activeQryTrackers;
+    }
+
+    /**
+     * @return Counter.
+     */
+    private MvccSnapshotResponse assignTxSnapshot(long futId) {
+        assert initFut.isDone();
+        assert crdVer != 0;
+        assert ctx.localNodeId().equals(currentCoordinatorId());
+
+        MvccSnapshotResponse res = new MvccSnapshotResponse();
+
+        long ver, cleanup, tracking;
+
+        synchronized (this) {
+            ver = mvccCntr.incrementAndGet();
+            tracking = ver;
+            cleanup = committedCntr.get() + 1;
+
+            for (Map.Entry<Long, Long> txVer : activeTxs.entrySet()) {
+                cleanup = Math.min(txVer.getValue(), cleanup);
+                tracking = Math.min(txVer.getKey(), tracking);
+
+                res.addTx(txVer.getKey());
+            }
+
+            boolean add = activeTxs.put(ver, tracking) == null;
+
+            assert add : ver;
+        }
+
+        long minQry = activeQueries.minimalQueryCounter();
+
+        if (minQry != -1)
+            cleanup = Math.min(cleanup, minQry);
+
+        cleanup = prevCrdQueries.previousQueriesDone() ? cleanup - 1 : MVCC_COUNTER_NA;
+
+        res.init(futId, crdVer, ver, MVCC_START_OP_CNTR, cleanup, tracking);
+
+        return res;
+    }
+
+    /**
+     * @param txCntr Counter assigned to transaction.
+     */
+    private void onTxDone(Long txCntr, boolean committed) {
+        assert initFut.isDone();
+
+        GridFutureAdapter fut;
+
+        synchronized (this) {
+            activeTxs.remove(txCntr);
+
+            if (committed)
+                committedCntr.setIfGreater(txCntr);
+        }
+
+        fut = waitTxFuts.remove(txCntr);
+
+        if (fut != null)
+            fut.onDone();
+    }
+
+    /**
+     * @param mvccCntr Query counter.
+     */
+    private void onQueryDone(UUID nodeId, Long mvccCntr) {
+        activeQueries.onQueryDone(nodeId, mvccCntr);
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param updateVer Update version.
+     * @param readSnapshot Optional read version.
+     * @param qryTrackerId Query tracker id.
+     * @return Message.
+     */
+    private MvccAckRequestTx createTxAckMessage(long futId, MvccVersion updateVer, MvccSnapshot readSnapshot,
+        long qryTrackerId) {
+        if (readSnapshot == null)
+            return new MvccAckRequestTx(futId, updateVer.counter());
+        else if (readSnapshot.coordinatorVersion() == updateVer.coordinatorVersion())
+            return new MvccAckRequestTxAndQueryCntr(futId, updateVer.counter(), queryTrackCounter(readSnapshot));
+        else
+            return new MvccAckRequestTxAndQueryId(futId, updateVer.counter(), qryTrackerId);
+    }
+
+    /**
+     * @param mvccVer Read version.
+     * @return Tracker counter.
+     */
+    private long queryTrackCounter(MvccSnapshot mvccVer) {
+        long trackCntr = mvccVer.counter();
+
+        MvccLongList txs = mvccVer.activeTransactions();
+
+        int size = txs.size();
+
+        for (int i = 0; i < size; i++) {
+            long txVer = txs.get(i);
+
+            if (txVer < trackCntr)
+                trackCntr = txVer;
+        }
+
+        return trackCntr;
+    }
+
+    /**
+     * Launches vacuum workers and scheduler.
+     */
+    void startVacuumWorkers() {
+        if (!ctx.clientNode()) {
+            synchronized (mux) {
+                if (vacuumWorkers == null) {
+                    assert cleanupQueue == null;
+
+                    cleanupQueue = new LinkedBlockingQueue<>();
+
+                    vacuumWorkers = new ArrayList<>(ctx.config().getMvccVacuumThreadCount() + 1);
+
+                    vacuumWorkers.add(new VacuumScheduler(ctx, log, this));
+
+                    for (int i = 0; i < ctx.config().getMvccVacuumThreadCount(); i++) {
+                        vacuumWorkers.add(new VacuumWorker(ctx, log, cleanupQueue));
+                    }
+
+                    for (GridWorker worker : vacuumWorkers) {
+                        new IgniteThread(worker).start();
+                    }
+
+                    return;
+                }
+            }
+
+            U.warn(log, "Attempting to start active vacuum.");
+        }
+    }
+
+    /**
+     * Stops vacuum worker and scheduler.
+     */
+    void stopVacuumWorkers() {
+        if (!ctx.clientNode()) {
+            List<GridWorker> workers;
+            BlockingQueue<VacuumTask> queue;
+
+            synchronized (mux) {
+                workers = vacuumWorkers;
+                queue = cleanupQueue;
+
+                vacuumWorkers = null;
+                cleanupQueue = null;
+            }
+
+            if (workers == null) {
+                U.warn(log, "Attempting to stop inactive vacuum.");
+
+                return;
+            }
+
+            assert queue != null;
+
+            // Stop vacuum workers outside mutex to prevent deadlocks.
+            U.cancel(workers);
+            U.join(workers, log);
+
+            if (!queue.isEmpty()) {
+                IgniteCheckedException ex = vacuumCancelledException();
+
+                for (VacuumTask task : queue) {
+                    task.onDone(ex);
+                }
+            }
+        }
+    }
+
+    /**
+     * Runs vacuum process.
+     *
+     * @return {@code Future} with {@link VacuumMetrics}.
+     */
+    IgniteInternalFuture<VacuumMetrics> runVacuum() {
+        assert !ctx.clientNode();
+
+        MvccCoordinator crd0 = currentCoordinator();
+
+        if (Thread.currentThread().isInterrupted() ||
+            crd0 == null ||
+            crdVer == 0 && ctx.localNodeId().equals(crd0.nodeId()))
+            return new GridFinishedFuture<>(new VacuumMetrics());
+
+        final GridCompoundIdentityFuture<VacuumMetrics> res =
+            new GridCompoundIdentityFuture<>(new VacuumMetricsReducer());
+
+        MvccSnapshot snapshot;
+
+        try {
+            // TODO IGNITE-8974 create special method for getting cleanup version only.
+            snapshot = tryRequestSnapshotLocal(DUMMY_TX);
+        }
+        catch (ClusterTopologyCheckedException e) {
+            throw new AssertionError(e);
+        }
+
+        if (snapshot != null)
+            continueRunVacuum(res, snapshot);
+        else
+            requestSnapshotAsync(DUMMY_TX, new MvccSnapshotResponseListener() {
+                @Override public void onResponse(MvccSnapshot s) {
+                    continueRunVacuum(res, s);
+                }
+
+                @Override public void onError(IgniteCheckedException e) {
+                    if (!(e instanceof ClusterTopologyCheckedException))
+                        completeWithException(res, e);
+                    else {
+                        if (log.isDebugEnabled())
+                            log.debug("Vacuum failed to receive an Mvcc snapshot. " +
+                                "Need to retry on the stable topology. " + e.getMessage());
+
+                        res.onDone(new VacuumMetrics());
+                    }
+                }
+            });
+
+        return res;
+    }
+
+    /**
+     * For tests only.
+     *
+     * @return Vacuum error.
+     */
+    Throwable vacuumError() {
+        return vacuumError;
+    }
+
+    /**
+     * For tests only.
+     *
+     * @param e Vacuum error.
+     */
+    void vacuumError(Throwable e) {
+        this.vacuumError = e;
+    }
+
+    /**
+     * @param res Result.
+     * @param snapshot Snapshot.
+     */
+    private void continueRunVacuum(GridCompoundIdentityFuture<VacuumMetrics> res, MvccSnapshot snapshot) {
+        ackTxCommit(snapshot)
+            .listen(new IgniteInClosure<IgniteInternalFuture>() {
+                @Override public void apply(IgniteInternalFuture fut) {
+                    Throwable err;
+
+                    if ((err = fut.error()) != null) {
+                        U.error(log, "Vacuum error.", err);
+
+                        res.onDone(err);
+                    }
+                    else if (snapshot.cleanupVersion() <= MVCC_COUNTER_NA)
+                        res.onDone(new VacuumMetrics());
+                    else {
+                        try {
+                            if (log.isDebugEnabled())
+                                log.debug("Started vacuum with cleanup version=" + snapshot.cleanupVersion() + '.');
+
+                            synchronized (mux) {
+                                if (cleanupQueue == null) {
+                                    res.onDone(vacuumCancelledException());
+
+                                    return;
+                                }
+
+                                for (CacheGroupContext grp : ctx.cache().cacheGroups()) {
+                                    if (grp.mvccEnabled()) {
+                                        for (GridDhtLocalPartition part : grp.topology().localPartitions()) {
+                                            VacuumTask task = new VacuumTask(snapshot, part);
+
+                                            cleanupQueue.offer(task);
+
+                                            res.add(task);
+                                        }
+                                    }
+                                }
+                            }
+
+                            res.listen(new CI1<IgniteInternalFuture<VacuumMetrics>>() {
+                                @Override public void apply(IgniteInternalFuture<VacuumMetrics> fut) {
+                                    try {
+                                        VacuumMetrics metrics = fut.get();
+
+                                        if (U.assertionsEnabled()) {
+                                            MvccCoordinator crd = currentCoordinator();
+
+                                            assert crd != null
+                                                && crd.coordinatorVersion() >= snapshot.coordinatorVersion();
+
+                                            for (TxKey key : waitMap.keySet()) {
+                                                assert key.major() == snapshot.coordinatorVersion()
+                                                    && key.minor() > snapshot.cleanupVersion()
+                                                    || key.major() > snapshot.coordinatorVersion();
+                                            }
+                                        }
+
+                                        txLog.removeUntil(snapshot.coordinatorVersion(), snapshot.cleanupVersion());
+
+                                        if (log.isDebugEnabled())
+                                            log.debug("Vacuum completed. " + metrics);
+                                    }
+                                    catch (NodeStoppingException ignored) {
+                                        if (log.isDebugEnabled())
+                                            log.debug("Cannot complete vacuum (node is stopping).");
+                                    }
+                                    catch (Throwable e) {
+                                        U.error(log, "Vacuum error.", e);
+                                    }
+                                }
+                            });
+
+                            res.markInitialized();
+                        }
+                        catch (Throwable e) {
+                            completeWithException(res, e);
+                        }
+                    }
+                }
+            });
+    }
+
+    /** */
+    private void completeWithException(GridFutureAdapter fut, Throwable e) {
+        fut.onDone(e);
+
+        if (e instanceof Error)
+            throw (Error)e;
+    }
+
+    /** */
+    @NotNull private IgniteCheckedException vacuumCancelledException() {
+        return new NodeStoppingException("Operation has been cancelled (node is stopping).");
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    private void sendFutureResponse(UUID nodeId, MvccWaitTxsRequest msg) {
+        try {
+            sendMessage(nodeId, new MvccFutureResponse(msg.futureId()));
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send tx ack response, node left [msg=" + msg + ", node=" + nodeId + ']');
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send tx ack response [msg=" + msg + ", node=" + nodeId + ']', e);
+        }
+    }
+
+    /** */
+    @NotNull private IgniteInternalFuture<Void> sendTxCommit(MvccCoordinator crd, MvccAckRequestTx msg) {
+        WaitAckFuture fut = new WaitAckFuture(msg.futureId(), crd.nodeId(), true);
+
+        ackFuts.put(fut.id, fut);
+
+        try {
+            sendMessage(crd.nodeId(), msg);
+        }
+        catch (IgniteCheckedException e) {
+            if (ackFuts.remove(fut.id) != null) {
+                if (e instanceof ClusterTopologyCheckedException) {
+                    if (log.isDebugEnabled())
+                        log.debug("Failed to send tx ack, node left [crd=" + crd + ", msg=" + msg + ']');
+
+                    fut.onDone(); // No need to ack, finish without error.
+                }
+                else
+
+                    fut.onDone(e);
+            }
+        }
+
+        return fut;
+    }
+
+    /**
+     * @param crd Mvcc coordinator.
+     * @param msg Message.
+     * @return {@code True} if no need to resend the message to a new coordinator.
+     */
+    private boolean sendQueryDone(MvccCoordinator crd, Message msg) {
+        if (crd == null)
+            return true; // no need to send ack;
+
+        try {
+            sendMessage(crd.nodeId(), msg);
+
+            return true;
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send query ack, node left [crd=" + crd + ", msg=" + msg + ']');
+
+            MvccCoordinator crd0 = currentCoordinator();
+
+            // Coordinator is unassigned or still the same.
+            return crd0 == null || crd.coordinatorVersion() == crd0.coordinatorVersion();
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send query ack [crd=" + crd + ", msg=" + msg + ']', e);
+
+            return true;
+        }
+    }
+
+    /**
+     * Send IO message.
+     *
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    private void sendMessage(UUID nodeId, Message msg) throws IgniteCheckedException {
+        ctx.io().sendToGridTopic(nodeId, TOPIC_CACHE_COORDINATOR, msg, SYSTEM_POOL);
+    }
+
+    /**
+     * @param nodeId Sender node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorTxSnapshotRequest(UUID nodeId, MvccTxSnapshotRequest msg) {
+        ClusterNode node = ctx.discovery().node(nodeId);
+
+        if (node == null) {
+            if (log.isDebugEnabled())
+                log.debug("Ignore tx snapshot request processing, node left [msg=" + msg + ", node=" + nodeId + ']');
+
+            return;
+        }
+
+        MvccSnapshotResponse res = assignTxSnapshot(msg.futureId());
+
+        try {
+            sendMessage(node.id(), res);
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send tx snapshot response, node left [msg=" + msg + ", node=" + nodeId + ']');
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send tx snapshot response [msg=" + msg + ", node=" + nodeId + ']', e);
+        }
+    }
+
+    /**
+     * @param nodeId Sender node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorQuerySnapshotRequest(UUID nodeId, MvccQuerySnapshotRequest msg) {
+        ClusterNode node = ctx.discovery().node(nodeId);
+
+        if (node == null) {
+            if (log.isDebugEnabled())
+                log.debug("Ignore query counter request processing, node left [msg=" + msg + ", node=" + nodeId + ']');
+
+            return;
+        }
+
+        MvccSnapshotResponse res = activeQueries.assignQueryCounter(nodeId, msg.futureId());
+
+        try {
+            sendMessage(node.id(), res);
+        }
+        catch (ClusterTopologyCheckedException e) {
+            if (log.isDebugEnabled())
+                log.debug("Failed to send query counter response, node left [msg=" + msg + ", node=" + nodeId + ']');
+        }
+        catch (IgniteCheckedException e) {
+            U.error(log, "Failed to send query counter response [msg=" + msg + ", node=" + nodeId + ']', e);
+
+            onQueryDone(nodeId, res.tracking());
+        }
+    }
+
+    /**
+     * @param nodeId Sender node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorSnapshotResponse(UUID nodeId, MvccSnapshotResponse msg) {
+        Map<Long, MvccSnapshotResponseListener> map = snapLsnrs.get(nodeId);
+
+        MvccSnapshotResponseListener lsnr;
+
+        if (map != null && (lsnr = map.remove(msg.futureId())) != null)
+            lsnr.onResponse(msg);
+        else {
+            if (ctx.discovery().alive(nodeId))
+                U.warn(log, "Failed to find query version future [node=" + nodeId + ", msg=" + msg + ']');
+            else if (log.isDebugEnabled())
+                log.debug("Failed to find query version future [node=" + nodeId + ", msg=" + msg + ']');
+        }
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorQueryAckRequest(UUID nodeId, MvccAckRequestQueryCntr msg) {
+        onQueryDone(nodeId, msg.counter());
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    private void processNewCoordinatorQueryAckRequest(UUID nodeId, MvccAckRequestQueryId msg) {
+        prevCrdQueries.onQueryDone(nodeId, msg.queryTrackerId());
+    }
+
+    /**
+     * @param nodeId Sender node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorTxAckRequest(UUID nodeId, MvccAckRequestTx msg) {
+        onTxDone(msg.txCounter(), msg.futureId() >= 0);
+
+        if (msg.queryCounter() != MVCC_COUNTER_NA)
+            onQueryDone(nodeId, msg.queryCounter());
+        else if (msg.queryTrackerId() != MVCC_TRACKER_ID_NA)
+            prevCrdQueries.onQueryDone(nodeId, msg.queryTrackerId());
+
+        if (!msg.skipResponse()) {
+            try {
+                sendMessage(nodeId, new MvccFutureResponse(msg.futureId()));
+            }
+            catch (ClusterTopologyCheckedException e) {
+                if (log.isDebugEnabled())
+                    log.debug("Failed to send tx ack response, node left [msg=" + msg + ", node=" + nodeId + ']');
+            }
+            catch (IgniteCheckedException e) {
+                U.error(log, "Failed to send tx ack response [msg=" + msg + ", node=" + nodeId + ']', e);
+            }
+        }
+    }
+
+    /**
+     * @param nodeId Sender node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorAckResponse(UUID nodeId, MvccFutureResponse msg) {
+        WaitAckFuture fut = ackFuts.remove(msg.futureId());
+
+        if (fut != null)
+            fut.onResponse();
+        else {
+            if (ctx.discovery().alive(nodeId))
+                U.warn(log, "Failed to find tx ack future [node=" + nodeId + ", msg=" + msg + ']');
+            else if (log.isDebugEnabled())
+                log.debug("Failed to find tx ack future [node=" + nodeId + ", msg=" + msg + ']');
+        }
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    @SuppressWarnings("unchecked")
+    private void processCoordinatorWaitTxsRequest(final UUID nodeId, final MvccWaitTxsRequest msg) {
+        GridLongList txs = msg.transactions();
+
+        GridCompoundFuture resFut = null;
+
+        for (int i = 0; i < txs.size(); i++) {
+            Long txId = txs.get(i);
+
+            GridFutureAdapter fut = waitTxFuts.get(txId);
+
+            if (fut == null) {
+                GridFutureAdapter old = waitTxFuts.putIfAbsent(txId, fut = new GridFutureAdapter());
+
+                if (old != null)
+                    fut = old;
+            }
+
+            boolean isDone;
+
+            synchronized (this) {
+                isDone = !activeTxs.containsKey(txId);
+            }
+
+            if (isDone)
+                fut.onDone();
+
+            if (!fut.isDone()) {
+                if (resFut == null)
+                    resFut = new GridCompoundFuture();
+
+                resFut.add(fut);
+            }
+        }
+
+        if (resFut != null)
+            resFut.markInitialized();
+
+        if (resFut == null || resFut.isDone())
+            sendFutureResponse(nodeId, msg);
+        else {
+            resFut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                @Override public void apply(IgniteInternalFuture fut) {
+                    sendFutureResponse(nodeId, msg);
+                }
+            });
+        }
+    }
+
+    /**
+     * @param nodeId Node ID.
+     * @param msg Message.
+     */
+    private void processCoordinatorActiveQueriesMessage(UUID nodeId, MvccActiveQueriesMessage msg) {
+        prevCrdQueries.addNodeActiveQueries(nodeId, msg.activeQueries());
+    }
+
+    /**
+     *
+     */
+    private class ActiveQueries {
+        /** */
+        private final Map<UUID, TreeMap<Long, AtomicInteger>> activeQueries = new HashMap<>();
+
+        /** */
+        private Long minQry;
+
+        /** */
+        private synchronized long minimalQueryCounter() {
+            return minQry == null ? -1 : minQry;
+        }
+
+        /** */
+        private synchronized MvccSnapshotResponse assignQueryCounter(UUID nodeId, long futId) {
+            MvccSnapshotResponse res = new MvccSnapshotResponse();
+
+            long ver, tracking;
+
+            synchronized (MvccProcessorImpl.this) {
+                ver = committedCntr.get();
+                tracking = ver;
+
+                for (Long txVer : activeTxs.keySet()) {
+                    if (txVer < ver) {
+                        tracking = Math.min(txVer, tracking);
+                        res.addTx(txVer);
+                    }
+                }
+            }
+
+            TreeMap<Long, AtomicInteger> nodeMap = activeQueries.get(nodeId);
+
+            if (nodeMap == null) {
+                activeQueries.put(nodeId, nodeMap = new TreeMap<>());
+
+                nodeMap.put(tracking, new AtomicInteger(1));
+            }
+            else {
+                AtomicInteger cntr = nodeMap.get(tracking);
+
+                if (cntr == null)
+                    nodeMap.put(tracking, new AtomicInteger(1));
+                else
+                    cntr.incrementAndGet();
+            }
+
+            if (minQry == null)
+                minQry = tracking;
+
+            res.init(futId, crdVer, ver, MVCC_READ_OP_CNTR, MVCC_COUNTER_NA, tracking);
+
+            return res;
+        }
+
+        /** */
+        private synchronized void onQueryDone(UUID nodeId, Long ver) {
+            TreeMap<Long, AtomicInteger> nodeMap = activeQueries.get(nodeId);
+
+            if (nodeMap == null)
+                return;
+
+            assert minQry != null;
+
+            AtomicInteger cntr = nodeMap.get(ver);
+
+            assert cntr != null && cntr.get() > 0 : "onQueryDone ver=" + ver;
+
+            if (cntr.decrementAndGet() == 0) {
+                nodeMap.remove(ver);
+
+                if (nodeMap.isEmpty())
+                    activeQueries.remove(nodeId);
+
+                if (ver.equals(minQry))
+                    minQry = activeMinimal();
+            }
+        }
+
+        /** */
+        private synchronized void onNodeFailed(UUID nodeId) {
+            activeQueries.remove(nodeId);
+
+            minQry = activeMinimal();
+        }
+
+        /** */
+        private Long activeMinimal() {
+            Long min = null;
+
+            for (TreeMap<Long, AtomicInteger> s : activeQueries.values()) {
+                Long first = s.firstKey();
+
+                if (min == null || first < min)
+                    min = first;
+            }
+
+            return min;
+        }
+    }
+
+    /**
+     *
+     */
+    private class WaitAckFuture extends MvccFuture<Void> {
+        /** */
+        private final long id;
+
+        /** */
+        final boolean ackTx;
+
+        /**
+         * @param id Future ID.
+         * @param nodeId Coordinator node ID.
+         * @param ackTx {@code True} if ack tx commit, {@code false} if waits for previous txs.
+         */
+        WaitAckFuture(long id, UUID nodeId, boolean ackTx) {
+            super(nodeId);
+
+            this.id = id;
+            this.ackTx = ackTx;
+        }
+
+        /**
+         *
+         */
+        void onResponse() {
+            onDone();
+        }
+
+        /**
+         * @param nodeId Failed node ID.
+         */
+        void onNodeLeft(UUID nodeId) {
+            if (crdId.equals(nodeId) && ackFuts.remove(id) != null)
+                onDone();
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(WaitAckFuture.class, this, super.toString());
+        }
+    }
+
+    /**
+     *
+     */
+    private class CacheCoordinatorNodeFailListener implements GridLocalEventListener {
+        /** {@inheritDoc} */
+        @Override public void onEvent(Event evt) {
+            assert evt instanceof DiscoveryEvent : evt;
+
+            DiscoveryEvent discoEvt = (DiscoveryEvent)evt;
+
+            UUID nodeId = discoEvt.eventNode().id();
+
+            Map<Long, MvccSnapshotResponseListener> map = snapLsnrs.remove(nodeId);
+
+            if (map != null) {
+                ClusterTopologyCheckedException ex = new ClusterTopologyCheckedException("Failed to request mvcc " +
+                    "version, coordinator failed: " + nodeId);
+
+                MvccSnapshotResponseListener lsnr;
+
+                for (Long id : map.keySet()) {
+                    if ((lsnr = map.remove(id)) != null)
+                        lsnr.onError(ex);
+                }
+            }
+
+            for (WaitAckFuture fut : ackFuts.values())
+                fut.onNodeLeft(nodeId);
+
+            activeQueries.onNodeFailed(nodeId);
+
+            prevCrdQueries.onNodeFailed(nodeId);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "CacheCoordinatorDiscoveryListener[]";
+        }
+    }
+
+    /**
+     *
+     */
+    private class CoordinatorMessageListener implements GridMessageListener {
+        /** {@inheritDoc} */
+        @Override public void onMessage(UUID nodeId, Object msg, byte plc) {
+            MvccMessage msg0 = (MvccMessage)msg;
+
+            if (msg0.waitForCoordinatorInit() && !initFut.isDone()) {
+                initFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
+                    @Override public void apply(IgniteInternalFuture<Void> future) {
+                        assert crdVer != 0L;
+
+                        processMessage(nodeId, msg);
+                    }
+                });
+            }
+            else
+                processMessage(nodeId, msg);
+        }
+
+        /**
+         * Processes mvcc message.
+         *
+         * @param nodeId Node id.
+         * @param msg Message.
+         */
+        private void processMessage(UUID nodeId, Object msg) {
+            if (msg instanceof MvccTxSnapshotRequest)
+                processCoordinatorTxSnapshotRequest(nodeId, (MvccTxSnapshotRequest)msg);
+            else if (msg instanceof MvccAckRequestTx)
+                processCoordinatorTxAckRequest(nodeId, (MvccAckRequestTx)msg);
+            else if (msg instanceof MvccFutureResponse)
+                processCoordinatorAckResponse(nodeId, (MvccFutureResponse)msg);
+            else if (msg instanceof MvccAckRequestQueryCntr)
+                processCoordinatorQueryAckRequest(nodeId, (MvccAckRequestQueryCntr)msg);
+            else if (msg instanceof MvccQuerySnapshotRequest)
+                processCoordinatorQuerySnapshotRequest(nodeId, (MvccQuerySnapshotRequest)msg);
+            else if (msg instanceof MvccSnapshotResponse)
+                processCoordinatorSnapshotResponse(nodeId, (MvccSnapshotResponse)msg);
+            else if (msg instanceof MvccWaitTxsRequest)
+                processCoordinatorWaitTxsRequest(nodeId, (MvccWaitTxsRequest)msg);
+            else if (msg instanceof MvccAckRequestQueryId)
+                processNewCoordinatorQueryAckRequest(nodeId, (MvccAckRequestQueryId)msg);
+            else if (msg instanceof MvccActiveQueriesMessage)
+                processCoordinatorActiveQueriesMessage(nodeId, (MvccActiveQueriesMessage)msg);
+            else
+                U.warn(log, "Unexpected message received [node=" + nodeId + ", msg=" + msg + ']');
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "CoordinatorMessageListener[]";
+        }
+    }
+
+    /** */
+    private interface Waiter {
+        /**
+         * @param ctx Grid kernal context.
+         */
+        void run(GridKernalContext ctx);
+
+        /**
+         * @param other Another waiter.
+         * @return New compound waiter.
+         */
+        Waiter concat(Waiter other);
+
+        /**
+         * @return {@code True} if there is an active local transaction
+         */
+        boolean hasLocalTransaction();
+
+        /**
+         * @return {@code True} if it is a compound waiter.
+         */
+        boolean compound();
+    }
+
+    /** */
+    private static class LockFuture extends GridFutureAdapter<Void> implements Waiter, Runnable {
+        /** */
+        private final byte plc;
+
+        /**
+         * @param plc Pool policy.
+         */
+        LockFuture(byte plc) {
+            this.plc = plc;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            onDone();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run(GridKernalContext ctx) {
+            try {
+                ctx.pools().poolForPolicy(plc).execute(this);
+            }
+            catch (IgniteCheckedException e) {
+                U.error(ctx.log(LockFuture.class), e);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public Waiter concat(Waiter other) {
+            return new CompoundWaiterNoLocal(this, other);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasLocalTransaction() {
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean compound() {
+            return false;
+        }
+    }
+
+    /** */
+    private static class LocalTransactionMarker implements Waiter {
+        /** {@inheritDoc} */
+        @Override public void run(GridKernalContext ctx) {
+            // No-op
+        }
+
+        /** {@inheritDoc} */
+        @Override public Waiter concat(Waiter other) {
+            return new CompoundWaiter(other);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasLocalTransaction() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean compound() {
+            return false;
+        }
+    }
+
+    /** */
+    @SuppressWarnings("unchecked")
+    private static class CompoundWaiter implements Waiter {
+        /** */
+        private final Object inner;
+
+        /**
+         * @param waiter Waiter to wrap.
+         */
+        private CompoundWaiter(Waiter waiter) {
+            inner = waiter.compound() ? ((CompoundWaiter)waiter).inner : waiter;
+        }
+
+        /**
+         * @param first First waiter.
+         * @param second Second waiter.
+         */
+        private CompoundWaiter(Waiter first, Waiter second) {
+            ArrayList<Waiter> list = new ArrayList<>();
+
+            add(list, first);
+            add(list, second);
+
+            inner = list;
+        }
+
+        /** */
+        private void add(List<Waiter> to, Waiter waiter) {
+            if (!waiter.compound())
+                to.add(waiter);
+            else if (((CompoundWaiter)waiter).inner.getClass() == ArrayList.class)
+                to.addAll((List<Waiter>)((CompoundWaiter)waiter).inner);
+            else
+                to.add((Waiter)((CompoundWaiter)waiter).inner);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run(GridKernalContext ctx) {
+            if (inner.getClass() == ArrayList.class) {
+                for (Waiter waiter : (List<Waiter>)inner) {
+                    waiter.run(ctx);
+                }
+            }
+            else
+                ((Waiter)inner).run(ctx);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Waiter concat(Waiter other) {
+            return new CompoundWaiter(this, other);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasLocalTransaction() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean compound() {
+            return true;
+        }
+    }
+
+    /** */
+    private static class CompoundWaiterNoLocal extends CompoundWaiter {
+        /**
+         * @param first First waiter.
+         * @param second Second waiter.
+         */
+        private CompoundWaiterNoLocal(Waiter first, Waiter second) {
+            super(first, second);
+        }
+
+        /** {@inheritDoc} */
+        @Override public Waiter concat(Waiter other) {
+            return new CompoundWaiterNoLocal(this, other);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasLocalTransaction() {
+            return false;
+        }
+    }
+
+    /**
+     * Mvcc garbage collection scheduler.
+     */
+    private static class VacuumScheduler extends GridWorker {
+        /** */
+        private final static long VACUUM_TIMEOUT = 60_000;
+
+        /** */
+        private final long interval;
+
+        /** */
+        private final MvccProcessorImpl prc;
+
+        /**
+         * @param ctx Kernal context.
+         * @param log Logger.
+         * @param prc Mvcc processor.
+         */
+        VacuumScheduler(GridKernalContext ctx, IgniteLogger log, MvccProcessorImpl prc) {
+            super(ctx.igniteInstanceName(), "vacuum-scheduler", log);
+
+            this.interval = ctx.config().getMvccVacuumFrequency();
+            this.prc = prc;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+            U.sleep(interval); // initial delay
+
+            while (!isCancelled()) {
+                long nextScheduledTime = U.currentTimeMillis() + interval;
+
+                try {
+                    IgniteInternalFuture<VacuumMetrics> fut = prc.runVacuum();
+
+                    if (log.isDebugEnabled())
+                        log.debug("Vacuum started by scheduler.");
+
+                    while (true) {
+                        try {
+                            fut.get(VACUUM_TIMEOUT);
+
+                            break;
+                        }
+                        catch (IgniteFutureTimeoutCheckedException e) {
+                            U.warn(log, "Failed to wait for vacuum complete. Consider increasing vacuum workers count.");
+                        }
+                    }
+                }
+                catch (IgniteInterruptedCheckedException e) {
+                    throw e;
+                }
+                catch (Throwable e) {
+                    prc.vacuumError(e);
+
+                    if (e instanceof Error)
+                        throw (Error) e;
+                }
+
+                long delay = nextScheduledTime - U.currentTimeMillis();
+
+                if (delay > 0)
+                    U.sleep(delay);
+            }
+        }
+    }
+
+    /**
+     * Vacuum worker.
+     */
+    private static class VacuumWorker extends GridWorker {
+        /** */
+        private final BlockingQueue<VacuumTask> cleanupQueue;
+
+        /**
+         * @param ctx Kernal context.
+         * @param log Logger.
+         * @param cleanupQueue Cleanup tasks queue.
+         */
+        VacuumWorker(GridKernalContext ctx, IgniteLogger log, BlockingQueue<VacuumTask> cleanupQueue) {
+            super(ctx.igniteInstanceName(), "vacuum-cleaner", log);
+
+            this.cleanupQueue = cleanupQueue;
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+            while (!isCancelled()) {
+                VacuumTask task = cleanupQueue.take();
+
+                try {
+                    if (task.part().state() != OWNING) {
+                        task.part().group().preloader().rebalanceFuture()
+                            .listen(new IgniteInClosure<IgniteInternalFuture<Boolean>>() {
+                                @Override public void apply(IgniteInternalFuture<Boolean> future) {
+                                    cleanupQueue.add(task);
+                                }
+                            });
+
+                        continue;
+                    }
+
+                    task.onDone(processPartition(task));
+                }
+                catch (IgniteInterruptedCheckedException e) {
+                    throw e; // Cancelled.
+                }
+                catch (Throwable e) {
+                    task.onDone(e);
+
+                    if (e instanceof Error)
+                        throw (Error) e;
+                }
+            }
+        }
+
+        /**
+         * Process partition.
+         *
+         * @param task VacuumTask.
+         * @throws IgniteCheckedException If failed.
+         */
+        private VacuumMetrics processPartition(VacuumTask task) throws IgniteCheckedException {
+            long startNanoTime = System.nanoTime();
+
+            GridDhtLocalPartition part = task.part();
+
+            VacuumMetrics metrics = new VacuumMetrics();
+
+            if (part == null || part.state() != OWNING || !part.reserve())
+                return metrics;
+
+            try {
+                GridCursor<? extends CacheDataRow> cursor = part.dataStore().cursor(KEY_ONLY);
+
+                KeyCacheObject prevKey = null;
+
+                Object rest = null;
+
+                List<MvccLinkAwareSearchRow> cleanupRows = null;
+
+                MvccSnapshot snapshot = task.snapshot();
+
+                GridCacheContext cctx = null;
+
+                int curCacheId = CU.UNDEFINED_CACHE_ID;
+
+                boolean shared = part.group().sharedGroup();
+
+                if (!shared && (cctx = F.first(part.group().caches())) == null)
+                    return metrics;
+
+                while (cursor.next()) {
+                    if (isCancelled())
+                        throw new IgniteInterruptedCheckedException("Operation has been cancelled.");
+
+                    MvccDataRow row = (MvccDataRow)cursor.get();
+
+                    if (prevKey == null)
+                        prevKey = row.key();
+
+                    if (cctx == null) {
+                        assert shared;
+
+                        cctx = part.group().shared().cacheContext(curCacheId = row.cacheId());
+
+                        if (cctx == null)
+                            return metrics;
+                    }
+
+                    if (!prevKey.equals(row.key()) || (shared && curCacheId != row.cacheId())) {
+                        if (rest != null || !F.isEmpty(cleanupRows))
+                            cleanup(part, prevKey, cleanupRows, rest, cctx, metrics);
+
+                        cleanupRows = null;
+
+                        rest = null;
+
+                        if (shared && curCacheId != row.cacheId()) {
+                            cctx = part.group().shared().cacheContext(curCacheId = row.cacheId());
+
+                            if (cctx == null)
+                                return metrics;
+                        }
+
+                        prevKey = row.key();
+                    }
+
+                    if (canClean(row, snapshot, cctx))
+                        cleanupRows = addRow(cleanupRows, row);
+                    else if (actualize(cctx, row, snapshot))
+                        rest = addRest(rest, row);
+
+                    metrics.addScannedRowsCount(1);
+                }
+
+                if (rest != null || !F.isEmpty(cleanupRows))
+                    cleanup(part, prevKey, cleanupRows, rest, cctx, metrics);
+
+                metrics.addSearchNanoTime(System.nanoTime() - startNanoTime - metrics.cleanupNanoTime());
+
+                return metrics;
+            }
+            finally {
+                part.release();
+            }
+        }
+
+        /** */
+        @SuppressWarnings("unchecked")
+        @NotNull private Object addRest(@Nullable Object rest, MvccDataRow row) {
+            if (rest == null)
+                rest = row;
+            else if (rest.getClass() == ArrayList.class)
+                ((List)rest).add(row);
+            else {
+                ArrayList list = new ArrayList();
+
+                list.add(rest);
+                list.add(row);
+
+                rest = list;
+            }
+
+            return rest;
+        }
+
+        /**
+         * @param rows Collection of rows.
+         * @param row Row to add.
+         * @return Collection of rows.
+         */
+        @NotNull private List<MvccLinkAwareSearchRow> addRow(@Nullable List<MvccLinkAwareSearchRow> rows, MvccDataRow row) {
+            if (rows == null)
+                rows = new ArrayList<>();
+
+            rows.add(new MvccLinkAwareSearchRow(row.cacheId(), row.key(), row.mvccCoordinatorVersion(),
+                row.mvccCounter(), row.mvccOperationCounter(), row.link()));
+
+            return rows;
+        }
+
+        /**
+         * @param row Mvcc row to check.
+         * @param snapshot Cleanup version to compare with.
+         * @param cctx Cache context.
+         * @throws IgniteCheckedException If failed.
+         */
+        private boolean canClean(MvccDataRow row, MvccSnapshot snapshot,
+            GridCacheContext cctx) throws IgniteCheckedException {
+            // Row can be safely cleaned if it has ABORTED min version or COMMITTED and less than cleanup one max version.
+            return compare(row, snapshot.coordinatorVersion(), snapshot.cleanupVersion()) <= 0
+                && hasNewVersion(row) && MvccUtils.compareNewVersion(row, snapshot.coordinatorVersion(), snapshot.cleanupVersion()) <= 0
+                && MvccUtils.state(cctx, row.newMvccCoordinatorVersion(), row.newMvccCounter(),
+                row.newMvccOperationCounter() | (row.newMvccTxState() << PageIO.MVCC_HINTS_BIT_OFF)) == TxState.COMMITTED
+                || MvccUtils.state(cctx, row.mvccCoordinatorVersion(), row.mvccCounter(),
+                row.mvccOperationCounter() | (row.mvccTxState() << PageIO.MVCC_HINTS_BIT_OFF)) == TxState.ABORTED;
+        }
+
+        /** */
+        private boolean actualize(GridCacheContext cctx, MvccDataRow row,
+            MvccSnapshot snapshot) throws IgniteCheckedException {
+            return isVisible(cctx, snapshot, row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter(), false)
+                && (row.mvccTxState() == TxState.NA || (row.newMvccCoordinatorVersion() != MVCC_CRD_COUNTER_NA && row.newMvccTxState() == TxState.NA));
+        }
+
+        /**
+         * @param part Local partition.
+         * @param key Key.
+         * @param cleanupRows Cleanup rows.
+         * @param cctx Cache context.
+         * @param metrics Vacuum metrics.
+         * @throws IgniteCheckedException If failed.
+         */
+        @SuppressWarnings("unchecked")
+        private void cleanup(GridDhtLocalPartition part, KeyCacheObject key, List<MvccLinkAwareSearchRow> cleanupRows,
+            Object rest, GridCacheContext cctx, VacuumMetrics metrics) throws IgniteCheckedException {
+            assert key != null && cctx != null && (!F.isEmpty(cleanupRows) || rest != null);
+
+            long cleanupStartNanoTime = System.nanoTime();
+
+            GridCacheEntryEx entry = cctx.cache().entryEx(key);
+
+            while (true) {
+                entry.lockEntry();
+
+                if (!entry.obsolete())
+                    break;
+
+                entry.unlockEntry();
+
+                entry = cctx.cache().entryEx(key);
+            }
+
+            cctx.shared().database().checkpointReadLock();
+
+            int cleaned = 0;
+
+            try {
+                if (cleanupRows != null)
+                    cleaned = part.dataStore().cleanup(cctx, cleanupRows);
+
+                if (rest != null) {
+                    if (rest.getClass() == ArrayList.class) {
+                        for (MvccDataRow row : ((List<MvccDataRow>)rest)) {
+                            part.dataStore().updateTxState(cctx, row);
+                        }
+                    }
+                    else
+                        part.dataStore().updateTxState(cctx, (MvccDataRow)rest);
+                }
+            }
+            finally {
+                cctx.shared().database().checkpointReadUnlock();
+
+                entry.unlockEntry();
+                cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
+
+                metrics.addCleanupNanoTime(System.nanoTime() - cleanupStartNanoTime);
+                metrics.addCleanupRowsCnt(cleaned);
+            }
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTracker.java
new file mode 100644
index 0000000..f143a43
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTracker.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Mvcc tracker.
+ */
+public interface MvccQueryTracker {
+    /** */
+    public static final AtomicLong ID_CNTR = new AtomicLong();
+
+    /** */
+    public static final long MVCC_TRACKER_ID_NA = -1;
+
+    /**
+     * @return Tracker id.
+     */
+    public long id();
+
+    /**
+     * @return Requested MVCC snapshot.
+     */
+    public MvccSnapshot snapshot();
+
+    /**
+     * @return Cache context.
+     */
+    public GridCacheContext context();
+
+    /**
+     * @return Topology version.
+     */
+    public AffinityTopologyVersion topologyVersion();
+
+    /**
+     * Requests version on coordinator.
+     *
+     * @return Future to wait for result.
+     */
+    public IgniteInternalFuture<MvccSnapshot> requestSnapshot();
+
+    /**
+     * Requests version on coordinator.
+     *
+     * @param topVer Topology version.
+     * @return Future to wait for result.
+     */
+    public IgniteInternalFuture<MvccSnapshot> requestSnapshot(@NotNull AffinityTopologyVersion topVer);
+
+    /**
+     * Requests version on coordinator.
+     *
+     * @param topVer Topology version.
+     * @param lsnr Response listener.
+     */
+    public void requestSnapshot(@NotNull AffinityTopologyVersion topVer, @NotNull MvccSnapshotResponseListener lsnr);
+
+    /**
+     * Marks tracker as done.
+     */
+    public void onDone();
+
+    /**
+     * Marks tracker as done.
+     *
+     * @param tx Transaction.
+     * @param commit Commit flag.
+     * @return Acknowledge future.
+     */
+    @Nullable public IgniteInternalFuture<Void> onDone(@NotNull GridNearTxLocal tx, boolean commit);
+
+    /**
+     * Mvcc coordinator change callback.
+     *
+     * @param newCrd New mvcc coordinator.
+     * @return Query id if exists.
+     */
+    long onMvccCoordinatorChange(MvccCoordinator newCrd);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTrackerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTrackerImpl.java
new file mode 100644
index 0000000..f46d1e0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccQueryTrackerImpl.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
+import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.jetbrains.annotations.NotNull;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.noCoordinatorError;
+
+/**
+ * Tracker used for an optimistic tx and not-in-tx queries.
+ */
+@SuppressWarnings("unchecked")
+public class MvccQueryTrackerImpl implements MvccQueryTracker {
+    /** */
+    @GridToStringExclude
+    private final GridCacheContext cctx;
+
+    /** */
+    @GridToStringExclude
+    private final IgniteLogger log;
+
+    /** */
+    @GridToStringExclude
+    private long crdVer;
+
+    /** */
+    private final long id;
+
+    /** */
+    private MvccSnapshot snapshot;
+
+    /** */
+    private volatile AffinityTopologyVersion topVer;
+
+    /** */
+    private final boolean canRemap;
+
+    /**
+     * @param cctx Cache context.
+     */
+    public MvccQueryTrackerImpl(GridCacheContext cctx) {
+        this(cctx, true);
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param canRemap {@code True} if tracker can remap on coordinator fail.
+     */
+    public MvccQueryTrackerImpl(GridCacheContext cctx, boolean canRemap) {
+        this.cctx = cctx;
+        this.id = ID_CNTR.incrementAndGet();
+        this.canRemap = canRemap;
+
+        log = cctx.logger(getClass());
+    }
+
+    /** {@inheritDoc} */
+    @Override public long id() {
+        return id;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized MvccSnapshot snapshot() {
+        return snapshot;
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheContext context() {
+        return cctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public AffinityTopologyVersion topologyVersion() {
+        return topVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshot() {
+        MvccSnapshot snapshot; MvccSnapshotFuture fut;
+
+        if ((snapshot = snapshot()) != null)
+            return new GridFinishedFuture<>(snapshot);
+
+        requestSnapshot0(cctx.shared().exchange().readyAffinityVersion(), fut = new MvccSnapshotFuture());
+
+        return fut;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshot(@NotNull AffinityTopologyVersion topVer) {
+        MvccSnapshot snapshot; MvccSnapshotFuture fut;
+
+        if ((snapshot = snapshot()) != null)
+            return new GridFinishedFuture<>(snapshot);
+
+        requestSnapshot0(topVer, fut = new MvccSnapshotFuture());
+
+        return fut;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void requestSnapshot(@NotNull AffinityTopologyVersion topVer, @NotNull MvccSnapshotResponseListener lsnr) {
+        MvccSnapshot snapshot = snapshot();
+
+        if (snapshot != null)
+            lsnr.onResponse(snapshot);
+        else
+            requestSnapshot0(topVer, lsnr);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onDone() {
+        MvccProcessor prc = cctx.shared().coordinators();
+
+        MvccSnapshot snapshot = snapshot();
+
+        if (snapshot != null) {
+            prc.removeQueryTracker(id);
+
+            prc.ackQueryDone(snapshot, id);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> onDone(@NotNull GridNearTxLocal tx, boolean commit) {
+        MvccSnapshot snapshot = snapshot(), txSnapshot = tx.mvccSnapshot();
+
+        if (snapshot == null && txSnapshot == null)
+            return commit ? new GridFinishedFuture<>() : null;
+
+        MvccProcessor prc = cctx.shared().coordinators();
+
+        if (snapshot != null)
+            prc.removeQueryTracker(id);
+
+        if (txSnapshot == null)
+            prc.ackQueryDone(snapshot, id);
+        else if (commit)
+            return prc.ackTxCommit(txSnapshot, snapshot, id);
+        else
+            prc.ackTxRollback(txSnapshot, snapshot, id);
+
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public synchronized long onMvccCoordinatorChange(MvccCoordinator newCrd) {
+        if (snapshot != null) {
+            assert crdVer != 0 : this;
+
+            if (crdVer != newCrd.coordinatorVersion()) {
+                crdVer = newCrd.coordinatorVersion();
+
+                return id;
+            }
+            else
+                return MVCC_TRACKER_ID_NA;
+        }
+        else if (crdVer != 0)
+            crdVer = 0; // Mark for remap.
+
+        return MVCC_TRACKER_ID_NA;
+    }
+
+    /** */
+    private void requestSnapshot0(AffinityTopologyVersion topVer, MvccSnapshotResponseListener lsnr) {
+        if (checkTopology(topVer, lsnr = decorate(lsnr))) {
+            try {
+                MvccSnapshot snapshot = cctx.shared().coordinators().tryRequestSnapshotLocal();
+
+                if (snapshot == null)
+                    cctx.shared().coordinators().requestSnapshotAsync(lsnr);
+                else
+                    lsnr.onResponse(snapshot);
+            }
+            catch (ClusterTopologyCheckedException e) {
+                lsnr.onError(e);
+            }
+        }
+    }
+
+    /** */
+    private MvccSnapshotResponseListener decorate(MvccSnapshotResponseListener lsnr) {
+        assert lsnr != null;
+
+        if (lsnr.getClass() == ListenerDecorator.class)
+            return lsnr;
+
+        return new ListenerDecorator(lsnr);
+    }
+
+    /**
+     * Validates if mvcc snapshot could be requested on the given topology.
+     *
+     * @return {@code True} if topology is valid.
+     */
+    private boolean checkTopology(AffinityTopologyVersion topVer, MvccSnapshotResponseListener lsnr) {
+        MvccCoordinator crd = cctx.affinity().mvccCoordinator(topVer);
+
+        if (crd == null) {
+            lsnr.onError(noCoordinatorError(topVer));
+
+            return false;
+        }
+
+        this.topVer = topVer;
+
+        synchronized (this) {
+            crdVer = crd.coordinatorVersion();
+        }
+
+        MvccCoordinator curCrd = cctx.topology().mvccCoordinator();
+
+        if (!crd.equals(curCrd)) {
+            assert cctx.topology().topologyVersionFuture().initialVersion().compareTo(topVer) > 0;
+
+            tryRemap(lsnr);
+
+            return false;
+        }
+
+        return true;
+    }
+
+    /** */
+    private void tryRemap(MvccSnapshotResponseListener lsnr) {
+        if (!canRemap) {
+            lsnr.onError(new ClusterTopologyCheckedException("Failed to request mvcc version, coordinator failed."));
+
+            return;
+        }
+
+        IgniteInternalFuture<AffinityTopologyVersion> waitFut =
+            cctx.shared().exchange().affinityReadyFuture(topVer.nextMinorVersion());
+
+        if (waitFut == null)
+            requestSnapshot(cctx.shared().exchange().readyAffinityVersion(), lsnr);
+        else {
+            waitFut.listen(new IgniteInClosure<IgniteInternalFuture<AffinityTopologyVersion>>() {
+                @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
+                    try {
+                        requestSnapshot(fut.get(), lsnr);
+                    }
+                    catch (IgniteCheckedException e) {
+                        lsnr.onError(e);
+                    }
+                }
+            });
+        }
+    }
+
+    /**
+     * @param res Response.
+     * @param lsnr Response listener.
+     * @return {@code false} if need to remap.
+     */
+    private boolean onResponse0(@NotNull MvccSnapshot res, MvccSnapshotResponseListener lsnr) {
+        boolean needRemap = false;
+
+        synchronized (this) {
+            assert snapshot() == null : "[this=" + this + ", rcvdVer=" + res + "]";
+
+            if (crdVer != 0) {
+                this.snapshot = res;
+            }
+            else
+                needRemap = true;
+        }
+
+        if (needRemap) { // Coordinator failed or reassigned, need remap.
+            tryRemap(lsnr);
+
+            return false;
+        }
+
+        cctx.shared().coordinators().addQueryTracker(this);
+
+        return true;
+    }
+
+    /**
+     * @param e Exception.
+     * @param lsnr Response listener.
+     * @return {@code false} if need to remap.
+     */
+    private boolean onError0(IgniteCheckedException e, MvccSnapshotResponseListener lsnr) {
+        if (e instanceof ClusterTopologyCheckedException && canRemap) {
+            if (e instanceof ClusterTopologyServerNotFoundException)
+                return true; // No Mvcc coordinator assigned
+
+            if (log.isDebugEnabled())
+                log.debug("Mvcc coordinator failed, need remap: " + e);
+
+            tryRemap(lsnr);
+
+            return false;
+        }
+
+        return true;
+    }
+
+    /** */
+    private final class ListenerDecorator implements MvccSnapshotResponseListener {
+        /** */
+        private final MvccSnapshotResponseListener lsnr;
+
+        /** */
+        private ListenerDecorator(MvccSnapshotResponseListener lsnr) {
+            this.lsnr = lsnr;
+        }
+
+        @Override public void onResponse(MvccSnapshot res) {
+            if (onResponse0(res, this))
+                lsnr.onResponse(res);
+        }
+
+        @Override public void onError(IgniteCheckedException e) {
+            if (onError0(e, this))
+                lsnr.onError(e);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshot.java
new file mode 100644
index 0000000..5ed743a
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshot.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.plugin.extensions.communication.Message;
+
+/**
+ * MVCC snapshot which holds the following information:
+ * - Current MVCC version which should be used for visibility checks
+ * - List of active transactions which should not be visible to current transaction
+ * - Cleanup version which is used to help vacuum process.
+ */
+public interface MvccSnapshot extends MvccVersion, Message {
+    /**
+     * @return Active transactions.
+     */
+    public MvccLongList activeTransactions();
+
+    /**
+     * @return Cleanup version (all smaller versions are safe to remove).
+     */
+    public long cleanupVersion();
+
+    /**
+     * @return Version without active transactions.
+     */
+    public MvccSnapshot withoutActiveTransactions();
+
+    /**
+     * Increments operation counter.
+     */
+    public void incrementOperationCounter();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotFuture.java
new file mode 100644
index 0000000..934ff2f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotFuture.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ *
+ */
+public class MvccSnapshotFuture extends MvccFuture<MvccSnapshot> implements MvccSnapshotResponseListener {
+    /** {@inheritDoc} */
+    @Override public void onResponse(MvccSnapshot res) {
+        assert res != null;
+
+        onDone(res);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onError(IgniteCheckedException err) {
+        onDone(err);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccSnapshotFuture.class, this, super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotResponseListener.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotResponseListener.java
new file mode 100644
index 0000000..e0bf448
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotResponseListener.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.IgniteCheckedException;
+
+/**
+ *
+ */
+public interface MvccSnapshotResponseListener {
+    /**
+     * @param res Version.
+     */
+    public void onResponse(MvccSnapshot res);
+
+    /**
+     * @param e Error.
+     */
+    public void onError(IgniteCheckedException e);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotWithoutTxs.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotWithoutTxs.java
new file mode 100644
index 0000000..5be6317
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccSnapshotWithoutTxs.java
@@ -0,0 +1,204 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccSnapshotWithoutTxs implements MvccSnapshot {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long crdVer;
+
+    /** */
+    private long cntr;
+
+    /** */
+    private long cleanupVer;
+
+    /** */
+    private int opCntr;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccSnapshotWithoutTxs() {
+        // No-op.
+    }
+
+    /**
+     * @param crdVer Coordinator version.
+     * @param cntr Counter.
+     * @param cleanupVer Cleanup version.
+     */
+    public MvccSnapshotWithoutTxs(long crdVer, long cntr, int opCntr, long cleanupVer) {
+        this.crdVer = crdVer;
+        this.cntr = cntr;
+        this.cleanupVer = cleanupVer;
+        this.opCntr = opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccLongList activeTransactions() {
+        return MvccEmptyLongList.INSTANCE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long coordinatorVersion() {
+        return crdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long cleanupVersion() {
+        return cleanupVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long counter() {
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int operationCounter() {
+        return opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void incrementOperationCounter() {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccSnapshot withoutActiveTransactions() {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("cleanupVer", cleanupVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeLong("cntr", cntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 2:
+                if (!writer.writeLong("crdVer", crdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 3:
+                if (!writer.writeInt("opCntr", opCntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                cleanupVer = reader.readLong("cleanupVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                cntr = reader.readLong("cntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 2:
+                crdVer = reader.readLong("crdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 3:
+                opCntr = reader.readInt("opCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccSnapshotWithoutTxs.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 150;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 4;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccSnapshotWithoutTxs.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUpdateVersionAware.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUpdateVersionAware.java
new file mode 100644
index 0000000..17804c4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUpdateVersionAware.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ * Interface for objects aware theirs mvcc update version.
+ */
+public interface MvccUpdateVersionAware {
+    /**
+     * @return New mvcc coordinator version.
+     */
+    public long newMvccCoordinatorVersion();
+
+    /**
+     * @return New mvcc counter.
+     */
+    public long newMvccCounter();
+
+    /**
+     * @return New mvcc operation counter.
+     */
+    public int newMvccOperationCounter();
+
+    /**
+     * @return New Tx state.
+     */
+    public byte newMvccTxState();
+
+    /**
+     * Copies new MVCC version
+     * @param other Object to copy version from.
+     */
+    public default void newMvccVersion(MvccUpdateVersionAware other) {
+        newMvccVersion(other.newMvccCoordinatorVersion(), other.newMvccCounter(), other.newMvccOperationCounter());
+    }
+
+    /**
+     * Sets new MVCC version
+     * @param ver MVCC version.
+     */
+    public default void newMvccVersion(MvccVersion ver) {
+        newMvccVersion(ver.coordinatorVersion(), ver.counter(), ver.operationCounter());
+    }
+
+    /**
+     * Sets new mvcc version.
+     * @param crd New mvcc coordinator version.
+     * @param cntr New mvcc counter.
+     * @param opCntr New mvcc operation counter.
+     */
+    public default void newMvccVersion(long crd, long cntr, int opCntr) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @return New mvcc version.
+     */
+    public default MvccVersion newMvccVersion() {
+        return new MvccVersionImpl(newMvccCoordinatorVersion(), newMvccCounter(), newMvccOperationCounter());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUtils.java
new file mode 100644
index 0000000..0422459
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccUtils.java
@@ -0,0 +1,884 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.TransactionConfiguration;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.transactions.IgniteTxMvccVersionCheckedException;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.transactions.TransactionState;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.pagemem.PageIdUtils.itemId;
+import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_BIT_OFF;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_MASK;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.TRANSACTION_COMPLETED;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Utils for MVCC.
+ */
+public class MvccUtils {
+    /** */
+    public static final long MVCC_CRD_COUNTER_NA = 0L;
+    /** */
+    public static final long MVCC_CRD_START_CNTR = 1L;
+    /** */
+    public static final long MVCC_COUNTER_NA = 0L;
+    /** */
+    public static final long MVCC_INITIAL_CNTR = 1L;
+    /** */
+    public static final long MVCC_START_CNTR = 3L;
+    /** */
+    public static final int MVCC_OP_COUNTER_NA = 0;
+    /** */
+    public static final int MVCC_START_OP_CNTR = 1;
+    /** */
+    public static final int MVCC_READ_OP_CNTR = ~MVCC_HINTS_MASK;
+
+    /** */
+    public static final int MVCC_INVISIBLE = 0;
+    /** */
+    public static final int MVCC_VISIBLE_REMOVED = 1;
+    /** */
+    public static final int MVCC_VISIBLE = 2;
+
+    /** */
+    public static final MvccVersion INITIAL_VERSION =
+        mvccVersion(MVCC_CRD_START_CNTR, MVCC_INITIAL_CNTR, MVCC_START_OP_CNTR);
+
+    /** */
+    public static final MvccVersion MVCC_VERSION_NA =
+        mvccVersion(MVCC_CRD_COUNTER_NA, MVCC_COUNTER_NA, MVCC_OP_COUNTER_NA);
+
+    /** */
+    private static final MvccClosure<Integer> getVisibleState = new GetVisibleState();
+
+    /** */
+    private static final MvccClosure<Boolean> isVisible = new IsVisible();
+
+    /** */
+    private static final MvccClosure<MvccVersion> getNewVer = new GetNewVersion();
+
+    /**
+     *
+     */
+    private MvccUtils(){
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param mvccCrd Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param snapshot Snapshot.
+     * @return {@code True} if transaction is active.
+     * @see TxState
+     * @throws IgniteCheckedException If failed.
+     */
+    public static boolean isActive(GridCacheContext cctx, long mvccCrd, long mvccCntr, MvccSnapshot snapshot)
+        throws IgniteCheckedException {
+        if (isVisible(cctx, snapshot, mvccCrd, mvccCntr, MVCC_OP_COUNTER_NA, false))
+            return false;
+
+        byte state = state(cctx, mvccCrd, mvccCntr, 0);
+
+        return state != TxState.COMMITTED && state != TxState.ABORTED
+            || cctx.kernalContext().coordinators().hasLocalTransaction(mvccCrd, mvccCntr);
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param mvccCrd Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Mvcc operation counter.
+     * @return TxState
+     * @see TxState
+     * @throws IgniteCheckedException If failed.
+     */
+    public static byte state(GridCacheContext cctx, long mvccCrd, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        return state(cctx.kernalContext().coordinators(), mvccCrd, mvccCntr, mvccOpCntr);
+    }
+
+    /**
+     * @param grp Cache group context.
+     * @param mvccCrd Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Mvcc operation counter.
+     * @return TxState
+     * @see TxState
+     * @throws IgniteCheckedException If failed.
+     */
+    public static byte state(CacheGroupContext grp, long mvccCrd, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        return state(grp.shared().coordinators(), mvccCrd, mvccCntr, mvccOpCntr);
+    }
+
+    /**
+     * @param proc Mvcc processor.
+     * @param mvccCrd Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @return TxState
+     * @see TxState
+     * @throws IgniteCheckedException If failed.
+     */
+    private static byte state(MvccProcessor proc, long mvccCrd, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        if (compare(INITIAL_VERSION, mvccCrd, mvccCntr, mvccOpCntr) == 0)
+            return TxState.COMMITTED; // Initial version is always committed;
+
+        if ((mvccOpCntr & MVCC_HINTS_MASK) != 0)
+            return (byte)(mvccOpCntr >>> MVCC_HINTS_BIT_OFF);
+
+        return proc.state(mvccCrd, mvccCntr);
+    }
+
+    /**
+     * Checks if version is visible from the given snapshot.
+     *
+     * @param cctx Cache context.
+     * @param snapshot Snapshot.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @param opCntr Operation counter.
+     * @return {@code True} if visible.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static boolean isVisible(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr,
+        int opCntr) throws IgniteCheckedException {
+        return isVisible(cctx, snapshot, mvccCrd, mvccCntr, opCntr, true);
+    }
+
+    /**
+     * Checks if version is visible from the given snapshot.
+     *
+     * @param cctx Cache context.
+     * @param snapshot Snapshot.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @param opCntr Operation counter.
+     * @param useTxLog {@code True} if TxLog should be used.
+     * @return {@code True} if visible.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static boolean isVisible(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr,
+        int opCntr, boolean useTxLog) throws IgniteCheckedException {
+        if (mvccCrd == MVCC_CRD_COUNTER_NA) {
+            assert mvccCntr == MVCC_COUNTER_NA && opCntr == MVCC_OP_COUNTER_NA
+                : "rowVer=" + mvccVersion(mvccCrd, mvccCntr, opCntr) + ", snapshot=" + snapshot;
+
+            return false; // Unassigned version is always invisible
+        }
+
+        if (compare(INITIAL_VERSION, mvccCrd, mvccCntr, opCntr) == 0)
+            return true; // Initial version is always visible
+
+        long snapshotCrd = snapshot.coordinatorVersion();
+
+        long snapshotCntr = snapshot.counter();
+        int snapshotOpCntr = snapshot.operationCounter();
+
+        if (mvccCrd > snapshotCrd)
+            return false; // Rows in the future are never visible.
+
+        if (mvccCrd < snapshotCrd)
+            // Don't check the row with TxLog if the row is expected to be committed.
+            return !useTxLog || isCommitted(cctx, mvccCrd, mvccCntr, opCntr);
+
+        if (mvccCntr > snapshotCntr) // we don't see future updates
+            return false;
+
+        if (mvccCntr == snapshotCntr) {
+            assert opCntr <= snapshotOpCntr : "rowVer=" + mvccVersion(mvccCrd, mvccCntr, opCntr) + ", snapshot=" + snapshot;
+
+            return opCntr < snapshotOpCntr; // we don't see own pending updates
+        }
+
+        if (snapshot.activeTransactions().contains(mvccCntr)) // we don't see of other transactions' pending updates
+            return false;
+
+        if (!useTxLog)
+            return true; // The checking row is expected to be committed.
+
+        byte state = state(cctx, mvccCrd, mvccCntr, opCntr);
+
+        if (state != TxState.COMMITTED && state != TxState.ABORTED)
+            throw unexpectedStateException(cctx, state, mvccCrd, mvccCntr, opCntr, snapshot);
+
+        return state == TxState.COMMITTED;
+    }
+
+    /**
+     *
+     * @param grp Cache group context.
+     * @param state State.
+     * @param crd Mvcc coordinator counter.
+     * @param cntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     * @return State exception.
+     */
+    public static IgniteTxMvccVersionCheckedException unexpectedStateException(
+        CacheGroupContext grp, byte state, long crd, long cntr,
+        int opCntr) {
+        return unexpectedStateException(grp.shared().kernalContext(), state, crd, cntr, opCntr, null);
+    }
+
+    /**
+     *
+     * @param cctx Cache context.
+     * @param state State.
+     * @param crd Mvcc coordinator counter.
+     * @param cntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     * @param snapshot Mvcc snapshot
+     * @return State exception.
+     */
+    public static IgniteTxMvccVersionCheckedException unexpectedStateException(
+        GridCacheContext cctx, byte state, long crd, long cntr,
+        int opCntr, MvccSnapshot snapshot) {
+        return unexpectedStateException(cctx.kernalContext(), state, crd, cntr, opCntr, snapshot);
+    }
+
+    /** */
+    private static IgniteTxMvccVersionCheckedException unexpectedStateException(GridKernalContext ctx, byte state, long crd, long cntr,
+        int opCntr, MvccSnapshot snapshot) {
+        String msg = "Unexpected state: [state=" + state + ", rowVer=" + crd + ":" + cntr + ":" + opCntr;
+
+        if (snapshot != null)
+            msg += ", txVer=" + snapshot.coordinatorVersion() + ":" + snapshot.counter() + ":" + snapshot.operationCounter();
+
+        msg += ", localNodeId=" + ctx.localNodeId()  + "]";
+
+        return new IgniteTxMvccVersionCheckedException(msg);
+    }
+
+    /**
+     * Checks visibility of the given row versions from the given snapshot.
+     *
+     * @param cctx Context.
+     * @param snapshot Snapshot.
+     * @param crd Mvcc coordinator counter.
+     * @param cntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     * @param link Link to data row (new version is located there).
+     * @return Visibility status.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static boolean isVisible(GridCacheContext cctx, MvccSnapshot snapshot, long crd, long cntr,
+        int opCntr, long link) throws IgniteCheckedException {
+        return isVisible(cctx, snapshot, crd, cntr, opCntr, false)
+            && isVisible(cctx, link, snapshot);
+    }
+
+    /**
+     * Checks if a row has not empty new version (xid_max).
+     *
+     * @param row Row.
+     * @return {@code True} if row has a new version.
+     */
+    public static boolean hasNewVersion(MvccUpdateVersionAware row) {
+        assert row.newMvccCoordinatorVersion() == MVCC_CRD_COUNTER_NA
+            || mvccVersionIsValid(row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter());
+
+        return row.newMvccCoordinatorVersion() > MVCC_CRD_COUNTER_NA;
+    }
+
+    /**
+     * Checks if a row's new version is visible for the given snapshot.
+     *
+     * @param cctx Cache context.
+     * @param link Link to the row.
+     * @param snapshot Mvcc snapshot.
+     * @return {@code True} if row is visible for the given snapshot.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static int getVisibleState(GridCacheContext cctx, long link, MvccSnapshot snapshot)
+        throws IgniteCheckedException {
+        return invoke(cctx, link, getVisibleState, snapshot);
+    }
+
+    /**
+     * Returns new version of row (xid_max) if any.
+     *
+     * @param cctx Cache context.
+     * @param link Link to the row.
+     * @return New {@code MvccVersion} if row has xid_max, or null if doesn't.
+     * @throws IgniteCheckedException If failed.
+     */
+    public static MvccVersion getNewVersion(GridCacheContext cctx, long link)
+        throws IgniteCheckedException {
+        return invoke(cctx, link, getNewVer, null);
+    }
+
+    /**
+     * Compares row version (xid_min) with the given version.
+     *
+     * @param row Row.
+     * @param ver Version.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(MvccVersionAware row, MvccVersion ver) {
+        return compare(row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter(),
+            ver.coordinatorVersion(), ver.counter(), ver.operationCounter());
+    }
+
+    /**
+     * Compares to pairs of MVCC versions. See {@link Comparable}.
+     *
+     * @param mvccVerLeft First MVCC version.
+     * @param mvccCrdRight Second coordinator version.
+     * @param mvccCntrRight Second counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(MvccVersion mvccVerLeft, long mvccCrdRight, long mvccCntrRight) {
+        return compare(mvccVerLeft.coordinatorVersion(), mvccVerLeft.counter(), mvccCrdRight, mvccCntrRight);
+    }
+
+    /**
+     * Compares to pairs of MVCC versions. See {@link Comparable}.
+     *
+     * @param row First MVCC version.
+     * @param mvccCrdRight Second coordinator version.
+     * @param mvccCntrRight Second counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(MvccVersionAware row, long mvccCrdRight, long mvccCntrRight) {
+        return compare(row.mvccCoordinatorVersion(), row.mvccCounter(), mvccCrdRight, mvccCntrRight);
+    }
+
+    /**
+     * Compares to pairs of MVCC versions. See {@link Comparable}.
+     *
+     * @param mvccVerLeft First MVCC version.
+     * @param mvccCrdRight Second coordinator version.
+     * @param mvccCntrRight Second counter.
+     * @param mvccOpCntrRight Second operation counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(MvccVersion mvccVerLeft, long mvccCrdRight, long mvccCntrRight, int mvccOpCntrRight) {
+        return compare(mvccVerLeft.coordinatorVersion(), mvccVerLeft.counter(),
+            mvccVerLeft.operationCounter(), mvccCrdRight, mvccCntrRight, mvccOpCntrRight);
+    }
+
+    /**
+     * Compares to pairs of coordinator/counter versions. See {@link Comparable}.
+     *
+     * @param mvccCrdLeft First coordinator version.
+     * @param mvccCntrLeft First counter version.
+     * @param mvccOpCntrLeft First operation counter.
+     * @param other The object to compare with.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(long mvccCrdLeft, long mvccCntrLeft, int mvccOpCntrLeft, MvccVersionAware other) {
+        return compare(mvccCrdLeft, mvccCntrLeft, mvccOpCntrLeft,
+            other.mvccCoordinatorVersion(), other.mvccCounter(), other.mvccOperationCounter());
+    }
+
+    /**
+     * Compares to pairs of coordinator/counter versions. See {@link Comparable}.
+     *
+     * @param mvccCrdLeft First coordinator version.
+     * @param mvccCntrLeft First counter version.
+     * @param mvccCrdRight Second coordinator version.
+     * @param mvccCntrRight Second counter version.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(long mvccCrdLeft, long mvccCntrLeft, long mvccCrdRight, long mvccCntrRight) {
+        return compare(mvccCrdLeft, mvccCntrLeft, 0, mvccCrdRight, mvccCntrRight, 0);
+    }
+
+    /**
+     * Compares to pairs of coordinator/counter versions. See {@link Comparable}.
+     *
+     * @param mvccCrdLeft First coordinator version.
+     * @param mvccCntrLeft First counter version.
+     * @param mvccOpCntrLeft First operation counter.
+     * @param mvccCrdRight Second coordinator version.
+     * @param mvccCntrRight Second counter version.
+     * @param mvccOpCntrRight Second operation counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compare(long mvccCrdLeft, long mvccCntrLeft, int mvccOpCntrLeft, long mvccCrdRight,
+        long mvccCntrRight, int mvccOpCntrRight) {
+        int cmp;
+
+        if ((cmp = Long.compare(mvccCrdLeft, mvccCrdRight)) != 0
+            || (cmp = Long.compare(mvccCntrLeft, mvccCntrRight)) != 0
+            || (cmp = Integer.compare(mvccOpCntrLeft & ~MVCC_HINTS_MASK, mvccOpCntrRight & ~MVCC_HINTS_MASK)) != 0)
+            return cmp;
+
+        return 0;
+    }
+
+    /**
+     * Compares new row version (xid_max) with the given counter and coordinator versions.
+     *
+     * @param row Row.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compareNewVersion(MvccUpdateVersionAware row, long mvccCrd, long mvccCntr) {
+        return compare(row.newMvccCoordinatorVersion(), row.newMvccCounter(), mvccCrd, mvccCntr);
+    }
+
+    /**
+     * Compares new row version (xid_max) with the given counter and coordinator versions.
+     *
+     * @param row Row.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compareNewVersion(MvccUpdateVersionAware row, long mvccCrd, long mvccCntr, int opCntr) {
+        return compare(row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter(), mvccCrd, mvccCntr, opCntr);
+    }
+
+    /**
+     * Compares new row version (xid_max) with the given version.
+     *
+     * @param row Row.
+     * @param ver Version.
+     * @return Comparison result, see {@link Comparable}.
+     */
+    public static int compareNewVersion(MvccUpdateVersionAware row, MvccVersion ver) {
+        return compare(row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter(),
+            ver.coordinatorVersion(), ver.counter(), ver.operationCounter());
+    }
+
+    /**
+     * @param crdVer Mvcc coordinator version.
+     * @param cntr Counter.
+     * @param opCntr Operation counter.
+     * @return Always {@code true}.
+     */
+    public static boolean mvccVersionIsValid(long crdVer, long cntr, int opCntr) {
+        return mvccVersionIsValid(crdVer, cntr) && opCntr != MVCC_OP_COUNTER_NA;
+    }
+
+    /**
+     * @param crdVer Mvcc coordinator version.
+     * @param cntr Counter.
+     * @return {@code True} if version is valid.
+     */
+    public static boolean mvccVersionIsValid(long crdVer, long cntr) {
+        return crdVer > MVCC_CRD_COUNTER_NA && cntr != MVCC_COUNTER_NA;
+    }
+
+    /**
+     * @param topVer Topology version for cache operation.
+     * @return Error.
+     */
+    public static ClusterTopologyServerNotFoundException noCoordinatorError(AffinityTopologyVersion topVer) {
+        return new ClusterTopologyServerNotFoundException("Mvcc coordinator is not assigned for " +
+            "topology version: " + topVer);
+    }
+
+    /**
+     * @return Error.
+     */
+    public static ClusterTopologyServerNotFoundException noCoordinatorError() {
+        return new ClusterTopologyServerNotFoundException("Mvcc coordinator is not assigned.");
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param link Link to the row.
+     * @param snapshot Mvcc snapshot.
+     * @return {@code True} if row is updated for given snapshot.
+     * @throws IgniteCheckedException If failed.
+     */
+    private static boolean isVisible(GridCacheContext cctx, long link,
+        MvccSnapshot snapshot)
+        throws IgniteCheckedException {
+        return invoke(cctx, link, isVisible, snapshot);
+    }
+
+    /**
+     * Encapsulates common logic for working with row mvcc info: page locking/unlocking, checks and other.
+     * Strategy pattern.
+     *
+     * @param cctx Cache group.
+     * @param link Row link.
+     * @param clo Closure to apply.
+     * @param snapshot Mvcc snapshot.
+     * @param <R> Return type.
+     * @return Result.
+     * @throws IgniteCheckedException If failed.
+     */
+    private static <R> R invoke(GridCacheContext cctx, long link, MvccClosure<R> clo, MvccSnapshot snapshot)
+        throws IgniteCheckedException {
+        assert cctx.mvccEnabled();
+
+        PageMemory pageMem = cctx.dataRegion().pageMemory();
+        int grpId = cctx.groupId();
+
+        long pageId = pageId(link);
+        long page = pageMem.acquirePage(grpId, pageId);
+
+        try {
+            long pageAddr = pageMem.readLock(grpId, pageId, page);
+
+            try{
+                DataPageIO dataIo = DataPageIO.VERSIONS.forPage(pageAddr);
+
+                int offset = dataIo.getPayloadOffset(pageAddr, itemId(link), pageMem.pageSize(), MVCC_INFO_SIZE);
+
+                long mvccCrd = dataIo.mvccCoordinator(pageAddr, offset);
+                long mvccCntr = dataIo.mvccCounter(pageAddr, offset);
+                int mvccOpCntr = dataIo.mvccOperationCounter(pageAddr, offset);
+
+                assert mvccVersionIsValid(mvccCrd, mvccCntr, mvccOpCntr) : mvccVersion(mvccCrd, mvccCntr, mvccOpCntr);
+
+                long newMvccCrd = dataIo.newMvccCoordinator(pageAddr, offset);
+                long newMvccCntr = dataIo.newMvccCounter(pageAddr, offset);
+                int newMvccOpCntr = dataIo.newMvccOperationCounter(pageAddr, offset);
+
+                assert newMvccCrd == MVCC_CRD_COUNTER_NA || mvccVersionIsValid(newMvccCrd, newMvccCntr, newMvccOpCntr)
+                    : mvccVersion(newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+                return clo.apply(cctx, snapshot, mvccCrd, mvccCntr, mvccOpCntr, newMvccCrd, newMvccCntr, newMvccOpCntr);
+            }
+            finally {
+                pageMem.readUnlock(grpId, pageId, page);
+            }
+        }
+        finally {
+            pageMem.releasePage(grpId, pageId, page);
+        }
+    }
+
+    /**
+     *
+     * @param cctx Cache context.
+     * @param mvccCrd Coordinator version.
+     * @param mvccCntr Counter.
+     * @return {@code True} in case the corresponding transaction is in {@code TxState.COMMITTED} state.
+     * @throws IgniteCheckedException If failed.
+     */
+    private static boolean isCommitted(GridCacheContext cctx, long mvccCrd, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        return state(cctx, mvccCrd, mvccCntr, mvccOpCntr) == TxState.COMMITTED;
+    }
+
+    /**
+     * Throw an {@link UnsupportedOperationException} if this cache is transactional and MVCC is enabled with
+     * appropriate message about corresponding operation type.
+     * @param cctx Cache context.
+     * @param opType operation type to mention in error message.
+     */
+    public static void verifyMvccOperationSupport(GridCacheContext<?, ?> cctx, String opType) {
+        if (cctx.mvccEnabled())
+            throw new UnsupportedOperationException(opType + " operations are not supported on transactional " +
+                "caches when MVCC is enabled.");
+    }
+
+    /**
+     * Checks transaction state.
+     * @param tx Transaction.
+     * @return Checked transaction.
+     */
+    public static GridNearTxLocal checkActive(GridNearTxLocal tx) {
+        if (tx != null && tx.state() != TransactionState.ACTIVE)
+            throw new IgniteSQLException("Transaction is already completed.", TRANSACTION_COMPLETED);
+
+        return tx;
+    }
+
+
+    /**
+     * @param ctx Grid kernal context.
+     * @return Currently started user transaction, or {@code null} if none started.
+     */
+    @Nullable public static GridNearTxLocal tx(GridKernalContext ctx) {
+        return tx(ctx, null);
+    }
+
+    /**
+     * @param ctx Grid kernal context.
+     * @param txId Transaction ID.
+     * @return Currently started user transaction, or {@code null} if none started.
+     */
+    @Nullable public static GridNearTxLocal tx(GridKernalContext ctx, @Nullable GridCacheVersion txId) {
+        IgniteTxManager tm = ctx.cache().context().tm();
+
+        IgniteInternalTx tx0 = txId == null ? tm.tx() : tm.tx(txId);
+
+        GridNearTxLocal tx = tx0 != null && tx0.user() ? (GridNearTxLocal)tx0 : null;
+
+        if (tx != null) {
+            if (!tx.pessimistic() || !tx.repeatableRead()) {
+                tx.setRollbackOnly();
+
+                throw new IgniteSQLException("Only pessimistic repeatable read transactions are supported at the moment.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+            }
+
+            if (!tx.isOperationAllowed(true)) {
+                tx.setRollbackOnly();
+
+                throw new IgniteSQLException("SQL queries and cache operations " +
+                    "may not be used in the same transaction.", IgniteQueryErrorCode.TRANSACTION_TYPE_MISMATCH);
+            }
+        }
+
+        return tx;
+    }
+
+
+    /**
+     * @param ctx Grid kernal context.
+     * @param timeout Transaction timeout.
+     * @return Newly started SQL transaction.
+     */
+    public static GridNearTxLocal txStart(GridKernalContext ctx, long timeout) {
+        return txStart(ctx, null, timeout);
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param timeout Transaction timeout.
+     * @return Newly started SQL transaction.
+     */
+    public static GridNearTxLocal txStart(GridCacheContext cctx, long timeout) {
+        return txStart(cctx.kernalContext(), cctx, timeout);
+    }
+
+    /**
+     * @param ctx Grid kernal context.
+     * @param cctx Cache context.
+     * @param timeout Transaction timeout.
+     * @return Newly started SQL transaction.
+     */
+    private static GridNearTxLocal txStart(GridKernalContext ctx, @Nullable GridCacheContext cctx, long timeout) {
+        if (timeout == 0) {
+            TransactionConfiguration tcfg = cctx != null ?
+                CU.transactionConfiguration(cctx, ctx.config()) : null;
+
+            if (tcfg != null)
+                timeout = tcfg.getDefaultTxTimeout();
+        }
+
+        GridNearTxLocal tx = ctx.cache().context().tm().newTx(
+            false,
+            false,
+            cctx != null && cctx.systemTx() ? cctx : null,
+            PESSIMISTIC,
+            REPEATABLE_READ,
+            timeout,
+            cctx == null || !cctx.skipStore(),
+            true,
+            0,
+            null
+        );
+
+        tx.syncMode(FULL_SYNC);
+
+        return tx;
+    }
+
+    /**
+     * @param ctx Grid kernal context.
+     * @return Whether MVCC is enabled or not.
+     */
+    public static boolean mvccEnabled(GridKernalContext ctx) {
+        return ctx.coordinators().mvccEnabled();
+    }
+
+    /**
+     * Initialises MVCC filter and returns MVCC query tracker if needed.
+     * @param cctx Cache context.
+     * @param startTx Start transaction flag.
+     * @return MVCC query tracker.
+     * @throws IgniteCheckedException If failed.
+     */
+    @NotNull public static MvccQueryTracker mvccTracker(GridCacheContext cctx, boolean startTx) throws IgniteCheckedException {
+        assert cctx != null && cctx.mvccEnabled();
+
+        GridNearTxLocal tx = tx(cctx.kernalContext());
+
+        if (tx == null && startTx)
+            tx = txStart(cctx, 0);
+
+        return mvccTracker(cctx, tx);
+    }
+
+    /**
+     * Initialises MVCC filter and returns MVCC query tracker if needed.
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @return MVCC query tracker.
+     * @throws IgniteCheckedException If failed.
+     */
+    @NotNull public static MvccQueryTracker mvccTracker(GridCacheContext cctx,
+        GridNearTxLocal tx) throws IgniteCheckedException {
+        MvccQueryTracker tracker;
+
+        if (tx == null)
+            tracker = new MvccQueryTrackerImpl(cctx);
+        else if ((tracker = tx.mvccQueryTracker()) == null)
+            tracker = new StaticMvccQueryTracker(cctx, requestSnapshot(cctx, tx));
+
+        if (tracker.snapshot() == null)
+            // TODO IGNITE-7388
+            tracker.requestSnapshot().get();
+
+        return tracker;
+    }
+
+    /**
+     * @param cctx Cache context.
+     * @param tx Transaction.
+     * @throws IgniteCheckedException If failed.
+     * @return Mvcc snapshot.
+     */
+    public static MvccSnapshot requestSnapshot(GridCacheContext cctx,
+        GridNearTxLocal tx) throws IgniteCheckedException {
+        MvccSnapshot snapshot;
+
+        tx = checkActive(tx);
+
+        if ((snapshot = tx.mvccSnapshot()) == null) {
+            MvccProcessor prc = cctx.shared().coordinators();
+
+            snapshot = prc.tryRequestSnapshotLocal(tx);
+
+            if (snapshot == null)
+                // TODO IGNITE-7388
+                snapshot = prc.requestSnapshotAsync(tx).get();
+
+            tx.mvccSnapshot(snapshot);
+        }
+
+        return snapshot;
+    }
+
+    /**
+     * Throws atomicity modes compatibility validation exception.
+     *
+     * @param ctx1 Cache context.
+     * @param ctx2 Another cache context.
+     */
+    public static void throwAtomicityModesMismatchException(GridCacheContext ctx1, GridCacheContext ctx2) {
+        throw new IgniteException("Caches with transactional_snapshot atomicity mode cannot participate in the same" +
+            " transaction with caches having another atomicity mode. [cacheName=" + ctx1.name() +
+            ", cacheMode=" + ctx1.config().getAtomicityMode() +
+            ", anotherCacheName=" + ctx2.name() + " anotherCacheMode=" + ctx2.config().getAtomicityMode() + ']');
+    }
+
+    /** */
+    private static MvccVersion mvccVersion(long crd, long cntr, int opCntr) {
+        return new MvccVersionImpl(crd, cntr, opCntr);
+    }
+
+    /**
+     * Mvcc closure interface.
+     * @param <R> Return type.
+     */
+    private interface MvccClosure<R> {
+        /**
+         * Runs closure over the Mvcc info.
+         * @param snapshot Mvcc snapshot.
+         * @param mvccCrd Coordinator version.
+         * @param mvccCntr Counter.
+         * @param mvccOpCntr Operation counter.
+         * @param newMvccCrd New mvcc coordinator
+         * @param newMvccCntr New mvcc counter.
+         * @param newMvccOpCntr New mvcc operation counter.
+         * @return Result.
+         */
+        public R apply(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr, int mvccOpCntr,
+            long newMvccCrd, long newMvccCntr, int newMvccOpCntr) throws IgniteCheckedException;
+    }
+
+    /**
+     * Closure for checking row visibility for snapshot.
+     */
+    private static class GetVisibleState implements MvccClosure<Integer> {
+        /** {@inheritDoc} */
+        @Override public Integer apply(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr,
+            int mvccOpCntr, long newMvccCrd, long newMvccCntr, int newMvccOpCntr) throws IgniteCheckedException {
+
+            if (!isVisible(cctx, snapshot, mvccCrd, mvccCntr, mvccOpCntr))
+                return MVCC_INVISIBLE;
+
+            if (newMvccCrd == MVCC_CRD_COUNTER_NA)
+                return MVCC_VISIBLE;
+
+            assert mvccVersionIsValid(newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+            if (mvccCrd == newMvccCrd && mvccCntr == newMvccCntr) // Double-changed in scope of one transaction.
+                return MVCC_VISIBLE_REMOVED;
+
+            return isVisible(cctx, snapshot, newMvccCrd, newMvccCntr, newMvccOpCntr) ? MVCC_VISIBLE_REMOVED :
+                MVCC_VISIBLE;
+        }
+    }
+
+    /**
+     * Closure for checking whether the row is visible for given snapshot.
+     */
+    private static class IsVisible implements MvccClosure<Boolean> {
+        /** {@inheritDoc} */
+        @Override public Boolean apply(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr,
+            int mvccOpCntr, long newMvccCrd, long newMvccCntr, int newMvccOpCntr) throws IgniteCheckedException {
+
+            if (!isVisible(cctx, snapshot, mvccCrd, mvccCntr, mvccOpCntr))
+                return false;
+
+            if (newMvccCrd == MVCC_CRD_COUNTER_NA)
+                return true;
+
+            assert mvccVersionIsValid(newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+            if (mvccCrd == newMvccCrd && mvccCntr == newMvccCntr) // Double-changed in scope of one transaction.
+                return false;
+
+            return !isVisible(cctx, snapshot, newMvccCrd, newMvccCntr, newMvccOpCntr);
+        }
+    }
+
+    /**
+     * Closure for getting xid_max version of row.
+     */
+    private static class GetNewVersion implements MvccClosure<MvccVersion> {
+        /** {@inheritDoc} */
+        @Override public MvccVersion apply(GridCacheContext cctx, MvccSnapshot snapshot, long mvccCrd, long mvccCntr,
+            int mvccOpCntr, long newMvccCrd, long newMvccCntr, int newMvccOpCntr) {
+            return newMvccCrd == MVCC_CRD_COUNTER_NA ? null : mvccVersion(newMvccCrd, newMvccCntr, newMvccOpCntr);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersion.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersion.java
new file mode 100644
index 0000000..f43d3b9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersion.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * MVCC version. This is unique version allowing to order all reads and writes within a cluster. Consists of two parts:
+ * - coordinator version - number which increases on every coordinator change;
+ * - counter - local coordinator counter which is increased on every update.
+ */
+public interface MvccVersion extends Comparable<MvccVersion> {
+    /**
+     * @return Coordinator version.
+     */
+    public long coordinatorVersion();
+
+    /**
+     * @return Local counter.
+     */
+    public long counter();
+
+    /**
+     * @return Operation id in scope of current transaction.
+     */
+    public int operationCounter();
+
+    /** {@inheritDoc} */
+    @Override default int compareTo(@NotNull MvccVersion another) {
+        return MvccUtils.compare(coordinatorVersion(), counter(), operationCounter(),
+            another.coordinatorVersion(), another.counter(), another.operationCounter());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionAware.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionAware.java
new file mode 100644
index 0000000..3bfefbc
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionAware.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public interface MvccVersionAware {
+    /**
+     * @return Mvcc coordinator version.
+     */
+    public long mvccCoordinatorVersion();
+
+    /**
+     * @return Mvcc counter.
+     */
+    public long mvccCounter();
+
+    /**
+     * @return Mvcc operation counter.
+     */
+    public int mvccOperationCounter();
+
+    /**
+     * @return Tx state hint for 'created' mvcc version.
+     */
+    public byte mvccTxState();
+
+    /**
+     * Copies mvcc version from another object.
+     * @param other Info source.
+     */
+    public default void mvccVersion(MvccVersionAware other) {
+        mvccVersion(other.mvccCoordinatorVersion(), other.mvccCounter(), other.mvccOperationCounter());
+    }
+
+    /**
+     * Sets mvcc version.
+     * @param ver Mvcc version.
+     */
+    public default void mvccVersion(MvccVersion ver) {
+        mvccVersion(ver.coordinatorVersion(), ver.counter(), ver.operationCounter());
+    }
+
+    /**
+     * Sets mvcc version.
+     * @param crd Mvcc coordinator version.
+     * @param cntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     */
+    public default void mvccVersion(long crd, long cntr, int opCntr) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @return Mvcc version.
+     */
+    public default MvccVersion mvccVersion() {
+        return new MvccVersionImpl(mvccCoordinatorVersion(), mvccCounter(), mvccOperationCounter());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionImpl.java
new file mode 100644
index 0000000..882560d
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/MvccVersionImpl.java
@@ -0,0 +1,193 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Base MVCC version implementation.
+ */
+public class MvccVersionImpl implements MvccVersion, Message {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Coordinator version. */
+    private long crdVer;
+
+    /** Local counter. */
+    private long cntr;
+
+    /** Operation counter. */
+    private int opCntr;
+
+    /**
+     * Constructor.
+     */
+    public MvccVersionImpl() {
+        // No-op.
+    }
+
+    /**
+     * @param crdVer Coordinator version.
+     * @param cntr Counter.
+     * @param opCntr Operation counter.
+     */
+    public MvccVersionImpl(long crdVer, long cntr, int opCntr) {
+        this.crdVer = crdVer;
+        this.cntr = cntr;
+        this.opCntr = opCntr;
+    }
+
+    /**
+     * @return Coordinator version.
+     */
+    @Override public long coordinatorVersion() {
+        return crdVer;
+    }
+
+    /**
+     * @return Local counter.
+     */
+    @Override public long counter() {
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int operationCounter() {
+        return opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (o == null || getClass() != o.getClass())
+            return false;
+
+        MvccVersionImpl that = (MvccVersionImpl) o;
+
+        return crdVer == that.crdVer && cntr == that.cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = (int) (crdVer ^ (crdVer >>> 32));
+
+        res = 31 * res + (int) (cntr ^ (cntr >>> 32));
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("cntr", cntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeLong("crdVer", crdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 2:
+                if (!writer.writeInt("opCntr", opCntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                cntr = reader.readLong("cntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                crdVer = reader.readLong("crdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 2:
+                opCntr = reader.readInt("opCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccVersionImpl.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 148;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 3;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccVersionImpl.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/StaticMvccQueryTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/StaticMvccQueryTracker.java
new file mode 100644
index 0000000..52fb1db
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/StaticMvccQueryTracker.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Simple MVCC tracker used only as an Mvcc snapshot holder.
+ */
+public class StaticMvccQueryTracker implements MvccQueryTracker {
+    /** */
+    private final MvccSnapshot snapshot;
+    /** */
+    private final GridCacheContext cctx;
+
+    /**
+     * @param cctx Cache context.
+     * @param snapshot Mvcc snapshot.
+     */
+    public StaticMvccQueryTracker(GridCacheContext cctx, MvccSnapshot snapshot) {
+        this.snapshot = snapshot;
+        this.cctx = cctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccSnapshot snapshot() {
+        assert snapshot != null : this;
+
+        return snapshot;
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheContext context() {
+        return cctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public AffinityTopologyVersion topologyVersion() {
+        return AffinityTopologyVersion.NONE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshot() {
+        return new GridFinishedFuture<>(snapshot);
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<MvccSnapshot> requestSnapshot(@NotNull final AffinityTopologyVersion topVer) {
+        return new GridFinishedFuture<>(snapshot);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void requestSnapshot(@NotNull AffinityTopologyVersion topVer, @NotNull MvccSnapshotResponseListener lsnr) {
+        lsnr.onResponse(snapshot);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onDone() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public IgniteInternalFuture<Void> onDone(@NotNull GridNearTxLocal tx, boolean commit) {
+        throw new UnsupportedOperationException("Operation is not supported.");
+    }
+
+    /** {@inheritDoc} */
+    @Override public long onMvccCoordinatorChange(MvccCoordinator newCrd) {
+        return MVCC_TRACKER_ID_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long id() {
+        return MVCC_TRACKER_ID_NA;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetrics.java
new file mode 100644
index 0000000..1de297f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetrics.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class VacuumMetrics {
+    /** */
+    private long cleanupRowsCnt;
+
+    /** */
+    private long scannedRowsCnt;
+
+    /** */
+    private long searchNanoTime;
+
+    /** */
+    private long cleanupNanoTime;
+
+    /**
+     * @return Cleanup rows count.
+     */
+    public long cleanupRowsCount() {
+        return cleanupRowsCnt;
+    }
+
+    /**
+     * @return Scanned rows count.
+     */
+    public long scannedRowsCount() {
+        return scannedRowsCnt;
+    }
+
+    /**
+     * @return Search nano time.
+     */
+    public long searchNanoTime() {
+        return searchNanoTime;
+    }
+
+    /**
+     * @return Cleanup nano time
+     */
+    public long cleanupNanoTime() {
+        return cleanupNanoTime;
+    }
+
+
+    /**
+     * @param delta Delta.
+     */
+    public void addCleanupRowsCnt(long delta) {
+        cleanupRowsCnt += delta;
+    }
+
+    /**
+     * @param delta Delta.
+     */
+    public void addScannedRowsCount(long delta) {
+        scannedRowsCnt += delta;
+    }
+
+    /**
+     * @param delta Delta.
+     */
+    public void addSearchNanoTime(long delta) {
+        searchNanoTime += delta;
+    }
+
+    /**
+     * @param delta Delta.
+     */
+    public void addCleanupNanoTime(long delta) {
+        cleanupNanoTime += delta;
+    }
+
+    /** */
+    @Override public String toString() {
+        return "VacuumMetrics[" +
+            "cleanupRowsCnt=" + cleanupRowsCnt +
+            ", scannedRowsCnt=" + scannedRowsCnt +
+            ", searchNanoTime=" + Math.round((float)searchNanoTime / 1_000_000) +
+            " ms, cleanupNanoTime=" + Math.round((float)cleanupNanoTime / 1_000_000) +
+            " ms]";
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetricsReducer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetricsReducer.java
new file mode 100644
index 0000000..c952a48
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumMetricsReducer.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.lang.IgniteReducer;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Vacuum metrics reducer.
+ */
+public class VacuumMetricsReducer implements IgniteReducer<VacuumMetrics, VacuumMetrics> {
+    /** */
+    private static final long serialVersionUID = 7063457745963917386L;
+
+    /** */
+    private final VacuumMetrics m = new VacuumMetrics();
+
+    /** {@inheritDoc} */
+    @Override public boolean collect(@Nullable VacuumMetrics metrics) {
+        assert metrics != null;
+
+        m.addCleanupRowsCnt(metrics.cleanupRowsCount());
+        m.addScannedRowsCount(metrics.scannedRowsCount());
+        m.addSearchNanoTime(metrics.searchNanoTime());
+        m.addCleanupNanoTime(metrics.cleanupNanoTime());
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public VacuumMetrics reduce() {
+        return m;
+    }
+}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumTask.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumTask.java
new file mode 100644
index 0000000..9a0d9e2
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/VacuumTask.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Task for cleaning sing partition.
+ */
+public class VacuumTask extends GridFutureAdapter<VacuumMetrics> {
+    /** */
+    private final MvccSnapshot snapshot;
+
+    /** */
+    @GridToStringExclude
+    private final GridDhtLocalPartition part;
+
+    /**
+     * @param snapshot Snapshot.
+     * @param part Partition to cleanup.
+     */
+    VacuumTask(MvccSnapshot snapshot, GridDhtLocalPartition part) {
+        this.snapshot = snapshot;
+        this.part = part;
+    }
+
+    /**
+     * @return Snapshot.
+     */
+    public MvccSnapshot snapshot() {
+        return snapshot;
+    }
+
+    /**
+     * @return Partition to cleanup.
+     */
+    public GridDhtLocalPartition part() {
+        return part;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(VacuumTask.class, this, "partId", part.id());
+    }
+}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryCntr.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryCntr.java
new file mode 100644
index 0000000..0156c53
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryCntr.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccAckRequestQueryCntr implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long cntr;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccAckRequestQueryCntr() {
+        // No-op.
+    }
+
+    /**
+     * @param cntr Query counter.
+     */
+    public MvccAckRequestQueryCntr(long cntr) {
+        this.cntr = cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /**
+     * @return Counter.
+     */
+    public long counter() {
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("cntr", cntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                cntr = reader.readLong("cntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccAckRequestQueryCntr.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 140;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccAckRequestQueryCntr.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryId.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryId.java
new file mode 100644
index 0000000..7771f4d
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestQueryId.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccAckRequestQueryId implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long qryTrackerId;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccAckRequestQueryId() {
+        // No-op.
+    }
+
+    /**
+     * @param qryTrackerId Query tracker Id.
+     */
+    public MvccAckRequestQueryId(long qryTrackerId) {
+        this.qryTrackerId = qryTrackerId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /**
+     * @return Query tracker id.
+     */
+    public long queryTrackerId() {
+        return qryTrackerId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("qryTrackerId", qryTrackerId))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                qryTrackerId = reader.readLong("qryTrackerId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccAckRequestQueryId.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 145;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccAckRequestQueryId.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTx.java
new file mode 100644
index 0000000..69dfd25
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTx.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker.MVCC_TRACKER_ID_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+
+/**
+ *
+ */
+public class MvccAckRequestTx implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private static final int SKIP_RESPONSE_FLAG_MASK = 0x01;
+
+    /** */
+    private long futId;
+
+    /** */
+    private long txCntr;
+
+    /** */
+    private byte flags;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccAckRequestTx() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param txCntr Counter assigned to transaction.
+     */
+    public MvccAckRequestTx(long futId, long txCntr) {
+        this.futId = futId;
+        this.txCntr = txCntr;
+    }
+
+    /**
+     * @return Query counter.
+     */
+    public long queryCounter() {
+        return MVCC_COUNTER_NA;
+    }
+
+    /**
+     * @return Query tracker id.
+     */
+    public long queryTrackerId() {
+        return MVCC_TRACKER_ID_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /**
+     * @return {@code True} if response message is not needed.
+     */
+    public boolean skipResponse() {
+        return (flags & SKIP_RESPONSE_FLAG_MASK) != 0;
+    }
+
+    /**
+     * @param val {@code True} if response message is not needed.
+     */
+    public void skipResponse(boolean val) {
+        if (val)
+            flags |= SKIP_RESPONSE_FLAG_MASK;
+        else
+            flags &= ~SKIP_RESPONSE_FLAG_MASK;
+    }
+
+    /**
+     * @return Counter assigned tp transaction.
+     */
+    public long txCounter() {
+        return txCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeByte("flags", flags))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 2:
+                if (!writer.writeLong("txCntr", txCntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                flags = reader.readByte("flags");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 2:
+                txCntr = reader.readLong("txCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccAckRequestTx.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 137;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 3;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccAckRequestTx.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryCntr.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryCntr.java
new file mode 100644
index 0000000..99761c3
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryCntr.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccAckRequestTxAndQueryCntr extends MvccAckRequestTx {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long qryCntr;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccAckRequestTxAndQueryCntr() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param txCntr Counter assigned to transaction update.
+     * @param qryCntr Counter assigned for transaction reads.
+     */
+    public MvccAckRequestTxAndQueryCntr(long futId, long txCntr, long qryCntr) {
+        super(futId, txCntr);
+
+        this.qryCntr = qryCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long queryCounter() {
+        return qryCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeLong("qryCntr", qryCntr))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                qryCntr = reader.readLong("qryCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccAckRequestTxAndQueryCntr.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 146;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 4;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccAckRequestTxAndQueryCntr.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryId.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryId.java
new file mode 100644
index 0000000..89f09db
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccAckRequestTxAndQueryId.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccAckRequestTxAndQueryId extends MvccAckRequestTx {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long qryTrackerId;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccAckRequestTxAndQueryId() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param txCntr Counter assigned to transaction update.
+     * @param qryTrackerId Query tracker id.
+     */
+    public MvccAckRequestTxAndQueryId(long futId, long txCntr, long qryTrackerId) {
+        super(futId, txCntr);
+
+        this.qryTrackerId = qryTrackerId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long queryTrackerId() {
+        return qryTrackerId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!super.writeTo(buf, writer))
+            return false;
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 3:
+                if (!writer.writeLong("qryTrackerId", qryTrackerId))
+                    return false;
+
+                writer.incrementState();
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        if (!super.readFrom(buf, reader))
+            return false;
+
+        switch (reader.state()) {
+            case 3:
+                qryTrackerId = reader.readLong("qryTrackerId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccAckRequestTxAndQueryId.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 147;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 4;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccAckRequestTxAndQueryId.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccActiveQueriesMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccActiveQueriesMessage.java
new file mode 100644
index 0000000..4b78c24
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccActiveQueriesMessage.java
@@ -0,0 +1,132 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public class MvccActiveQueriesMessage implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private GridLongList activeQrys;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccActiveQueriesMessage() {
+        // No-op.
+    }
+
+    /**
+     * @param activeQrys Active queries.
+     */
+    public MvccActiveQueriesMessage(GridLongList activeQrys) {
+        this.activeQrys = activeQrys;
+    }
+
+    /**
+     * @return Active queries.
+     */
+    @Nullable public GridLongList activeQueries() {
+        return activeQrys;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeMessage("activeQrys", activeQrys))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                activeQrys = reader.readMessage("activeQrys");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccActiveQueriesMessage.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 149;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccActiveQueriesMessage.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccFutureResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccFutureResponse.java
new file mode 100644
index 0000000..72e4c52
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccFutureResponse.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccFutureResponse implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long futId;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccFutureResponse() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     */
+    public MvccFutureResponse(long futId) {
+        this.futId = futId;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccFutureResponse.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 138;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccFutureResponse.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccMessage.java
new file mode 100644
index 0000000..6d8b3c4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccMessage.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import org.apache.ignite.plugin.extensions.communication.Message;
+
+/**
+ * Common interface for all MVCC-related messages.
+ */
+public interface MvccMessage extends Message {
+    /**
+     * @return {@code True} if should wait for coordinator initialization.
+     */
+    public boolean waitForCoordinatorInit();
+
+    /**
+     * @return {@code True} if message should be processed from NIO thread.
+     */
+    public boolean processedFromNioThread();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccQuerySnapshotRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccQuerySnapshotRequest.java
new file mode 100644
index 0000000..75d33a7
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccQuerySnapshotRequest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * Request to get MVCC snapshot for a query.
+ */
+public class MvccQuerySnapshotRequest implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long futId;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccQuerySnapshotRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     */
+    public MvccQuerySnapshotRequest(long futId) {
+        this.futId = futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccQuerySnapshotRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 139;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccQuerySnapshotRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccSnapshotResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccSnapshotResponse.java
new file mode 100644
index 0000000..2c22616
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccSnapshotResponse.java
@@ -0,0 +1,320 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import org.apache.ignite.internal.GridDirectTransient;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccLongList;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshotWithoutTxs;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccSnapshotResponse implements MvccMessage, MvccSnapshot, MvccLongList {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long futId;
+
+    /** */
+    private long crdVer;
+
+    /** */
+    private long cntr;
+
+    /** */
+    private int opCntr;
+
+    /** */
+    @GridDirectTransient
+    private int txsCnt;
+
+    /** */
+    private long[] txs;
+
+    /** */
+    private long cleanupVer;
+
+    /** */
+    @GridDirectTransient
+    private long tracking;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccSnapshotResponse() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param crdVer Coordinator version.
+     * @param cntr Counter.
+     * @param opCntr Operation counter.
+     * @param cleanupVer Cleanup version.
+     * @param tracking Tracking number.
+     */
+    public void init(long futId, long crdVer, long cntr, int opCntr, long cleanupVer, long tracking) {
+        this.futId = futId;
+        this.crdVer = crdVer;
+        this.cntr = cntr;
+        this.opCntr = opCntr;
+        this.cleanupVer = cleanupVer;
+        this.tracking = tracking;
+
+        if (txsCnt > 0 && txs.length > txsCnt) // truncate if necessary
+            txs = Arrays.copyOf(txs, txsCnt);
+    }
+
+    /**
+     * @param txId Transaction counter.
+     */
+    public void addTx(long txId) {
+        if (txs == null)
+            txs = new long[4];
+        else if (txs.length == txsCnt)
+            txs = Arrays.copyOf(txs, txs.length << 1);
+
+        txs[txsCnt++] = txId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int size() {
+        return txsCnt;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long get(int i) {
+        return txs[i];
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean contains(long val) {
+        for (int i = 0; i < txsCnt; i++) {
+            if (txs[i] == val)
+                return true;
+        }
+
+        return false;
+    }
+
+    /**
+     * @return Tracking counter.
+     */
+    public long tracking() {
+        return tracking;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return false;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long cleanupVersion() {
+        return cleanupVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long counter() {
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int operationCounter() {
+        return opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void incrementOperationCounter() {
+        opCntr++;
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccLongList activeTransactions() {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public MvccSnapshot withoutActiveTransactions() {
+        if (txsCnt > 0)
+            return new MvccSnapshotWithoutTxs(crdVer, cntr, opCntr, cleanupVer);
+
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long coordinatorVersion() {
+        return crdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("cleanupVer", cleanupVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeLong("cntr", cntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 2:
+                if (!writer.writeLong("crdVer", crdVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 3:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeInt("opCntr", opCntr))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeLongArray("txs", txs))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                cleanupVer = reader.readLong("cleanupVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                cntr = reader.readLong("cntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 2:
+                crdVer = reader.readLong("crdVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 3:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                opCntr = reader.readInt("opCntr");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                txs = reader.readLongArray("txs");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                txsCnt = txs != null ? txs.length : 0;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccSnapshotResponse.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 141;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 6;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccSnapshotResponse.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccTxSnapshotRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccTxSnapshotRequest.java
new file mode 100644
index 0000000..cd30eb8
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccTxSnapshotRequest.java
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+import org.apache.ignite.internal.managers.communication.GridIoMessageFactory;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * Request to get MVCC snapshot for a new transaction.
+ */
+public class MvccTxSnapshotRequest implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long futId;
+
+    /**
+     * Required by {@link GridIoMessageFactory}.
+     */
+    public MvccTxSnapshotRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     */
+    public MvccTxSnapshotRequest(long futId) {
+        this.futId = futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+        }
+
+        return reader.afterMessageRead(MvccTxSnapshotRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 136;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccTxSnapshotRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccWaitTxsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccWaitTxsRequest.java
new file mode 100644
index 0000000..ae57507
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/msg/MvccWaitTxsRequest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.msg;
+
+import java.nio.ByteBuffer;
+
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ *
+ */
+public class MvccWaitTxsRequest implements MvccMessage {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** */
+    private long futId;
+
+    /** */
+    private GridLongList txs;
+
+    /**
+     *
+     */
+    public MvccWaitTxsRequest() {
+        // No-op.
+    }
+
+    /**
+     * @param futId Future ID.
+     * @param txs Transactions to wait for.
+     */
+    public MvccWaitTxsRequest(long futId, GridLongList txs) {
+        assert txs != null && txs.size() > 0 : txs;
+
+        this.futId = futId;
+        this.txs = txs;
+    }
+
+    /**
+     * @return Future ID.
+     */
+    public long futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Transactions to wait for.
+     */
+    public GridLongList transactions() {
+        return txs;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean waitForCoordinatorInit() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean processedFromNioThread() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeLong("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeMessage("txs", txs))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                futId = reader.readLong("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                txs = reader.readMessage("txs");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(MvccWaitTxsRequest.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return 142;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 2;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccWaitTxsRequest.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxKey.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxKey.java
new file mode 100644
index 0000000..92aff7b
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxKey.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ *
+ */
+public class TxKey {
+    /** */
+    private final long major;
+
+    /** */
+    private final long minor;
+
+    /**
+     * @param major Major version.
+     * @param minor Minor version
+     */
+    public TxKey(long major, long minor) {
+        this.major = major;
+        this.minor = minor;
+    }
+
+    /**
+     * @return Major version.
+     */
+    public long major() {
+        return major;
+    }
+
+    /**
+     * @return Minor version.
+     */
+    public long minor() {
+        return minor;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o) return true;
+        if (o == null || o.getClass() != TxKey.class) return false;
+
+        TxKey txKey = (TxKey) o;
+
+        return major == txKey.major && minor == txKey.minor;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int result = (int) (major ^ (major >>> 32));
+        result = 31 * result + (int) (minor ^ (minor >>> 32));
+        return result;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(TxKey.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java
new file mode 100644
index 0000000..905bfc4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLog.java
@@ -0,0 +1,584 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.pagemem.PageIdUtils;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord;
+import org.apache.ignite.internal.processors.cache.persistence.DbCheckpointListener;
+import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseListImpl;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
+import org.apache.ignite.internal.util.IgniteTree;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.pagemem.PageIdAllocator.FLAG_IDX;
+import static org.apache.ignite.internal.pagemem.PageIdAllocator.INDEX_PARTITION;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+
+/**
+ *
+ */
+public class TxLog implements DbCheckpointListener {
+    /** */
+    public static final String TX_LOG_CACHE_NAME = "TxLog";
+
+    /** */
+    public static final int TX_LOG_CACHE_ID = CU.cacheId(TX_LOG_CACHE_NAME);
+
+    /** */
+    private static final TxKey LOWEST = new TxKey(0, 0);
+
+    /** */
+    private final IgniteCacheDatabaseSharedManager mgr;
+
+    /** */
+    private ReuseListImpl reuseList;
+
+    /** */
+    private TxLogTree tree;
+
+    /** */
+    private ConcurrentMap<TxKey, Sync> keyMap = new ConcurrentHashMap<>();
+
+    /**
+     *
+     * @param ctx Kernal context.
+     * @param mgr Database shared manager.
+     */
+    public TxLog(GridKernalContext ctx, IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
+        this.mgr = mgr;
+
+        init(ctx);
+    }
+
+    /**
+     *
+     * @param ctx Kernal context.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void init(GridKernalContext ctx) throws IgniteCheckedException {
+        if (CU.isPersistenceEnabled(ctx.config())) {
+            mgr.checkpointReadLock();
+
+            try {
+                IgniteWriteAheadLogManager wal = ctx.cache().context().wal();
+                PageMemoryEx pageMemory = (PageMemoryEx)mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory();
+
+                long partMetaId = pageMemory.partitionMetaPageId(TX_LOG_CACHE_ID, 0);
+                long partMetaPage = pageMemory.acquirePage(TX_LOG_CACHE_ID, partMetaId);
+
+                long treeRoot, reuseListRoot;
+
+                boolean isNew = false;
+
+                try {
+                    long pageAddr = pageMemory.writeLock(TX_LOG_CACHE_ID, partMetaId, partMetaPage);
+
+                    try {
+                        if (PageIO.getType(pageAddr) != PageIO.T_PART_META) {
+                            // Initialize new page.
+                            PagePartitionMetaIO io = PagePartitionMetaIO.VERSIONS.latest();
+
+                            io.initNewPage(pageAddr, partMetaId, pageMemory.pageSize());
+
+                            treeRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, 0, PageMemory.FLAG_DATA);
+                            reuseListRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, 0, PageMemory.FLAG_DATA);
+
+                            assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA;
+                            assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA;
+
+                            io.setTreeRoot(pageAddr, treeRoot);
+                            io.setReuseListRoot(pageAddr, reuseListRoot);
+
+                            if (PageHandler.isWalDeltaRecordNeeded(pageMemory, TX_LOG_CACHE_ID, partMetaId, partMetaPage, wal, null))
+                                wal.log(new MetaPageInitRecord(
+                                    TX_LOG_CACHE_ID,
+                                    partMetaId,
+                                    io.getType(),
+                                    io.getVersion(),
+                                    treeRoot,
+                                    reuseListRoot
+                                ));
+
+                            isNew = true;
+                        }
+                        else {
+                            PagePartitionMetaIO io = PageIO.getPageIO(pageAddr);
+
+                            treeRoot = io.getTreeRoot(pageAddr);
+                            reuseListRoot = io.getReuseListRoot(pageAddr);
+
+                            assert PageIdUtils.flag(treeRoot) == PageMemory.FLAG_DATA :
+                                U.hexLong(treeRoot) + ", part=" + 0 + ", TX_LOG_CACHE_ID=" + TX_LOG_CACHE_ID;
+                            assert PageIdUtils.flag(reuseListRoot) == PageMemory.FLAG_DATA :
+                                U.hexLong(reuseListRoot) + ", part=" + 0 + ", TX_LOG_CACHE_ID=" + TX_LOG_CACHE_ID;
+                        }
+                    }
+                    finally {
+                        pageMemory.writeUnlock(TX_LOG_CACHE_ID, partMetaId, partMetaPage, null, isNew);
+                    }
+                }
+                finally {
+                    pageMemory.releasePage(TX_LOG_CACHE_ID, partMetaId, partMetaPage);
+                }
+
+                reuseList = new ReuseListImpl(
+                    TX_LOG_CACHE_ID,
+                    TX_LOG_CACHE_NAME,
+                    pageMemory,
+                    wal,
+                    reuseListRoot,
+                    isNew);
+
+                tree = new TxLogTree(pageMemory, wal, treeRoot, reuseList, ctx.failure(), isNew);
+
+                ((GridCacheDatabaseSharedManager)mgr).addCheckpointListener(this);
+            }
+            finally {
+                mgr.checkpointReadUnlock();
+            }
+        }
+        else {
+            PageMemory pageMemory = mgr.dataRegion(TX_LOG_CACHE_NAME).pageMemory();
+            ReuseList reuseList1 = mgr.reuseList(TX_LOG_CACHE_NAME);
+
+            long treeRoot;
+
+            if ((treeRoot = reuseList1.takeRecycledPage()) == 0L)
+                treeRoot = pageMemory.allocatePage(TX_LOG_CACHE_ID, INDEX_PARTITION, FLAG_IDX);
+
+            tree = new TxLogTree(pageMemory, null, treeRoot, reuseList1, ctx.failure(), true);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
+        reuseList.saveMetadata();
+    }
+
+    /**
+     *
+     * @param major Major version.
+     * @param minor Minor version.
+     * @return Transaction state for given version.
+     * @throws IgniteCheckedException If failed.
+     */
+    public byte get(long major, long minor) throws IgniteCheckedException {
+        return get(new TxKey(major, minor));
+    }
+
+    /**
+     *
+     * @param key Transaction key.
+     * @return Transaction state for given version.
+     * @throws IgniteCheckedException If failed.
+     */
+    public byte get(TxKey key) throws IgniteCheckedException {
+        TxRow row = tree.findOne(key);
+
+        return row == null ? TxState.NA : row.state();
+    }
+
+    /**
+     *
+     * @param key TxKey.
+     * @param state  Transaction state for given version.
+     * @param primary Flag if this is a primary node.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void put(TxKey key, byte state, boolean primary) throws IgniteCheckedException {
+        Sync sync = syncObject(key);
+
+        try {
+            mgr.checkpointReadLock();
+
+            try {
+                synchronized (sync) {
+                    tree.invoke(key, null, new TxLogUpdateClosure(key.major(), key.minor(), state, primary));
+                }
+            }
+            finally {
+                mgr.checkpointReadUnlock();
+            }
+        } finally {
+            evict(key, sync);
+        }
+    }
+
+    /**
+     * Removes all records less or equals to the given version.
+     *
+     * @param major Major version.
+     * @param minor Minor version.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void removeUntil(long major, long minor) throws IgniteCheckedException {
+        TraversingClosure clo = new TraversingClosure(major, minor);
+
+        tree.iterate(LOWEST, clo, clo);
+
+        if (clo.rows != null) {
+            for (TxKey row : clo.rows) {
+                remove(row);
+            }
+        }
+    }
+
+    /** */
+    private void remove(TxKey key) throws IgniteCheckedException {
+        Sync sync = syncObject(key);
+
+        try {
+            mgr.checkpointReadLock();
+
+            try {
+                synchronized (sync) {
+                    tree.removex(key);
+                }
+            }
+            finally {
+                mgr.checkpointReadUnlock();
+            }
+        } finally {
+            evict(key, sync);
+        }
+    }
+
+    /** */
+    private Sync syncObject(TxKey key) {
+        Sync sync = keyMap.get(key);
+
+        while (true) {
+            if (sync == null) {
+                Sync old = keyMap.putIfAbsent(key, sync = new Sync());
+
+                if (old == null)
+                    return sync;
+                else
+                    sync = old;
+            }
+            else {
+                int cntr = sync.counter;
+
+                while (cntr > 0) {
+                    if (sync.casCounter(cntr, cntr + 1))
+                        return sync;
+
+                    cntr = sync.counter;
+                }
+
+                sync = keyMap.get(key);
+            }
+        }
+    }
+
+    /** */
+    private void evict(TxKey key, Sync sync) {
+        assert sync != null;
+
+        int cntr = sync.counter;
+
+        while (true) {
+            assert cntr > 0;
+
+            if (!sync.casCounter(cntr, cntr - 1)) {
+                cntr = sync.counter;
+
+                continue;
+            }
+
+            if (cntr == 1) {
+                boolean removed = keyMap.remove(key, sync);
+
+                assert removed;
+            }
+
+            break;
+        }
+    }
+
+    /**
+     *
+     */
+    private static class TraversingClosure extends TxKey implements BPlusTree.TreeRowClosure<TxKey, TxRow> {
+        /** */
+        private List<TxKey> rows;
+
+        /**
+         *
+         * @param major Major version.
+         * @param minor Minor version.
+         */
+        TraversingClosure(long major, long minor) {
+            super(major, minor);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean apply(BPlusTree<TxKey, TxRow> tree, BPlusIO<TxKey> io, long pageAddr,
+                                       int idx) throws IgniteCheckedException {
+
+            if (rows == null)
+                rows = new ArrayList<>();
+
+            TxLogIO logIO = (TxLogIO)io;
+            int offset = io.offset(idx);
+
+            rows.add(new TxKey(logIO.getMajor(pageAddr, offset), logIO.getMinor(pageAddr, offset)));
+
+            return true;
+        }
+    }
+
+    /** */
+    private static class Sync {
+        /** */
+        private static final AtomicIntegerFieldUpdater<Sync> UPD = AtomicIntegerFieldUpdater.newUpdater(Sync.class, "counter");
+
+        /** */
+        volatile int counter = 1;
+
+        /** */
+        boolean casCounter(int old, int upd) {
+            return UPD.compareAndSet(this, old, upd);
+        }
+    }
+
+    /**
+     * TxLog update closure.
+     */
+    private static final class TxLogUpdateClosure implements IgniteTree.InvokeClosure<TxRow> {
+        /** */
+        private final long major;
+
+        /** */
+        private final long minor;
+
+        /** */
+        private final byte newState;
+
+        /** */
+        private final boolean primary;
+
+        /** */
+        private IgniteTree.OperationType treeOp;
+
+        /**
+         *
+         * @param major Coordinator version.
+         * @param minor Counter.
+         * @param newState New Tx newState.
+         * @param primary Flag if this is primary node.
+         */
+        TxLogUpdateClosure(long major, long minor, byte newState, boolean primary) {
+            assert major > MVCC_CRD_COUNTER_NA && minor > MVCC_COUNTER_NA && newState != TxState.NA;
+            this.major = major;
+            this.minor = minor;
+            this.newState = newState;
+            this.primary = primary;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void call(@Nullable TxRow row) {
+            if (row == null) {
+                valid();
+
+                return;
+            }
+
+            byte currState = row.state();
+
+            switch (currState) {
+                case TxState.NA:
+                    checkNa(currState);
+
+                    break;
+
+                case TxState.PREPARED:
+                    checkPrepared(currState);
+
+                    break;
+
+                case TxState.COMMITTED:
+                    checkCommitted(currState);
+
+                    break;
+
+                case TxState.ABORTED:
+                    checkAborted(currState);
+
+                    break;
+
+                default:
+                    throw new IllegalStateException("Unknown tx state: " + currState);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public TxRow newRow() {
+            return treeOp == IgniteTree.OperationType.PUT ? new TxRow(major, minor, newState) : null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public IgniteTree.OperationType operationType() {
+            return treeOp;
+        }
+
+        /**
+         * Checks update possibility for {@code TxState.NA} tx status.
+         *
+         * @param currState Current tx state.
+         */
+        private void checkNa(byte currState) {
+            switch (newState) {
+                case TxState.ABORTED:
+                case TxState.PREPARED:
+                    valid();
+
+                    break;
+
+                case TxState.COMMITTED:
+                    invalid(currState); // TODO IGNITE-8445
+
+                    break;
+
+                default:
+                    invalid(currState);
+            }
+        }
+
+        /**
+         * Checks update possibility for {@code TxState.PREPARED} status.
+         *
+         * @param currState Current tx state.
+         */
+        private void checkPrepared(byte currState) {
+            switch (newState) {
+                case TxState.ABORTED:
+                case TxState.COMMITTED:
+                    valid();
+
+                    break;
+
+                case TxState.PREPARED:
+                    ignore();
+
+                    break;
+
+                default:
+                    invalid(currState);
+            }
+        }
+
+        /**
+         * Checks update possibility for {@code TxState.COMMITTED} status.
+         *
+         * @param currState Current tx state.
+         */
+        private void checkCommitted(byte currState) {
+            switch (newState) {
+                case TxState.COMMITTED:
+                    ignore();
+
+                    break;
+
+                case TxState.PREPARED:
+                    if (primary)
+                        ignore(); // In case when remote tx has updated the current state before.
+                    else
+                        invalid(currState);
+
+                    break;
+
+                default:
+                    invalid(currState);
+            }
+        }
+
+        /**
+         * Checks update possibility for {@code TxState.ABORTED} status.
+         *
+         * @param currState Current tx state.
+         */
+        private void checkAborted(byte currState) {
+            switch (newState) {
+                case TxState.ABORTED:
+                    ignore();
+
+                    break;
+
+                case TxState.PREPARED:
+                    if (primary)
+                        ignore(); // In case when remote tx has updated the current state before.
+                    else
+                        invalid(currState);
+
+                    break;
+
+                default:
+                    invalid(currState);
+            }
+        }
+
+        /**
+         * Action for valid tx status update.
+         */
+        private void valid() {
+            assert treeOp == null;
+
+            treeOp = IgniteTree.OperationType.PUT;
+        }
+
+        /**
+         * Action for invalid tx status update.
+         */
+        private void invalid(byte currState) {
+            assert treeOp == null;
+
+            throw new IllegalStateException("Unexpected new transaction state. [currState=" +
+                currState +  ", newState=" + newState +  ", cntr=" + minor +']');
+        }
+
+        /**
+         * Action for ignoring tx status update.
+         */
+        private void ignore() {
+            assert treeOp == null;
+
+            treeOp = IgniteTree.OperationType.NOOP;
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogIO.java
new file mode 100644
index 0000000..e952b43
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogIO.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+/**
+ *
+ */
+public interface TxLogIO {
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @param row Row to compare with.
+     * @return Comparision result.
+     */
+    int compare(long pageAddr, int off, TxKey row);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @return Major version
+     */
+    long getMajor(long pageAddr, int off);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @param major Major version
+     */
+    void setMajor(long pageAddr, int off, long major);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @return Minor version.
+     */
+    long getMinor(long pageAddr, int off);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @param minor Minor version.
+     */
+    void setMinor(long pageAddr, int off, long minor);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @return Transaction state.
+     */
+    byte getState(long pageAddr, int off);
+
+    /**
+     * @param pageAddr Page address.
+     * @param off Item offset.
+     * @param state Transaction state.
+     */
+    void setState(long pageAddr, int off, byte state);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogInnerIO.java
new file mode 100644
index 0000000..95c10ce
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogInnerIO.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+
+/** */
+public class TxLogInnerIO extends BPlusInnerIO<TxKey> implements TxLogIO {
+    /** */
+    public static final IOVersions<TxLogInnerIO> VERSIONS = new IOVersions<>(new TxLogInnerIO(1));
+
+    /**
+     * @param ver Page format version.
+     */
+    protected TxLogInnerIO(int ver) {
+        super(T_TX_LOG_INNER, ver, true, 8 + 8 + 1);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void storeByOffset(long pageAddr, int off, TxKey row) {
+        TxRow row0 = (TxRow)row;
+
+        setMajor(pageAddr, off, row0.major());
+        setMinor(pageAddr, off, row0.minor());
+        setState(pageAddr, off, row0.state());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<TxKey> srcIo, long srcPageAddr, int srcIdx) {
+        TxLogIO srcIo0 = (TxLogIO)srcIo;
+
+        int srcOff = srcIo.offset(srcIdx);
+        int dstOff = offset(dstIdx);
+
+        setMajor(dstPageAddr, dstOff, srcIo0.getMajor(srcPageAddr, srcOff));
+        setMinor(dstPageAddr, dstOff, srcIo0.getMinor(srcPageAddr, srcOff));
+        setState(dstPageAddr, dstOff, srcIo0.getState(srcPageAddr, srcOff));
+    }
+
+    /** {@inheritDoc} */
+    @Override public TxKey getLookupRow(BPlusTree<TxKey, ?> tree, long pageAddr, int idx) {
+        int off = offset(idx);
+
+        return new TxRow(
+            getMajor(pageAddr, off),
+            getMinor(pageAddr, off),
+            getState(pageAddr, off));
+    }
+
+    /** {@inheritDoc} */
+    @Override public int compare(long pageAddr, int off, TxKey row) {
+        int cmp = Long.compare(PageUtils.getLong(pageAddr, off), row.major());
+
+        return cmp != 0 ? cmp : Long.compare(getMinor(pageAddr, off), row.minor());
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMajor(long pageAddr, int off) {
+        return PageUtils.getLong(pageAddr, off);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMajor(long pageAddr, int off, long major) {
+        PageUtils.putLong(pageAddr, off, major);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMinor(long pageAddr, int off) {
+        return PageUtils.getLong(pageAddr, off + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMinor(long pageAddr, int off, long minor) {
+        PageUtils.putLong(pageAddr, off + 8, minor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte getState(long pageAddr, int off) {
+        return PageUtils.getByte(pageAddr, off + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setState(long pageAddr, int off, byte state) {
+        PageUtils.putByte(pageAddr, off + 16, state);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogLeafIO.java
new file mode 100644
index 0000000..e037fbe
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogLeafIO.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+
+/** */
+public class TxLogLeafIO extends BPlusLeafIO<TxKey> implements TxLogIO {
+    /** */
+    public static final IOVersions<TxLogLeafIO> VERSIONS = new IOVersions<>(new TxLogLeafIO(1));
+
+    /**
+     * @param ver Page format version.
+     */
+    protected TxLogLeafIO(int ver) {
+        super(T_TX_LOG_LEAF, ver, 17);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void storeByOffset(long pageAddr, int off, TxKey row) {
+        TxRow row0 = (TxRow)row;
+
+        setMajor(pageAddr, off, row0.major());
+        setMinor(pageAddr, off, row0.minor());
+        setState(pageAddr, off, row0.state());
+    }
+
+    /** {@inheritDoc} */
+    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<TxKey> srcIo, long srcPageAddr, int srcIdx) {
+        TxLogIO srcIo0 = (TxLogIO)srcIo;
+
+        int srcOff = srcIo.offset(srcIdx);
+        int dstOff = offset(dstIdx);
+
+        setMajor(dstPageAddr, dstOff, srcIo0.getMajor(srcPageAddr, srcOff));
+        setMinor(dstPageAddr, dstOff, srcIo0.getMinor(srcPageAddr, srcOff));
+        setState(dstPageAddr, dstOff, srcIo0.getState(srcPageAddr, srcOff));
+    }
+
+    /** {@inheritDoc} */
+    @Override public TxKey getLookupRow(BPlusTree<TxKey, ?> tree, long pageAddr, int idx) {
+        int off = offset(idx);
+
+        return new TxRow(
+            getMajor(pageAddr, off),
+            getMinor(pageAddr, off),
+            getState(pageAddr, off));
+    }
+
+    /** {@inheritDoc} */
+    @Override public int compare(long pageAddr, int off, TxKey row) {
+        int cmp = Long.compare(getMajor(pageAddr, off), row.major());
+
+        return cmp != 0 ? cmp : Long.compare(getMinor(pageAddr, off), row.minor());
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMajor(long pageAddr, int off) {
+        return PageUtils.getLong(pageAddr, off);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMajor(long pageAddr, int off, long major) {
+        PageUtils.putLong(pageAddr, off, major);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMinor(long pageAddr, int off) {
+        return PageUtils.getLong(pageAddr, off + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMinor(long pageAddr, int off, long minor) {
+        PageUtils.putLong(pageAddr, off + 8, minor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte getState(long pageAddr, int off) {
+        return PageUtils.getByte(pageAddr, off + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setState(long pageAddr, int off, byte state) {
+        PageUtils.putByte(pageAddr, off + 16, state);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java
new file mode 100644
index 0000000..60fbc84
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxLogTree.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageMemory;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
+
+/**
+ *
+ */
+public class TxLogTree extends BPlusTree<TxKey, TxRow> {
+    /**
+     * @param pageMem Page memory.
+     * @param wal Write ahead log manager
+     * @param metaPageId Tree metapage id.
+     * @param reuseList Reuse list
+     * @param failureProcessor Failure processor.
+     * @param initNew {@code True} if new tree should be created.
+     * @throws IgniteCheckedException If fails.
+     */
+    public TxLogTree(PageMemory pageMem,
+        IgniteWriteAheadLogManager wal, long metaPageId,
+        ReuseList reuseList, FailureProcessor failureProcessor,
+        boolean initNew) throws IgniteCheckedException {
+        super(TxLog.TX_LOG_CACHE_NAME, TxLog.TX_LOG_CACHE_ID, pageMem, wal, new AtomicLong(), metaPageId,
+            reuseList, TxLogInnerIO.VERSIONS, TxLogLeafIO.VERSIONS, failureProcessor);
+
+        initTree(initNew);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected int compare(BPlusIO<TxKey> io, long pageAddr, int idx, TxKey row) {
+        return ((TxLogIO)io).compare(pageAddr, io.offset(idx), row);
+    }
+
+    /** {@inheritDoc} */
+    @Override public TxRow getRow(BPlusIO<TxKey> io, long pageAddr,
+                                  int idx, Object ignored) throws IgniteCheckedException {
+        return (TxRow) io.getLookupRow(this, pageAddr, idx);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxRow.java
new file mode 100644
index 0000000..0d161c8
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxRow.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+/**
+ *
+ */
+public class TxRow extends TxKey {
+    /** */
+    private byte state;
+
+    /**
+     * @param major Major version.
+     * @param minor Minor version.
+     * @param state Transaction state.
+     */
+    TxRow(long major, long minor, byte state) {
+        super(major, minor);
+
+        this.state = state;
+    }
+
+    /**
+     * @return Transaction state.
+     */
+    public byte state() {
+        return state;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxState.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxState.java
new file mode 100644
index 0000000..65a1f25
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/mvcc/txlog/TxState.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc.txlog;
+
+/**
+ *
+ */
+public final class TxState {
+    /** */
+    public static final byte NA         = 0x0;
+    /** */
+    public static final byte PREPARED   = 0x1;
+    /** */
+    public static final byte ABORTED    = 0x2;
+    /** */
+    public static final byte COMMITTED  = 0x3;
+
+    /**
+     * Private constructor.
+     */
+    private TxState() {}
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/AllocatedPageTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/AllocatedPageTracker.java
index 54aad8d..a1902d6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/AllocatedPageTracker.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/AllocatedPageTracker.java
@@ -21,8 +21,11 @@
  * Tracks allocated pages.
  */
 public interface AllocatedPageTracker {
+    /** No-op instance. */
+    public AllocatedPageTracker NO_OP = delta -> {};
+
     /**
-     * Increments totalAllocatedPages counter.
+     * Updates totalAllocatedPages counter.
      *
      * @param delta Value to increment by.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRow.java
index 44f0a3f..92f06a3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRow.java
@@ -19,12 +19,13 @@
 
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUpdateVersionAware;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 
 /**
  * Cache data row.
  */
-public interface CacheDataRow extends CacheSearchRow, Storable {
+public interface CacheDataRow extends MvccUpdateVersionAware, CacheSearchRow, Storable {
     /**
      * @return Cache value.
      */
@@ -43,12 +44,12 @@
     /**
      * @return Partition for this key.
      */
-    public int partition();
+    @Override public int partition();
 
     /**
      * @param link Link for this row.
      */
-    public void link(long link);
+    @Override public void link(long link);
 
     /**
      * @param key Key.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java
index 9f2e031..b8245df 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheDataRowAdapter.java
@@ -30,6 +30,7 @@
 import org.apache.ignite.internal.processors.cache.IncompleteCacheObject;
 import org.apache.ignite.internal.processors.cache.IncompleteObject;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.CacheVersionIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPagePayload;
@@ -42,6 +43,10 @@
 
 import static org.apache.ignite.internal.pagemem.PageIdUtils.itemId;
 import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter.RowData.LINK_WITH_HEADER;
 
 /**
  * Cache data row adapter.
@@ -60,6 +65,7 @@
     protected CacheObject val;
 
     /** */
+    @GridToStringInclude
     protected long expireTime = -1;
 
     /** */
@@ -153,6 +159,8 @@
 
                     nextLink = data.nextLink();
 
+                    int hdrLen = 0;
+
                     if (first) {
                         if (nextLink == 0) {
                             // Fast path for a single page row.
@@ -162,12 +170,21 @@
                         }
 
                         first = false;
+
+                        // Assume that row header is always located entirely on the very first page.
+                        hdrLen = readHeader(pageAddr, data.offset());
+
+                        if (rowData == LINK_WITH_HEADER)
+                            return;
                     }
 
                     ByteBuffer buf = pageMem.pageBuffer(pageAddr);
 
-                    buf.position(data.offset());
-                    buf.limit(data.offset() + data.payloadSize());
+                    int off = data.offset() + hdrLen;
+                    int payloadSize = data.payloadSize() - hdrLen;
+
+                    buf.position(off);
+                    buf.limit(off + payloadSize);
 
                     boolean keyOnly = rowData == RowData.KEY_ONLY;
 
@@ -190,6 +207,18 @@
     }
 
     /**
+     * Reads row header (i.e. MVCC info) which should be located on the very first page od data.
+     *
+     * @param addr Address.
+     * @param off Offset
+     * @return Number of bytes read.
+     */
+    protected int readHeader(long addr, int off) {
+        // No-op.
+        return 0;
+    }
+
+    /**
      * @param sharedCtx Cache shared context.
      * @param coctx Cache object context.
      * @param buf Buffer.
@@ -199,7 +228,7 @@
      * @throws IgniteCheckedException If failed.
      * @return Read object.
      */
-    private IncompleteObject<?> readFragment(
+    protected IncompleteObject<?> readFragment(
         GridCacheSharedContext<?, ?> sharedCtx,
         CacheObjectContext coctx,
         ByteBuffer buf,
@@ -268,7 +297,7 @@
      * @param readCacheId {@code true} If need to read cache ID.
      * @throws IgniteCheckedException If failed.
      */
-    private void readFullRow(
+    protected void readFullRow(
         GridCacheSharedContext<?, ?> sharedCtx,
         CacheObjectContext coctx,
         long addr,
@@ -277,6 +306,11 @@
         throws IgniteCheckedException {
         int off = 0;
 
+        off += readHeader(addr, off);
+
+        if (rowData == LINK_WITH_HEADER)
+            return;
+
         if (readCacheId) {
             cacheId = PageUtils.getInt(addr, off);
 
@@ -326,7 +360,7 @@
      * @param buf Buffer.
      * @param incomplete Incomplete.
      */
-    private IncompleteObject<?> readIncompleteCacheId(
+    protected IncompleteObject<?> readIncompleteCacheId(
         ByteBuffer buf,
         IncompleteObject<?> incomplete
     ) {
@@ -371,7 +405,7 @@
      * @return Incomplete object.
      * @throws IgniteCheckedException If failed.
      */
-    private IncompleteCacheObject readIncompleteKey(
+    protected IncompleteCacheObject readIncompleteKey(
         CacheObjectContext coctx,
         ByteBuffer buf,
         IncompleteCacheObject incomplete
@@ -396,7 +430,7 @@
      * @return Incomplete object.
      * @throws IgniteCheckedException If failed.
      */
-    private IncompleteCacheObject readIncompleteValue(
+    protected IncompleteCacheObject readIncompleteValue(
         CacheObjectContext coctx,
         ByteBuffer buf,
         IncompleteCacheObject incomplete
@@ -419,7 +453,7 @@
      * @param incomplete Incomplete object.
      * @return Incomplete object.
      */
-    private IncompleteObject<?> readIncompleteExpireTime(
+    protected IncompleteObject<?> readIncompleteExpireTime(
         ByteBuffer buf,
         IncompleteObject<?> incomplete
     ) {
@@ -463,7 +497,7 @@
      * @return Incomplete object.
      * @throws IgniteCheckedException If failed.
      */
-    private IncompleteObject<?> readIncompleteVersion(
+    protected IncompleteObject<?> readIncompleteVersion(
         ByteBuffer buf,
         IncompleteObject<?> incomplete
     ) throws IgniteCheckedException {
@@ -573,6 +607,60 @@
         throw new UnsupportedOperationException();
     }
 
+    /** {@inheritDoc} */
+    @Override public int size() throws IgniteCheckedException {
+        int len = key().valueBytesLength(null);
+
+        len += value().valueBytesLength(null) + CacheVersionIO.size(version(), false) + 8;
+
+        return len + (cacheId() != 0 ? 4 : 0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int headerSize() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return MVCC_CRD_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return MVCC_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return MVCC_OP_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte mvccTxState() {
+        return TxState.NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCoordinatorVersion() {
+        return MVCC_CRD_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCounter() {
+        return MVCC_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int newMvccOperationCounter() {
+        return MVCC_OP_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte newMvccTxState() {
+        return TxState.NA;
+    }
+
     /**
      *
      */
@@ -584,7 +672,13 @@
         KEY_ONLY,
 
         /** */
-        NO_KEY
+        NO_KEY,
+
+        /** */
+        LINK_ONLY,
+
+        /** */
+        LINK_WITH_HEADER
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheSearchRow.java
index 1637eb0..c3cfb83 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheSearchRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/CacheSearchRow.java
@@ -18,11 +18,12 @@
 package org.apache.ignite.internal.processors.cache.persistence;
 
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
 
 /**
  *
  */
-public interface CacheSearchRow {
+public interface CacheSearchRow extends MvccVersionAware {
     /**
      * @return Cache key.
      */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java
index f4e7a2c..4334c74 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DataRegionMetricsImpl.java
@@ -19,6 +19,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.atomic.LongAdder;
 import org.apache.ignite.DataRegionMetrics;
 import org.apache.ignite.configuration.DataRegionConfiguration;
@@ -171,7 +172,9 @@
 
         long totalAllocated = getPageSize() * totalAllocatedPages.longValue();
 
-        return (float) (totalAllocated - freeSpace) / totalAllocated;
+        return totalAllocated != 0 ?
+            (float) (totalAllocated - freeSpace) / totalAllocated
+            : 0f;
     }
 
     /** {@inheritDoc} */
@@ -358,21 +361,12 @@
             dirtyPages.reset();
     }
 
-    /**
-     * Increments totalAllocatedPages counter.
-     */
-    public void incrementTotalAllocatedPages() {
-        updateTotalAllocatedPages(1);
-    }
-
     /** {@inheritDoc} */
     @Override public void updateTotalAllocatedPages(long delta) {
-        if (metricsEnabled) {
-            totalAllocatedPages.add(delta);
+        totalAllocatedPages.add(delta);
 
-            if (delta > 0)
-                updateAllocationRateMetrics(delta);
-        }
+        if (metricsEnabled && delta > 0)
+            updateAllocationRateMetrics(delta);
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DatabaseLifecycleListener.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DatabaseLifecycleListener.java
new file mode 100644
index 0000000..f96cdd9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/DatabaseLifecycleListener.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence;
+
+import org.apache.ignite.IgniteCheckedException;
+
+/**
+ *
+ */
+public interface DatabaseLifecycleListener {
+
+    /**
+     * @param mgr Database shared manager.
+     *
+     */
+    void onInitDataRegions(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException;
+
+    /**
+     * @param mgr Page store manager.
+     *
+     */
+    void beforeMemoryRestore(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException;
+
+    /**
+     * @param mgr Database shared manager.
+     *
+     */
+    void afterMemoryRestore(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException;
+
+    /**
+     * @param mgr Database shared manager.
+     */
+    void afterInitialise(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException;
+
+    /**
+     * @param mgr Database shared manager.
+     */
+    void beforeStop(IgniteCacheDatabaseSharedManager mgr);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
index 1203de8..3e69824 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java
@@ -88,7 +88,6 @@
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager;
 import org.apache.ignite.internal.pagemem.store.PageStore;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.CacheState;
@@ -112,6 +111,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLog;
 import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointEntry;
 import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointEntryType;
 import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory;
@@ -119,7 +119,6 @@
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
 import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore;
 import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
-import org.apache.ignite.internal.processors.cache.persistence.file.PersistentStorageIOException;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener;
 import org.apache.ignite.internal.processors.cache.persistence.pagemem.CheckpointMetricsTracker;
@@ -168,6 +167,7 @@
 import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR;
 import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION;
 import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.CHECKPOINT_RECORD;
+import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_WAL_HISTORY_SIZE;
 import static org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage.METASTORAGE_CACHE_ID;
 
 /**
@@ -217,9 +217,6 @@
     /** Checkpoint file name pattern. */
     public static final Pattern CP_FILE_NAME_PATTERN = Pattern.compile("(\\d+)-(.*)-(START|END)\\.bin");
 
-    /** Checkpoint file temporary suffix. This is needed to safe writing checkpoint markers through temporary file and renaming. */
-    public static final String FILE_TMP_SUFFIX = ".tmp";
-
     /** Node started file suffix. */
     public static final String NODE_STARTED_FILE_NAME_SUFFIX = "-node-started.bin";
 
@@ -361,7 +358,9 @@
 
         checkpointFreq = persistenceCfg.getCheckpointFrequency();
 
-        truncateWalOnCpFinish = persistenceCfg.getWalHistorySize() != Integer.MAX_VALUE;
+        truncateWalOnCpFinish = persistenceCfg.isWalHistorySizeParameterUsed()
+            ? persistenceCfg.getWalHistorySize() != Integer.MAX_VALUE
+            : persistenceCfg.getMaxWalArchiveSize() != Long.MAX_VALUE;
 
         lockWaitTime = persistenceCfg.getLockWaitTime();
 
@@ -411,8 +410,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected void initDataRegions(DataStorageConfiguration memCfg) throws IgniteCheckedException {
-        super.initDataRegions(memCfg);
+    @Override protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCheckedException {
+        super.initDataRegions0(memCfg);
 
         addDataRegion(
             memCfg,
@@ -472,14 +471,21 @@
             if (!U.mkdirs(cpDir))
                 throw new IgniteCheckedException("Could not create directory for checkpoint metadata: " + cpDir);
 
-            cleanupTempCheckpointDirectory();
-
             final FileLockHolder preLocked = kernalCtx.pdsFolderResolver()
-                .resolveFolders()
-                .getLockedFileLockHolder();
+                    .resolveFolders()
+                    .getLockedFileLockHolder();
 
-            if (preLocked == null)
-                fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), kernalCtx, log);
+            fileLockHolder = preLocked == null ?
+                        new FileLockHolder(storeMgr.workDir().getPath(), kernalCtx, log) : preLocked;
+
+            if (log.isDebugEnabled())
+                log.debug("Try to capture file lock [nodeId=" +
+                        cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]");
+
+            if (!fileLockHolder.isLocked())
+                fileLockHolder.tryLock(lockWaitTime);
+
+            cleanupTempCheckpointDirectory();
 
             persStoreMetrics.wal(cctx.wal());
 
@@ -489,13 +495,13 @@
     }
 
     /**
-     * Cleanup checkpoint directory from all temporary files {@link #FILE_TMP_SUFFIX}.
+     * Cleanup checkpoint directory from all temporary files.
      */
-    public void cleanupTempCheckpointDirectory() throws IgniteCheckedException {
+    @Override public void cleanupTempCheckpointDirectory() throws IgniteCheckedException {
         try {
             try (DirectoryStream<Path> files = Files.newDirectoryStream(
                 cpDir.toPath(),
-                path -> path.endsWith(FILE_TMP_SUFFIX))
+                path -> path.endsWith(FilePageStoreManager.TMP_SUFFIX))
             ) {
                 for (Path path : files)
                     Files.delete(path);
@@ -509,7 +515,7 @@
     /**
      * Cleanup checkpoint directory.
      */
-    public void cleanupCheckpointDirectory() throws IgniteCheckedException {
+    @Override public void cleanupCheckpointDirectory() throws IgniteCheckedException {
         try {
             try (DirectoryStream<Path> files = Files.newDirectoryStream(cpDir.toPath())) {
                 for (Path path : files)
@@ -597,7 +603,7 @@
                 Files.delete(endFile);
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to delete stale checkpoint files: " + cpEntry, e);
+            throw new StorageException("Failed to delete stale checkpoint files: " + cpEntry, e);
         }
     }
 
@@ -710,16 +716,10 @@
 
         onKernalStop0(false);
 
-        stop0(false);
+        super.onDeActivate(kctx);
 
         /* Must be here, because after deactivate we can invoke activate and file lock must be already configured */
         stopping = false;
-
-        if (!cctx.localNode().isClient()) {
-            //we replace lock with new instance (only if we're responsible for locking folders)
-            if (fileLockHolder != null)
-                fileLockHolder = new FileLockHolder(storeMgr.workDir().getPath(), cctx.kernalContext(), log);
-        }
     }
 
     /**
@@ -813,6 +813,10 @@
         checkpointReadLock();
 
         try {
+            for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) {
+                lsnr.beforeMemoryRestore(this);
+            }
+
             if (!F.isEmpty(cachesToStart)) {
                 for (DynamicCacheDescriptor desc : cachesToStart) {
                     if (CU.affinityNode(cctx.localNode(), desc.cacheConfiguration().getNodeFilter()))
@@ -852,9 +856,14 @@
             metaStorage.init(this);
 
             notifyMetastorageReadyForReadWrite();
+
+            for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) {
+                lsnr.afterMemoryRestore(this);
+            }
+
         }
         catch (IgniteCheckedException e) {
-            if (X.hasCause(e, StorageException.class, PersistentStorageIOException.class, IOException.class))
+            if (X.hasCause(e, StorageException.class, IOException.class))
                 cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -874,7 +883,7 @@
         FileWALPointer p = (FileWALPointer)ptr;
 
         String fileName = U.currentTimeMillis() + NODE_STARTED_FILE_NAME_SUFFIX;
-        String tmpFileName = fileName + FILE_TMP_SUFFIX;
+        String tmpFileName = fileName + FilePageStoreManager.TMP_SUFFIX;
 
         ByteBuffer buf = ByteBuffer.allocate(FileWALPointer.POINTER_SIZE);
         buf.order(ByteOrder.nativeOrder());
@@ -900,7 +909,7 @@
             Files.move(Paths.get(cpDir.getAbsolutePath(), tmpFileName), Paths.get(cpDir.getAbsolutePath(), fileName));
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to write node start marker: " + ptr, e);
+            throw new StorageException("Failed to write node start marker: " + ptr, e);
         }
     }
 
@@ -943,12 +952,12 @@
                     buf.clear();
                 }
                 catch (IOException e) {
-                    throw new PersistentStorageIOException("Failed to read node started marker file: " + f.getAbsolutePath(), e);
+                    throw new StorageException("Failed to read node started marker file: " + f.getAbsolutePath(), e);
                 }
             }
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to retreive node started files.", e);
+            throw new StorageException("Failed to retreive node started files.", e);
         }
 
         // Sort start markers by file timestamp.
@@ -958,28 +967,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void lock() throws IgniteCheckedException {
-        if (fileLockHolder != null) {
-            if (log.isDebugEnabled())
-                log.debug("Try to capture file lock [nodeId=" +
-                    cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]");
-
-            fileLockHolder.tryLock(lockWaitTime);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public void unLock() {
-        if (fileLockHolder != null) {
-            if (log.isDebugEnabled())
-                log.debug("Release file lock [nodeId=" +
-                    cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]");
-
-            fileLockHolder.release();
-        }
-    }
-
-    /** {@inheritDoc} */
     @Override protected void onKernalStop0(boolean cancel) {
         checkpointLock.writeLock().lock();
 
@@ -996,16 +983,24 @@
 
         super.onKernalStop0(cancel);
 
-        if (!cctx.kernalContext().clientNode()) {
-            unLock();
-
-            if (fileLockHolder != null)
-                fileLockHolder.close();
-        }
-
         unRegistrateMetricsMBean();
     }
 
+    /** {@inheritDoc} */
+    @Override protected void stop0(boolean cancel) {
+        super.stop0(cancel);
+
+        if (!cctx.kernalContext().clientNode()) {
+            if (fileLockHolder != null) {
+                if (log.isDebugEnabled())
+                    log.debug("Release file lock [nodeId=" +
+                            cctx.localNodeId() + " path=" + fileLockHolder.lockPath() + "]");
+
+                fileLockHolder.close();
+            }
+        }
+    }
+
     /** */
     private long[] calculateFragmentSizes(int concLvl, long cacheSize, long chpBufSize) {
         if (concLvl < 2)
@@ -1364,9 +1359,10 @@
                     final int cacheId = cacheCtx.cacheId();
                     final GridFutureAdapter<Void> usrFut = idxRebuildFuts.get(cacheId);
 
-                    if (!cctx.pageStore().hasIndexStore(cacheCtx.groupId()) && cacheCtx.affinityNode()) {
+                    if (!cctx.pageStore().hasIndexStore(cacheCtx.groupId()) && cacheCtx.affinityNode()
+                        && cacheCtx.group().persistenceEnabled()) {
                         IgniteInternalFuture<?> rebuildFut = cctx.kernalContext().query()
-                            .rebuildIndexesFromHash(Collections.singletonList(cacheCtx.cacheId()));
+                            .rebuildIndexesFromHash(Collections.singleton(cacheCtx.cacheId()));
 
                         assert usrFut != null : "Missing user future for cache: " + cacheCtx.name();
 
@@ -1785,6 +1781,13 @@
         return cp.wakeupForCheckpoint(0, reason);
     }
 
+    /** {@inheritDoc} */
+    @Override public WALPointer lastCheckpointMarkWalPointer() {
+        CheckpointEntry lastCheckpointEntry = cpHistory == null ? null : cpHistory.lastCheckpoint();
+
+        return lastCheckpointEntry == null ? null : lastCheckpointEntry.checkpointMark();
+    }
+
     /**
      * @return Checkpoint directory.
      */
@@ -1929,7 +1932,7 @@
             cctx.pageStore().beginRecover();
         }
         else
-            cctx.wal().allowCompressionUntil(status.startPtr);
+            cctx.wal().notchLastCheckpointPtr(status.startPtr);
 
         long start = U.currentTimeMillis();
 
@@ -2103,6 +2106,10 @@
      * @throws IgniteCheckedException if no DataRegion is configured for a name obtained from cache descriptor.
      */
     private PageMemoryEx getPageMemoryForCacheGroup(int grpId) throws IgniteCheckedException {
+        // TODO IGNITE-7792 add generic mapping.
+        if (grpId == TxLog.TX_LOG_CACHE_ID)
+            return (PageMemoryEx)dataRegion(TxLog.TX_LOG_CACHE_NAME).pageMemory();
+
         // TODO IGNITE-5075: cache descriptor can be removed.
         GridCacheSharedContext sharedCtx = context();
 
@@ -2304,8 +2311,20 @@
                 }
             }
 
-            if (!metastoreOnly)
-                restorePartitionStates(partStates, null);
+            if (!metastoreOnly) {
+                long startRestorePart = U.currentTimeMillis();
+
+                if (log.isInfoEnabled())
+                    log.info("Restoring partition state for local groups [cntPartStateWal="
+                        + partStates.size() + ", lastCheckpointId=" + status.cpStartId + ']');
+
+                long proc = restorePartitionStates(partStates, null);
+
+                if (log.isInfoEnabled())
+                    log.info("Finished restoring partition state for local groups [cntProcessed=" + proc +
+                        ", cntPartStateWal=" + partStates.size() +
+                        ", time=" + (U.currentTimeMillis() - startRestorePart) + "ms]");
+            }
         }
         finally {
             if (!metastoreOnly)
@@ -2323,12 +2342,15 @@
      *
      * @param partStates Partition states restored from WAL.
      * @param onlyForGroups If not {@code null} restore states only for specified cache groups.
+     * @return cntParts Count of partitions processed.
      * @throws IgniteCheckedException If failed to restore partition states.
      */
-    private void restorePartitionStates(
+    private long restorePartitionStates(
         Map<T2<Integer, Integer>, T2<Integer, Long>> partStates,
         @Nullable Set<Integer> onlyForGroups
     ) throws IgniteCheckedException {
+        long cntParts = 0;
+
         for (CacheGroupContext grp : cctx.cache().cacheGroups()) {
             if (grp.isLocal() || !grp.affinityNode()) {
                 // Local cache has no partitions and its states.
@@ -2391,7 +2413,7 @@
                                     }
                                 }
                                 else
-                                    updateState(part, (int)io.getPartitionState(pageAddr));
+                                    changed = updateState(part, (int)io.getPartitionState(pageAddr));
                             }
                             finally {
                                 pageMem.writeUnlock(grpId, partMetaId, partMetaPage, null, changed);
@@ -2415,11 +2437,15 @@
 
                     updateState(part, restore.get1());
                 }
+
+                cntParts++;
             }
 
             // After partition states are restored, it is necessary to update internal data structures in topology.
             grp.topology().afterStateRestored(grp.topology().lastTopologyChangeVersion());
         }
+
+        return cntParts;
     }
 
     /**
@@ -2464,7 +2490,7 @@
         if (partId == -1)
             partId = cacheCtx.affinity().partition(dataEntry.key());
 
-        GridDhtLocalPartition locPart = cacheCtx.topology().forceCreatePartition(partId);
+        GridDhtLocalPartition locPart = cacheCtx.isLocal() ? null : cacheCtx.topology().forceCreatePartition(partId);
 
         switch (dataEntry.op()) {
             case CREATE:
@@ -2633,11 +2659,11 @@
      * @param entryBuf Checkpoint entry buffer to write.
      * @param cp Checkpoint entry.
      * @param type Checkpoint entry type.
-     * @throws PersistentStorageIOException If failed to write checkpoint entry.
+     * @throws StorageException If failed to write checkpoint entry.
      */
-    public void writeCheckpointEntry(ByteBuffer entryBuf, CheckpointEntry cp, CheckpointEntryType type) throws PersistentStorageIOException {
+    public void writeCheckpointEntry(ByteBuffer entryBuf, CheckpointEntry cp, CheckpointEntryType type) throws StorageException {
         String fileName = checkpointFileName(cp, type);
-        String tmpFileName = fileName + FILE_TMP_SUFFIX;
+        String tmpFileName = fileName + FilePageStoreManager.TMP_SUFFIX;
 
         try {
             try (FileIO io = ioFactory.create(Paths.get(cpDir.getAbsolutePath(), skipSync ? fileName : tmpFileName).toFile(),
@@ -2655,7 +2681,7 @@
                 Files.move(Paths.get(cpDir.getAbsolutePath(), tmpFileName), Paths.get(cpDir.getAbsolutePath(), fileName));
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to write checkpoint entry [ptr=" + cp.checkpointMark()
+            throw new StorageException("Failed to write checkpoint entry [ptr=" + cp.checkpointMark()
                 + ", cpTs=" + cp.timestamp()
                 + ", cpId=" + cp.checkpointId()
                 + ", type=" + type + "]", e);
@@ -3659,7 +3685,7 @@
 
                 writeCheckpointEntry(tmpWriteBuf, cp, CheckpointEntryType.END);
 
-                cctx.wal().allowCompressionUntil(chp.cpEntry.checkpointMark());
+                cctx.wal().notchLastCheckpointPtr(chp.cpEntry.checkpointMark());
             }
 
             List<CheckpointEntry> removedFromHistory = cpHistory.onCheckpointFinished(chp, truncateWalOnCpFinish);
@@ -3848,20 +3874,21 @@
 
                 PageMemoryEx pageMem;
 
-                if (grpId != MetaStorage.METASTORAGE_CACHE_ID) {
+                // TODO IGNITE-7792 add generic mapping.
+                if (grpId == MetaStorage.METASTORAGE_CACHE_ID)
+                    pageMem = (PageMemoryEx)metaStorage.pageMemory();
+                else if (grpId == TxLog.TX_LOG_CACHE_ID)
+                    pageMem = (PageMemoryEx)dataRegion(TxLog.TX_LOG_CACHE_NAME).pageMemory();
+                else {
                     CacheGroupContext grp = context().cache().cacheGroup(grpId);
 
-                    if (grp == null)
+                    DataRegion region = grp != null ?grp .dataRegion() : null;
+
+                    if (region == null || !region.config().isPersistenceEnabled())
                         continue;
 
-                    if (!grp.dataRegion().config().isPersistenceEnabled())
-                        continue;
-
-                    pageMem = (PageMemoryEx)grp.dataRegion().pageMemory();
+                    pageMem = (PageMemoryEx)region.pageMemory();
                 }
-                else
-                    pageMem = (PageMemoryEx)metaStorage.pageMemory();
-
 
                 Integer tag = pageMem.getForCheckpoint(
                     fullId, tmpWriteBuf, persStoreMetrics.metricsEnabled() ? tracker : null);
@@ -4017,7 +4044,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(CheckpointStatus.class, this);
         }
     }
@@ -4110,7 +4137,7 @@
         private RandomAccessFile lockFile;
 
         /** Lock. */
-        private FileLock lock;
+        private volatile FileLock lock;
 
         /** Kernal context to generate Id of locked node in file. */
         @NotNull private GridKernalContext ctx;
@@ -4183,6 +4210,7 @@
                 for (int i = 0; i < lockWaitTimeMillis; i += 1000) {
                     try {
                         lock = ch.tryLock(0, 1, false);
+
                         if (lock != null && lock.isValid()) {
                             writeContent(sb.toString());
 
@@ -4253,13 +4281,20 @@
             return content;
         }
 
+        /** Locked or not. */
+        public boolean isLocked() {
+            return lock != null && lock.isValid();
+        }
+
         /** Releases file lock */
         public void release() {
             U.releaseQuiet(lock);
         }
 
         /** Closes file channel */
-        public void close() {
+        @Override public void close() {
+            release();
+
             U.closeQuiet(lockFile);
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java
index ea775dc..04476ad 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java
@@ -17,9 +17,11 @@
 
 package org.apache.ignite.internal.processors.cache.persistence;
 
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.List;
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.Set;
@@ -35,7 +37,6 @@
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.PageSupport;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
@@ -58,6 +59,8 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CachePartitionPartialCountersMap;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteHistoricalIterator;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeListImpl;
 import org.apache.ignite.internal.processors.cache.persistence.migration.UpgradePendingTreeToPerPartitionTask;
 import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
@@ -77,8 +80,11 @@
 import org.apache.ignite.internal.processors.cache.tree.CacheDataTree;
 import org.apache.ignite.internal.processors.cache.tree.PendingEntriesTree;
 import org.apache.ignite.internal.processors.cache.tree.PendingRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccUpdateResult;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.lang.IgniteInClosure2X;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -132,7 +138,8 @@
             PageIdAllocator.FLAG_IDX,
             reuseList,
             metastoreRoot.pageId().pageId(),
-            metastoreRoot.isAllocated());
+            metastoreRoot.isAllocated(),
+            ctx.kernalContext().failure());
 
         ((GridCacheDatabaseSharedManager)ctx.database()).addCheckpointListener(this);
     }
@@ -735,11 +742,14 @@
     }
 
     /** {@inheritDoc} */
-    @Override @Nullable protected WALHistoricalIterator historicalIterator(
+    @Override @Nullable protected IgniteHistoricalIterator historicalIterator(
         CachePartitionPartialCountersMap partCntrs, Set<Integer> missing) throws IgniteCheckedException {
         if (partCntrs == null || partCntrs.isEmpty())
             return null;
 
+        if (grp.mvccEnabled()) // TODO IGNITE-7384
+            return super.historicalIterator(partCntrs, missing);
+
         GridCacheDatabaseSharedManager database = (GridCacheDatabaseSharedManager)grp.shared().database();
 
         FileWALPointer minPtr = null;
@@ -870,6 +880,10 @@
         /** Flag indicates that partition belongs to current {@link #next} is finished and no longer needs to rebalance. */
         private boolean reachedPartitionEnd;
 
+        /** Flag indicates that update counters for requested partitions have been reached and done.
+         *  It means that no further iteration is needed. */
+        private boolean doneAllPartitions;
+
         /**
          * @param grp Cache context.
          * @param walIt WAL iterator.
@@ -943,6 +957,9 @@
                 doneParts.add(next.partitionId());
 
                 reachedPartitionEnd = false;
+
+                if (doneParts.size() == partMap.size())
+                    doneAllPartitions = true;
             }
 
             advance();
@@ -1001,6 +1018,9 @@
         private void advance() {
             next = null;
 
+            if (doneAllPartitions)
+                return;
+
             while (true) {
                 if (entryIt != null) {
                     while (entryIt.hasNext()) {
@@ -1093,6 +1113,16 @@
         }
 
         /** {@inheritDoc} */
+        @Override public int size() throws IgniteCheckedException {
+            throw new UnsupportedOperationException();
+        }
+
+        /** {@inheritDoc} */
+        @Override public int headerSize() {
+            throw new UnsupportedOperationException();
+        }
+
+        /** {@inheritDoc} */
         @Override public long link() {
             return 0;
         }
@@ -1111,6 +1141,46 @@
         @Override public int cacheId() {
             return entry.cacheId();
         }
+
+        /** {@inheritDoc} */
+        @Override public long mvccCoordinatorVersion() {
+            return 0; // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public long mvccCounter() {
+            return 0;  // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public int mvccOperationCounter() {
+            return 0;  // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public long newMvccCoordinatorVersion() {
+            return 0; // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public long newMvccCounter() {
+            return 0; // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public int newMvccOperationCounter() {
+            return 0;  // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public byte mvccTxState() {
+            return 0;  // TODO IGNITE-7384
+        }
+
+        /** {@inheritDoc} */
+        @Override public byte newMvccTxState() {
+            return 0; // TODO IGNITE-7384
+        }
     }
 
     /**
@@ -1493,6 +1563,19 @@
         }
 
         /** {@inheritDoc} */
+        @Override public void updateSize(int cacheId, long delta) {
+            try {
+                CacheDataStore delegate0 = init0(false);
+
+                if (delegate0 != null)
+                    delegate0.updateSize(cacheId, delta);
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+
+        /** {@inheritDoc} */
         @Override public long updateCounter() {
             try {
                 CacheDataStore delegate0 = init0(true);
@@ -1505,6 +1588,30 @@
         }
 
         /** {@inheritDoc} */
+        @Override public long nextMvccUpdateCounter() {
+            try {
+                CacheDataStore delegate0 = init0(true);
+
+                return delegate0 == null ? 0 : delegate0.nextMvccUpdateCounter();
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public long mvccUpdateCounter() {
+            try {
+                CacheDataStore delegate0 = init0(true);
+
+                return delegate0 == null ? 0 : delegate0.mvccUpdateCounter();
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException(e);
+            }
+        }
+
+        /** {@inheritDoc} */
         @Override public void init(long size, long updCntr, @Nullable Map<Integer, Long> cacheSizes) {
             throw new IllegalStateException("Should be never called.");
         }
@@ -1589,6 +1696,126 @@
         }
 
         /** {@inheritDoc} */
+        @Override public boolean mvccInitialValue(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer)
+            throws IgniteCheckedException
+        {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccInitialValue(cctx, key, val, ver, expireTime, mvccVer, newMvccVer);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean mvccInitialValueIfAbsent(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte txState,
+            byte newTxState)
+            throws IgniteCheckedException
+        {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccInitialValueIfAbsent(cctx, key, val, ver, expireTime, mvccVer, newMvccVer,
+                txState, newTxState);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean mvccUpdateRowWithPreloadInfo(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            @Nullable CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccVersion mvccVer,
+            MvccVersion newMvccVer,
+            byte mvccTxState,
+            byte newMvccTxState) throws IgniteCheckedException {
+
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccUpdateRowWithPreloadInfo(cctx,
+                key,
+                val,
+                ver,
+                expireTime,
+                mvccVer,
+                newMvccVer,
+                mvccTxState,
+                newMvccTxState);
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccUpdate(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            CacheObject val,
+            GridCacheVersion ver,
+            long expireTime,
+            MvccSnapshot mvccVer,
+            boolean primary,
+            boolean needHistory,
+            boolean noCreate) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccUpdate(
+                cctx, key, val, ver, expireTime, mvccVer, primary, needHistory, noCreate);
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccRemove(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot mvccVer,
+            boolean primary,
+            boolean needHistory) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccRemove(cctx, key, mvccVer, primary, needHistory);
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccUpdateResult mvccLock(
+            GridCacheContext cctx,
+            KeyCacheObject key,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccLock(cctx, key, mvccSnapshot);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridLongList mvccUpdateNative(GridCacheContext cctx, boolean primary, KeyCacheObject key, CacheObject val, GridCacheVersion ver, long expireTime, MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccUpdateNative(cctx, primary, key, val, ver, expireTime, mvccSnapshot);
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridLongList mvccRemoveNative(GridCacheContext cctx, boolean primary, KeyCacheObject key, MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.mvccRemoveNative(cctx, primary, key, mvccSnapshot);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void mvccRemoveAll(GridCacheContext cctx, KeyCacheObject key) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            delegate.mvccRemoveAll(cctx, key);
+        }
+
+        /** {@inheritDoc} */
         @Override public CacheDataRow createRow(
             GridCacheContext cctx,
             KeyCacheObject key,
@@ -1604,6 +1831,21 @@
         }
 
         /** {@inheritDoc} */
+        @Override public int cleanup(GridCacheContext cctx,
+            @Nullable List<MvccLinkAwareSearchRow> cleanupRows) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            return delegate.cleanup(cctx, cleanupRows);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void updateTxState(GridCacheContext cctx, CacheSearchRow row) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(false);
+
+            delegate.updateTxState(cctx, row);
+        }
+
+        /** {@inheritDoc} */
         @Override public void invoke(GridCacheContext cctx, KeyCacheObject key, OffheapInvokeClosure c)
             throws IgniteCheckedException {
             assert ctx.database().checkpointLockIsHeldByThread();
@@ -1634,6 +1876,40 @@
         }
 
         /** {@inheritDoc} */
+        @Override public CacheDataRow mvccFind(GridCacheContext cctx, KeyCacheObject key, MvccSnapshot snapshot)
+            throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.mvccFind(cctx, key, snapshot);
+
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public List<IgniteBiTuple<Object, MvccVersion>> mvccFindAllVersions(GridCacheContext cctx, KeyCacheObject key)
+            throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.mvccFindAllVersions(cctx, key);
+
+            return Collections.emptyList();
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridCursor<CacheDataRow> mvccAllVersionsCursor(GridCacheContext cctx,
+            KeyCacheObject key, Object x) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.mvccAllVersionsCursor(cctx, key, x);
+
+            return EMPTY_CURSOR;
+        }
+
+
+        /** {@inheritDoc} */
         @Override public GridCursor<? extends CacheDataRow> cursor() throws IgniteCheckedException {
             CacheDataStore delegate = init0(true);
 
@@ -1644,6 +1920,27 @@
         }
 
         /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(Object x) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.cursor(x);
+
+            return EMPTY_CURSOR;
+        }
+
+        /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(MvccSnapshot mvccSnapshot)
+            throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.cursor(mvccSnapshot);
+
+            return EMPTY_CURSOR;
+        }
+
+        /** {@inheritDoc} */
         @Override public GridCursor<? extends CacheDataRow> cursor(
             int cacheId,
             KeyCacheObject lower,
@@ -1671,6 +1968,21 @@
         }
 
         /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId,
+            KeyCacheObject lower,
+            KeyCacheObject upper,
+            Object x,
+            MvccSnapshot mvccSnapshot)
+            throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.cursor(cacheId, lower, upper, x, mvccSnapshot);
+
+            return EMPTY_CURSOR;
+        }
+
+        /** {@inheritDoc} */
         @Override public void destroy() throws IgniteCheckedException {
             // No need to destroy delegate.
         }
@@ -1686,6 +1998,17 @@
         }
 
         /** {@inheritDoc} */
+        @Override public GridCursor<? extends CacheDataRow> cursor(int cacheId,
+            MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+            CacheDataStore delegate = init0(true);
+
+            if (delegate != null)
+                return delegate.cursor(cacheId, mvccSnapshot);
+
+            return EMPTY_CURSOR;
+        }
+
+        /** {@inheritDoc} */
         @Override public void clear(int cacheId) throws IgniteCheckedException {
             CacheDataStore delegate0 = init0(true);
 
@@ -1855,7 +2178,7 @@
     /**
      *
      */
-    private static final GridCursor<CacheDataRow> EMPTY_CURSOR = new GridCursor<CacheDataRow>() {
+    public static final GridCursor<CacheDataRow> EMPTY_CURSOR = new GridCursor<CacheDataRow>() {
         /** {@inheritDoc} */
         @Override public boolean next() {
             return false;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
index 92de54a..737b02a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java
@@ -42,6 +42,7 @@
 import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider;
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.processors.cache.CacheGroupContext;
 import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
 import org.apache.ignite.internal.processors.cache.GridCacheMapEntry;
@@ -69,6 +70,8 @@
 
 import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_DATA_REG_DEFAULT_NAME;
 import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_PAGE_SIZE;
+import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_WAL_ARCHIVE_MAX_SIZE;
+import static org.apache.ignite.configuration.DataStorageConfiguration.DFLT_WAL_HISTORY_SIZE;
 
 /**
  *
@@ -223,6 +226,16 @@
         if (dataRegionsInitialized)
             return;
 
+        initDataRegions0(memCfg);
+
+        dataRegionsInitialized = true;
+    }
+
+    /**
+     * @param memCfg Database config.
+     * @throws IgniteCheckedException If failed to initialize swap path.
+     */
+    protected void initDataRegions0(DataStorageConfiguration memCfg) throws IgniteCheckedException {
         DataRegionConfiguration[] dataRegionCfgs = memCfg.getDataRegionConfigurations();
 
         int dataRegions = dataRegionCfgs == null ? 0 : dataRegionCfgs.length;
@@ -251,8 +264,17 @@
             CU.isPersistenceEnabled(memCfg)
         );
 
+        for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) {
+            lsnr.onInitDataRegions(this);
+        }
+    }
 
-        dataRegionsInitialized = true;
+    /**
+     * @param kctx Kernal context.
+     * @return Database lifecycle listeners.
+     */
+    protected List<DatabaseLifecycleListener> getDatabaseListeners(GridKernalContext kctx) {
+        return kctx.internalSubscriptionProcessor().getDatabaseListeners();
     }
 
     /**
@@ -260,7 +282,7 @@
      * @param dataRegionCfg Data region config.
      * @throws IgniteCheckedException If failed to initialize swap path.
      */
-    protected void addDataRegion(
+    public void addDataRegion(
         DataStorageConfiguration dataStorageCfg,
         DataRegionConfiguration dataRegionCfg,
         boolean trackable
@@ -369,6 +391,30 @@
         }
 
         checkDataRegionConfiguration(memCfg, regNames, memCfg.getDefaultDataRegionConfiguration());
+
+        checkWalArchiveSizeConfiguration(memCfg);
+    }
+
+    /**
+     * Check wal archive size configuration for correctness.
+     *
+     * @param memCfg durable memory configuration for an Apache Ignite node.
+     */
+    private void checkWalArchiveSizeConfiguration(DataStorageConfiguration memCfg) throws IgniteCheckedException {
+        if (memCfg.getWalHistorySize() == DFLT_WAL_HISTORY_SIZE || memCfg.getWalHistorySize() == Integer.MAX_VALUE)
+            LT.warn(log, "DataRegionConfiguration.maxWalArchiveSize instead DataRegionConfiguration.walHistorySize " +
+            "would be used for removing old archive wal files");
+        else if(memCfg.getMaxWalArchiveSize() == DFLT_WAL_ARCHIVE_MAX_SIZE)
+            LT.warn(log, "walHistorySize was deprecated. maxWalArchiveSize should be used instead");
+        else
+            throw new IgniteCheckedException("Should be used only one of wal history size or max wal archive size." +
+                "(use DataRegionConfiguration.maxWalArchiveSize because DataRegionConfiguration.walHistorySize was deprecated)"
+            );
+
+        if(memCfg.getMaxWalArchiveSize() < memCfg.getWalSegmentSize())
+            throw new IgniteCheckedException(
+                "DataRegionConfiguration.maxWalArchiveSize should be greater than DataRegionConfiguration.walSegmentSize"
+            );
     }
 
     /**
@@ -659,21 +705,7 @@
 
     /** {@inheritDoc} */
     @Override protected void stop0(boolean cancel) {
-        if (dataRegionMap != null) {
-            for (DataRegion memPlc : dataRegionMap.values()) {
-                memPlc.pageMemory().stop();
-
-                memPlc.evictionTracker().stop();
-
-                unregisterMBean(memPlc.memoryMetrics().getName());
-            }
-
-            dataRegionMap.clear();
-
-            dataRegionMap = null;
-
-            dataRegionsInitialized = false;
-        }
+        onDeActivate(cctx.kernalContext());
     }
 
     /**
@@ -708,20 +740,6 @@
     }
 
     /**
-     *
-     */
-    public void lock() throws IgniteCheckedException {
-
-    }
-
-    /**
-     *
-     */
-    public void unLock() {
-
-    }
-
-    /**
      * No-op for non-persistent storage.
      */
     public void checkpointReadLock() {
@@ -757,6 +775,13 @@
     }
 
     /**
+     * @return Last checkpoint mark WAL pointer.
+     */
+    public WALPointer lastCheckpointMarkWalPointer() {
+        return null;
+    }
+
+    /**
      * Allows to wait checkpoint finished.
      *
      * @param reason Reason.
@@ -1012,6 +1037,7 @@
         final DataRegionMetricsImpl memMetrics
     ) {
         return new DirectMemoryProvider() {
+            /** */
             private final DirectMemoryProvider memProvider = memoryProvider0;
 
             @Override public void initialize(long[] chunkSizes) {
@@ -1067,11 +1093,33 @@
         startMemoryPolicies();
 
         initPageMemoryDataStructures(memCfg);
+
+        for (DatabaseLifecycleListener lsnr : getDatabaseListeners(kctx)) {
+            lsnr.afterInitialise(this);
+        }
     }
 
     /** {@inheritDoc} */
     @Override public void onDeActivate(GridKernalContext kctx) {
-        stop0(false);
+        for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) {
+            lsnr.beforeStop(this);
+        }
+
+        if (dataRegionMap != null) {
+            for (DataRegion memPlc : dataRegionMap.values()) {
+                memPlc.pageMemory().stop();
+
+                memPlc.evictionTracker().stop();
+
+                unregisterMBean(memPlc.memoryMetrics().getName());
+            }
+
+            dataRegionMap.clear();
+
+            dataRegionMap = null;
+
+            dataRegionsInitialized = false;
+        }
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java
index 7daef3c..6248765 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IndexStorageImpl.java
@@ -32,7 +32,9 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
 import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.Nullable;
 
 /**
  * Metadata storage.
@@ -78,7 +80,8 @@
         final byte allocSpace,
         final ReuseList reuseList,
         final long rootPageId,
-        final boolean initNew
+        final boolean initNew,
+        final FailureProcessor failureProcessor
     ) {
         try {
             this.pageMem = pageMem;
@@ -88,7 +91,7 @@
             this.reuseList = reuseList;
 
             metaTree = new MetaTree(grpId, allocPartId, allocSpace, pageMem, wal, globalRmvId, rootPageId,
-                reuseList, MetaStoreInnerIO.VERSIONS, MetaStoreLeafIO.VERSIONS, initNew);
+                reuseList, MetaStoreInnerIO.VERSIONS, MetaStoreLeafIO.VERSIONS, initNew, failureProcessor);
         }
         catch (IgniteCheckedException e) {
             throw new IgniteException(e);
@@ -164,6 +167,7 @@
          * @param reuseList Reuse list.
          * @param innerIos Inner IOs.
          * @param leafIos Leaf IOs.
+         * @param failureProcessor if the tree is corrupted.
          * @throws IgniteCheckedException If failed.
          */
         private MetaTree(
@@ -177,9 +181,10 @@
             final ReuseList reuseList,
             final IOVersions<? extends BPlusInnerIO<IndexItem>> innerIos,
             final IOVersions<? extends BPlusLeafIO<IndexItem>> leafIos,
-            final boolean initNew
+            final boolean initNew,
+            @Nullable FailureProcessor failureProcessor
         ) throws IgniteCheckedException {
-            super(treeName("meta", "Meta"), cacheId, pageMem, wal, globalRmvId, metaPageId, reuseList, innerIos, leafIos);
+            super(treeName("meta", "Meta"), cacheId, pageMem, wal, globalRmvId, metaPageId, reuseList, innerIos, leafIos, failureProcessor);
 
             this.allocPartId = allocPartId;
             this.allocSpace = allocSpace;
@@ -215,7 +220,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override protected IndexItem getRow(final BPlusIO<IndexItem> io, final long pageAddr,
+        @Override public IndexItem getRow(final BPlusIO<IndexItem> io, final long pageAddr,
             final int idx, Object ignore) throws IgniteCheckedException {
             return readRow(pageAddr, ((IndexIO)io).getOffset(pageAddr, idx));
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java
index ee7c255..6900b7e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.internal.processors.cache.CacheObjectContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
 import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner;
 
 /**
@@ -100,6 +101,8 @@
 
             try {
                 freeList.insertDataRow(row);
+
+                assert row.link() != 0L;
             }
             finally {
                 ctx.database().checkpointReadUnlock();
@@ -110,8 +113,8 @@
     /**
      * @param link Row link.
      * @param row New row data.
-     * @throws IgniteCheckedException If failed.
      * @return {@code True} if was able to update row.
+     * @throws IgniteCheckedException If failed.
      */
     public boolean updateRow(long link, CacheDataRow row) throws IgniteCheckedException {
         assert !persistenceEnabled || ctx.database().checkpointLockIsHeldByThread();
@@ -123,6 +126,29 @@
     }
 
     /**
+     * Run page handler operation over the row.
+     *
+     * @param link Row link.
+     * @param pageHnd Page handler.
+     * @param arg Page handler argument.
+     * @throws IgniteCheckedException If failed.
+     */
+    public <S, R> void updateDataRow(long link, PageHandler<S, R> pageHnd, S arg) throws IgniteCheckedException {
+        if (!persistenceEnabled)
+            freeList.updateDataRow(link, pageHnd, arg);
+        else {
+            ctx.database().checkpointReadLock();
+
+            try {
+                freeList.updateDataRow(link, pageHnd, arg);
+            }
+            finally {
+                ctx.database().checkpointReadUnlock();
+            }
+        }
+    }
+
+    /**
      * @return Free list.
      */
     public FreeList freeList() {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/Storable.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/Storable.java
index ae200df..133f0a1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/Storable.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/Storable.java
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.internal.processors.cache.persistence;
 
+import org.apache.ignite.IgniteCheckedException;
+
 /**
  * Simple interface for data, store in some RowStore.
  */
@@ -35,4 +37,16 @@
      * @return Partition.
      */
     public int partition();
+
+    /**
+     * @return Row size in page.
+     * @throws IgniteCheckedException If failed.
+     */
+    public int size() throws IgniteCheckedException;
+
+    /**
+     * @return Row header size in page. Header is indivisible part of row
+     * which is entirely available on the very first page followed by the row link.
+     */
+    public int headerSize();
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/StorageException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/StorageException.java
new file mode 100644
index 0000000..509dee6
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/StorageException.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence;
+
+import java.io.IOException;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Exception is needed to distinguish WAL manager & page store critical I/O errors.
+ */
+public class StorageException extends IgniteCheckedException implements InvalidEnvironmentException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * @param msg Error message.
+     * @param cause Error cause.
+     */
+    public StorageException(String msg, @NotNull IOException cause) {
+        super(msg, cause);
+    }
+
+    /**
+     * @param e Cause exception.
+     */
+    public StorageException(IOException e) {
+        super(e);
+    }
+
+    /**
+     * @param msg Error message
+     */
+    public StorageException(String msg) {
+        super(msg);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java
index 3fb8457..e64a53f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/checkpoint/CheckpointHistory.java
@@ -35,6 +35,7 @@
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.Checkpoint;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -64,6 +65,9 @@
     /** The maximal number of checkpoints hold in memory. */
     private final int maxCpHistMemSize;
 
+    /** If WalHistorySize was setted by user will use old way for removing checkpoints. */
+    private final boolean isWalHistorySizeParameterEnabled;
+
     /**
      * Constructor.
      *
@@ -77,6 +81,8 @@
 
         maxCpHistMemSize = Math.min(dsCfg.getWalHistorySize(),
             IgniteSystemProperties.getInteger(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, 100));
+
+        isWalHistorySizeParameterEnabled = dsCfg.isWalHistorySizeParameterUsed();
     }
 
     /**
@@ -198,36 +204,98 @@
      *
      * @return List of checkpoints removed from history.
      */
-    public List<CheckpointEntry> onCheckpointFinished(GridCacheDatabaseSharedManager.Checkpoint chp, boolean truncateWal) {
-        List<CheckpointEntry> rmv = new ArrayList<>();
-
+    public List<CheckpointEntry> onCheckpointFinished(Checkpoint chp, boolean truncateWal) {
         chp.walSegsCoveredRange(calculateWalSegmentsCovered());
 
+        WALPointer checkpointMarkUntilDel = isWalHistorySizeParameterEnabled //check for compatibility mode.
+            ? checkpointMarkUntilDeleteByMemorySize()
+            : newerPointer(checkpointMarkUntilDeleteByMemorySize(), checkpointMarkUntilDeleteByArchiveSize());
+
+        if (checkpointMarkUntilDel == null)
+            return Collections.emptyList();
+
+        List<CheckpointEntry> deletedCheckpoints = onWalTruncated(checkpointMarkUntilDel);
+
         int deleted = 0;
 
-        while (histMap.size() > maxCpHistMemSize) {
-            Map.Entry<Long, CheckpointEntry> entry = histMap.firstEntry();
-
-            CheckpointEntry cpEntry = entry.getValue();
-
-            if (cctx.wal().reserved(cpEntry.checkpointMark())) {
-                U.warn(log, "Could not clear historyMap due to WAL reservation on cpEntry " + cpEntry.checkpointId() +
-                    ", history map size is " + histMap.size());
-
-                break;
-            }
-
-            if (truncateWal)
-                deleted += cctx.wal().truncate(null, cpEntry.checkpointMark());
-
-            histMap.remove(entry.getKey());
-
-            rmv.add(cpEntry);
-        }
+        if (truncateWal)
+            deleted += cctx.wal().truncate(null, firstCheckpointPointer());
 
         chp.walFilesDeleted(deleted);
 
-        return rmv;
+        return deletedCheckpoints;
+    }
+
+    /**
+     * @param firstPointer One of pointers to choose the newest.
+     * @param secondPointer One of pointers to choose the newest.
+     * @return The newest pointer from input ones.
+     */
+    private FileWALPointer newerPointer(WALPointer firstPointer, WALPointer secondPointer) {
+        FileWALPointer first = (FileWALPointer)firstPointer;
+        FileWALPointer second = (FileWALPointer)secondPointer;
+
+        if (firstPointer == null)
+            return second;
+
+        if (secondPointer == null)
+            return first;
+
+        return first.index() > second.index() ? first : second;
+    }
+
+    /**
+     * Calculate mark until delete by maximum checkpoint history memory size.
+     *
+     * @return Checkpoint mark until which checkpoints can be deleted(not including this pointer).
+     */
+    private WALPointer checkpointMarkUntilDeleteByMemorySize() {
+        if (histMap.size() <= maxCpHistMemSize)
+            return null;
+
+        int calculatedCpHistSize = maxCpHistMemSize;
+
+        for (Map.Entry<Long, CheckpointEntry> entry : histMap.entrySet()) {
+            if (histMap.size() <= calculatedCpHistSize++)
+                return entry.getValue().checkpointMark();
+        }
+
+        return lastCheckpoint().checkpointMark();
+    }
+
+    /**
+     * Calculate mark until delete by maximum allowed archive size.
+     *
+     * @return Checkpoint mark until which checkpoints can be deleted(not including this pointer).
+     */
+    @Nullable private WALPointer checkpointMarkUntilDeleteByArchiveSize() {
+        long absFileIdxToDel = cctx.wal().maxArchivedSegmentToDelete();
+
+        if (absFileIdxToDel < 0)
+            return null;
+
+        long fileUntilDel = absFileIdxToDel + 1;
+
+        long checkpointFileIdx = absFileIdx(lastCheckpoint());
+
+        for (CheckpointEntry cpEntry : histMap.values()) {
+            long currFileIdx = absFileIdx(cpEntry);
+
+            if (checkpointFileIdx <= currFileIdx || fileUntilDel <= currFileIdx)
+                return cpEntry.checkpointMark();
+        }
+
+        return lastCheckpoint().checkpointMark();
+    }
+
+    /**
+     * Retrieve absolute file index by checkpoint entry.
+     *
+     * @param pointer checkpoint entry for which need to calculate absolute file index.
+     * @return absolute file index for given checkpoint entry.
+     */
+    private long absFileIdx(CheckpointEntry pointer) {
+        return ((FileWALPointer)pointer.checkpointMark()).index();
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AbstractFileIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AbstractFileIO.java
index 4723644..418c676 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AbstractFileIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/AbstractFileIO.java
@@ -46,7 +46,7 @@
      *
      * @param num Number of bytes to operate.
      */
-    private int fully(IOOperation operation, int num, boolean write) throws IOException {
+    private int fully(IOOperation operation, long position, int num, boolean write) throws IOException {
         if (num > 0) {
             long time = 0;
 
@@ -58,14 +58,17 @@
                     time = 0;
                 }
                 else if (n == 0) {
+                    if (!write && available(num - i, position + i) == 0)
+                        return i;
+
                     if (time == 0)
                         time = U.currentTimeMillis();
                     else if ((U.currentTimeMillis() - time) >= MAX_IO_TIMEOUT_MS)
-                        throw new IOException(write && position() == size() ? "Failed to extend file." :
+                        throw new IOException(write && (position + i) == size() ? "Failed to extend file." :
                             "Probably disk is too busy, please check your device.");
                 }
                 else
-                    throw new EOFException("EOF at position [" + position() + "] expected to read [" + num + "] bytes.");
+                    return -1;
             }
         }
 
@@ -78,7 +81,7 @@
             @Override public int run(int offs) throws IOException {
                 return read(destBuf);
             }
-        }, available(destBuf.remaining()), false);
+        }, position(), destBuf.remaining(), false);
     }
 
     /** {@inheritDoc} */
@@ -87,7 +90,7 @@
             @Override public int run(int offs) throws IOException {
                 return read(destBuf, position + offs);
             }
-        }, available(destBuf.remaining(), position), false);
+        }, position, destBuf.remaining(), false);
     }
 
     /** {@inheritDoc} */
@@ -96,7 +99,7 @@
             @Override public int run(int offs) throws IOException {
                 return read(buf, off + offs, len - offs);
             }
-        }, len, false);
+        }, position(), len, false);
     }
 
     /** {@inheritDoc} */
@@ -105,7 +108,7 @@
             @Override public int run(int offs) throws IOException {
                 return write(srcBuf);
             }
-        }, srcBuf.remaining(), true);
+        }, position(), srcBuf.remaining(), true);
     }
 
     /** {@inheritDoc} */
@@ -114,7 +117,7 @@
             @Override public int run(int offs) throws IOException {
                 return write(srcBuf, position + offs);
             }
-        }, srcBuf.remaining(), true);
+        }, position, srcBuf.remaining(), true);
     }
 
     /** {@inheritDoc} */
@@ -123,19 +126,14 @@
             @Override public int run(int offs) throws IOException {
                 return write(buf, off + offs, len - offs);
             }
-        }, len, true);
-    }
-
-    /**
-     * @param requested Requested.
-     */
-    private int available(int requested) throws IOException {
-        return available(requested, position());
+        }, position(), len, true);
     }
 
     /**
      * @param requested Requested.
      * @param position Position.
+     *
+     * @return Bytes available.
      */
     private int available(int requested, long position) throws IOException {
         long avail = size() - position;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java
index d2d5506..d4d47163 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java
@@ -33,6 +33,7 @@
 import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.pagemem.store.PageStore;
 import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
 import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException;
 import org.apache.ignite.internal.processors.cache.persistence.wal.crc.PureJavaCrc32;
@@ -165,7 +166,7 @@
         try {
             ByteBuffer hdr = header(type, dbCfg.getPageSize());
 
-        fileIO.writeFully(hdr);
+            fileIO.writeFully(hdr);
 
             //there is 'super' page in every file
             return headerSize() + dbCfg.getPageSize();
@@ -182,7 +183,7 @@
      * Checks that file store has correct header and size.
      *
      * @return Next available position in the file to store a data.
-     * @throws IOException If check is failed.
+     * @throws IOException If check has failed.
      */
     private long checkFile(FileIO fileIO) throws IOException {
         ByteBuffer hdr = ByteBuffer.allocate(headerSize()).order(ByteOrder.LITTLE_ENDIAN);
@@ -233,10 +234,10 @@
     }
 
     /**
-     * @param cleanFile {@code True} to delete file.
-     * @throws PersistentStorageIOException If failed.
+     * @param delete {@code True} to delete file.
+     * @throws StorageException If failed in case of underlying I/O exception.
      */
-    public void stop(boolean cleanFile) throws PersistentStorageIOException {
+    public void stop(boolean delete) throws StorageException {
         lock.writeLock().lock();
 
         try {
@@ -249,11 +250,12 @@
 
             fileIO = null;
 
-            if (cleanFile)
+            if (delete)
                 Files.delete(cfgFile.toPath());
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException(e);
+            throw new StorageException("Failed to stop serving partition file [file=" + cfgFile.getPath()
+                + ", delete=" + delete + "]", e);
         }
         finally {
             lock.writeLock().unlock();
@@ -264,9 +266,9 @@
      * Truncates and deletes partition file.
      *
      * @param tag New partition tag.
-     * @throws PersistentStorageIOException If failed
+     * @throws StorageException If failed in case of underlying I/O exception.
      */
-    public void truncate(int tag) throws PersistentStorageIOException {
+    public void truncate(int tag) throws StorageException {
         init();
 
         lock.writeLock().lock();
@@ -283,12 +285,10 @@
             Files.delete(cfgFile.toPath());
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to delete partition file: " + cfgFile.getPath(), e);
+            throw new StorageException("Failed to truncate partition file [file=" + cfgFile.getPath() + "]", e);
         }
         finally {
-            allocatedTracker.updateTotalAllocatedPages(-1L * allocated.get() / pageSize);
-
-            allocated.set(0);
+            allocatedTracker.updateTotalAllocatedPages(-1L * allocated.getAndSet(0) / pageSize);
 
             inited = false;
 
@@ -311,16 +311,15 @@
     }
 
     /**
-     *
+     * @throws StorageException If failed in case of underlying I/O exception.
      */
-    public void finishRecover() throws PersistentStorageIOException {
+    public void finishRecover() throws StorageException {
         lock.writeLock().lock();
 
         try {
-            // Since we always have a meta-page in the store, never revert allocated counter to a value smaller than
-            // header + page.
+            // Since we always have a meta-page in the store, never revert allocated counter to a value smaller than page.
             if (inited) {
-                long newSize = Math.max(headerSize() + pageSize, fileIO.size());
+                long newSize = Math.max(pageSize, fileIO.size() - headerSize());
 
                 long delta = newSize - allocated.getAndSet(newSize);
 
@@ -332,7 +331,7 @@
             recover = false;
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Failed to finish recover", e);
+            throw new StorageException("Failed to finish recover partition file [file=" + cfgFile.getAbsolutePath() + "]", e);
         }
         finally {
             lock.writeLock().unlock();
@@ -347,28 +346,20 @@
             long off = pageOffset(pageId);
 
             assert pageBuf.capacity() == pageSize;
+            assert pageBuf.remaining() == pageSize;
             assert pageBuf.position() == 0;
             assert pageBuf.order() == ByteOrder.nativeOrder();
-            assert off <= (allocated.get() - headerSize()) : "calculatedOffset=" + off +
-                ", allocated=" + allocated.get() + ", headerSize="+headerSize();
+            assert off <= allocated.get() : "calculatedOffset=" + off +
+                ", allocated=" + allocated.get() + ", headerSize=" + headerSize();
 
-            int len = pageSize;
+            int n = readWithFailover(pageBuf, off);
 
-            do {
-                int n = readWithFailover(pageBuf, off);
+            // If page was not written yet, nothing to read.
+            if (n < 0) {
+                pageBuf.put(new byte[pageBuf.remaining()]);
 
-                // If page was not written yet, nothing to read.
-                if (n < 0) {
-                    pageBuf.put(new byte[pageBuf.remaining()]);
-
-                    return;
-                }
-
-                off += n;
-
-                len -= n;
+                return;
             }
-            while (len > 0);
 
             int savedCrc32 = PageIO.getCrc(pageBuf);
 
@@ -394,7 +385,7 @@
                 PageIO.setCrc(pageBuf, savedCrc32);
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Read error", e);
+            throw new StorageException("Failed to read page [file=" + cfgFile.getAbsolutePath() + ", pageId=" + pageId + "]", e);
         }
     }
 
@@ -405,32 +396,17 @@
         try {
             assert buf.remaining() == headerSize();
 
-            int len = headerSize();
-
-            long off = 0;
-
-            do {
-                int n = readWithFailover(buf, off);
-
-                // If page was not written yet, nothing to read.
-                if (n < 0)
-                    return;
-
-                off += n;
-
-                len -= n;
-            }
-            while (len > 0);
+            readWithFailover(buf, 0);
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Read error", e);
+            throw new StorageException("Failed to read header [file=" + cfgFile.getAbsolutePath() + "]", e);
         }
     }
 
     /**
-     * @throws PersistentStorageIOException If failed to initialize store file.
+     * @throws StorageException If failed to initialize store file.
      */
-    private void init() throws PersistentStorageIOException {
+    private void init() throws StorageException {
         if (!inited) {
             lock.writeLock().lock();
 
@@ -438,7 +414,7 @@
                 if (!inited) {
                     FileIO fileIO = null;
 
-                    PersistentStorageIOException err = null;
+                    StorageException err = null;
 
                     long newSize;
 
@@ -449,7 +425,7 @@
                             try {
                                 this.fileIO = fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE);
 
-                                newSize = cfgFile.length() == 0 ? initFile(fileIO) : checkFile(fileIO);
+                                newSize = (cfgFile.length() == 0 ? initFile(fileIO) : checkFile(fileIO)) - headerSize();
 
                                 if (interrupted)
                                     Thread.currentThread().interrupt();
@@ -465,15 +441,17 @@
 
                         assert allocated.get() == 0;
 
-                        allocatedTracker.updateTotalAllocatedPages(newSize / pageSize);
-
                         allocated.set(newSize);
 
                         inited = true;
+
+                        // Order is important, update of total allocated pages must be called after allocated update
+                        // and setting inited to true, because it affects pages() returned value.
+                        allocatedTracker.updateTotalAllocatedPages(pages());
                     }
                     catch (IOException e) {
-                        err = new PersistentStorageIOException(
-                            "Failed to initialize partition file: " + cfgFile.getName(), e);
+                        err = new StorageException(
+                            "Failed to initialize partition file: " + cfgFile.getAbsolutePath(), e);
 
                         throw err;
                     }
@@ -572,7 +550,7 @@
 
                     long off = pageOffset(pageId);
 
-                    assert (off >= 0 && off + headerSize() <= allocated.get()) || recover :
+                    assert (off >= 0 && off <= allocated.get()) || recover :
                         "off=" + U.hexLong(off) + ", allocated=" + U.hexLong(allocated.get()) + ", pageId=" + U.hexLong(pageId);
 
                     assert pageBuf.capacity() == pageSize;
@@ -631,8 +609,8 @@
                     }
                 }
 
-                throw new PersistentStorageIOException("Failed to write the page to the file store [pageId=" + pageId
-                    + ", file=" + cfgFile.getAbsolutePath() + ']', e);
+                throw new StorageException("Failed to write page [file=" + cfgFile.getAbsolutePath()
+                    + ", pageId=" + pageId + ", tag=" + tag + "]", e);
             }
         }
     }
@@ -658,7 +636,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void sync() throws IgniteCheckedException {
+    @Override public void sync() throws StorageException {
         lock.writeLock().lock();
 
         try {
@@ -670,7 +648,7 @@
                 fileIO.force();
         }
         catch (IOException e) {
-            throw new PersistentStorageIOException("Sync error", e);
+            throw new StorageException("Failed to fsync partition file [file=" + cfgFile.getAbsolutePath() + "]", e);
         }
         finally {
             lock.writeLock().unlock();
@@ -686,9 +664,7 @@
     @Override public long allocatePage() throws IgniteCheckedException {
         init();
 
-        long off = allocPage();
-
-        return (off - headerSize()) / pageSize;
+        return allocPage() / pageSize;
     }
 
     /**
@@ -716,7 +692,7 @@
         if (!inited)
             return 0;
 
-        return (int)((allocated.get() - headerSize()) / pageSize);
+        return (int)(allocated.get() / pageSize);
     }
 
     /**
@@ -738,7 +714,7 @@
             try {
                 assert destBuf.remaining() > 0;
 
-                int bytesRead = fileIO.read(destBuf, position);
+                int bytesRead = fileIO.readFully(destBuf, position);
 
                 if (interrupted)
                     Thread.currentThread().interrupt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java
index 3d23d53..020f84e 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java
@@ -55,6 +55,7 @@
 import org.apache.ignite.internal.processors.cache.StoredCacheData;
 import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
 import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager;
@@ -76,6 +77,12 @@
     /** File suffix. */
     public static final String FILE_SUFFIX = ".bin";
 
+    /** Suffix for zip files */
+    public static final String ZIP_SUFFIX = ".zip";
+
+    /** Suffix for tmp files */
+    public static final String TMP_SUFFIX = ".tmp";
+
     /** Partition file prefix. */
     public static final String PART_FILE_PREFIX = "part-";
 
@@ -95,7 +102,7 @@
     public static final String CACHE_DATA_FILENAME = "cache_data.dat";
 
     /** */
-    public static final String CACHE_DATA_TMP_FILENAME = CACHE_DATA_FILENAME + ".tmp";
+    public static final String CACHE_DATA_TMP_FILENAME = CACHE_DATA_FILENAME + TMP_SUFFIX;
 
     /** */
     public static final String DFLT_STORE_DIR = "db";
@@ -270,7 +277,7 @@
                     partStore.finishRecover();
             }
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -278,8 +285,24 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cacheData)
+    @Override public void initialize(int cacheId, int partitions, String workingDir, AllocatedPageTracker tracker)
         throws IgniteCheckedException {
+        if (!idxCacheStores.containsKey(cacheId)) {
+            CacheStoreHolder holder = initDir(
+                new File(storeWorkDir, workingDir),
+                cacheId,
+                partitions,
+                tracker
+            );
+
+            CacheStoreHolder old = idxCacheStores.put(cacheId, holder);
+
+            assert old == null : "Non-null old store holder for cacheId: " + cacheId;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cacheData) throws IgniteCheckedException {
         int grpId = grpDesc.groupId();
 
         if (!idxCacheStores.containsKey(grpId)) {
@@ -292,8 +315,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void initializeForMetastorage()
-        throws IgniteCheckedException {
+    @Override public void initializeForMetastorage() throws IgniteCheckedException {
         int grpId = MetaStorage.METASTORAGE_CACHE_ID;
 
         if (!idxCacheStores.containsKey(grpId)) {
@@ -301,7 +323,7 @@
                 new File(storeWorkDir, META_STORAGE_NAME),
                 grpId,
                 1,
-                delta -> {/* No-op */} );
+                AllocatedPageTracker.NO_OP );
 
             CacheStoreHolder old = idxCacheStores.put(grpId, holder);
 
@@ -325,7 +347,7 @@
 
         if (overwrite || !file.exists() || file.length() == 0) {
             try {
-                File tmp = new File(file.getParent(), file.getName() + ".tmp");
+                File tmp = new File(file.getParent(), file.getName() + TMP_SUFFIX);
 
                 tmp.createNewFile();
 
@@ -399,7 +421,7 @@
         try {
             store.read(pageId, pageBuf, keepCrc);
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -420,7 +442,7 @@
         try {
             store.readHeader(buf);
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -456,7 +478,7 @@
         try {
             store.write(pageId, pageBuf, tag, calculateCrc);
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -503,7 +525,7 @@
      * @param cacheWorkDir Work directory.
      * @param grpId Group ID.
      * @param partitions Number of partitions.
-     * @param allocatedTracker Metrics updater
+     * @param allocatedTracker Metrics updater.
      * @return Cache store holder.
      * @throws IgniteCheckedException If failed.
      */
@@ -542,7 +564,7 @@
 
             return new CacheStoreHolder(idxStore, partStores);
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -580,7 +602,7 @@
 
             Path cacheWorkDirPath = cacheWorkDir.toPath();
 
-            Path tmp = cacheWorkDirPath.getParent().resolve(cacheWorkDir.getName() + ".tmp");
+            Path tmp = cacheWorkDirPath.getParent().resolve(cacheWorkDir.getName() + TMP_SUFFIX);
 
             if (Files.exists(tmp) && Files.isDirectory(tmp) &&
                     Files.exists(tmp.resolve(IgniteCacheSnapshotManager.TEMP_FILES_COMPLETENESS_MARKER))) {
@@ -634,7 +656,7 @@
         try {
             getStore(grpId, partId).sync();
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -646,7 +668,7 @@
         try {
             getStore(grpId, partId).ensure();
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -664,7 +686,7 @@
 
             return PageIdUtils.pageId(partId, flags, (int)pageIdx);
         }
-        catch (PersistentStorageIOException e) {
+        catch (StorageException e) {
             cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e));
 
             throw e;
@@ -1013,7 +1035,7 @@
         /**
          *
          */
-        public CacheStoreHolder(FilePageStore idxStore, FilePageStore[] partStores) {
+        CacheStoreHolder(FilePageStore idxStore, FilePageStore[] partStores) {
             this.idxStore = idxStore;
             this.partStores = partStores;
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/PersistentStorageIOException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/PersistentStorageIOException.java
deleted file mode 100644
index 7b3c303..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/PersistentStorageIOException.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.internal.processors.cache.persistence.file;
-
-import java.io.IOException;
-import org.apache.ignite.IgniteCheckedException;
-
-/**
- * Exception is needed to distinguish persistent storage I/O errors.
- */
-public class PersistentStorageIOException extends IgniteCheckedException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Create an instance of exception.
-     *
-     * @param cause Error cause.
-     */
-    public PersistentStorageIOException(IOException cause) {
-        super(cause);
-    }
-
-    /**
-     * Create an instance of exception.
-     *
-     * @param msg Error message.
-     * @param cause Error cause.
-     */
-    public PersistentStorageIOException(String msg, IOException cause) {
-        super(msg, cause);
-    }
-}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java
index ba6d822..ffef9af 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/filename/PdsConsistentIdProcessor.java
@@ -211,18 +211,12 @@
         }
 
         // was not able to find free slot, allocating new
-        final GridCacheDatabaseSharedManager.FileLockHolder rootDirLock = lockRootDirectory(pstStoreBasePath);
-
-        try {
+        try (final GridCacheDatabaseSharedManager.FileLockHolder rootDirLock = lockRootDirectory(pstStoreBasePath)) {
             final List<FolderCandidate> sortedCandidates = getNodeIndexSortedCandidates(pstStoreBasePath);
             final int nodeIdx = sortedCandidates.isEmpty() ? 0 : (sortedCandidates.get(sortedCandidates.size() - 1).nodeIndex() + 1);
 
             return generateAndLockNewDbStorage(pstStoreBasePath, nodeIdx);
         }
-        finally {
-            rootDirLock.release();
-            rootDirLock.close();
-        }
     }
 
     /**
@@ -505,11 +499,10 @@
         if (settings != null) {
             final GridCacheDatabaseSharedManager.FileLockHolder fileLockHolder = settings.getLockedFileLockHolder();
 
-            if (fileLockHolder != null) {
-                fileLockHolder.release();
+            if (fileLockHolder != null)
                 fileLockHolder.close();
-            }
         }
+
         super.stop(cancel);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java
index bcedd8c..4e1f783 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/AbstractFreeList.java
@@ -100,7 +100,7 @@
             throws IgniteCheckedException {
             AbstractDataPageIO<T> io = (AbstractDataPageIO<T>)iox;
 
-            int rowSize = io.getRowSize(row);
+            int rowSize = row.size();
 
             boolean updated = io.updateRow(pageAddr, itemId, pageSize(), null, row, rowSize);
 
@@ -146,7 +146,7 @@
             throws IgniteCheckedException {
             AbstractDataPageIO<T> io = (AbstractDataPageIO<T>)iox;
 
-            int rowSize = io.getRowSize(row);
+            int rowSize = row.size();
             int oldFreeSpace = io.getFreeSpace(pageAddr);
 
             assert oldFreeSpace > 0 : oldFreeSpace;
@@ -466,7 +466,7 @@
 
     /** {@inheritDoc} */
     @Override public void insertDataRow(T row) throws IgniteCheckedException {
-        int rowSize = ioVersions().latest().getRowSize(row);
+        int rowSize = row.size();
 
         int written = 0;
 
@@ -546,6 +546,20 @@
     }
 
     /** {@inheritDoc} */
+    @Override public <S, R> R updateDataRow(long link, PageHandler<S, R> pageHnd, S arg) throws IgniteCheckedException {
+        assert link != 0;
+
+        long pageId = PageIdUtils.pageId(link);
+        int itemId = PageIdUtils.itemId(link);
+
+        R updRes = write(pageId, pageHnd, arg, itemId, null);
+
+        assert updRes != null; // Can't fail here.
+
+        return updRes;
+    }
+
+    /** {@inheritDoc} */
     @Override public void removeDataRowByLink(long link) throws IgniteCheckedException {
         assert link != 0;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java
index bdca21c..e73124e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/FreeList.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.internal.processors.cache.persistence.Storable;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
 
 /**
  */
@@ -40,6 +41,17 @@
 
     /**
      * @param link Row link.
+     * @param pageHnd Page handler.
+     * @param arg Handler argument.
+     * @param <S> Argument type.
+     * @param <R> Result type.
+     * @return Result.
+     * @throws IgniteCheckedException If failed.
+     */
+    public <S, R>  R updateDataRow(long link, PageHandler<S, R> pageHnd, S arg) throws IgniteCheckedException;
+
+    /**
+     * @param link Row link.
      * @throws IgniteCheckedException If failed.
      */
     public void removeDataRowByLink(long link) throws IgniteCheckedException;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java
index 14bd450..c0fba73 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetaStorage.java
@@ -29,7 +29,7 @@
 import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord;
@@ -51,6 +51,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.SimpleDataPageIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
 import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -115,6 +116,9 @@
     private final Marshaller marshaller = new JdkMarshaller();
 
     /** */
+    private final FailureProcessor failureProcessor;
+
+    /** */
     public MetaStorage(
         GridCacheSharedContext cctx,
         DataRegion dataRegion,
@@ -126,6 +130,7 @@
         this.regionMetrics = regionMetrics;
         this.readOnly = readOnly;
         log = cctx.logger(getClass());
+        this.failureProcessor = cctx.kernalContext().failure();
     }
 
     /** */
@@ -145,7 +150,7 @@
             MetastorageRowStore rowStore = new MetastorageRowStore(freeList, db);
 
             tree = new MetastorageTree(METASTORAGE_CACHE_ID, dataRegion.pageMemory(), wal, rmvId,
-                freeList, rowStore, treeRoot.pageId().pageId(), treeRoot.isAllocated());
+                freeList, rowStore, treeRoot.pageId().pageId(), treeRoot.isAllocated(), failureProcessor);
 
             if (!readOnly)
                 ((GridCacheDatabaseSharedManager)db).addCheckpointListener(this);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageDataRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageDataRow.java
index 271efdf..5e2660b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageDataRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageDataRow.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache.persistence.metastorage;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.cache.persistence.Storable;
 
 /**
@@ -48,7 +49,7 @@
     /**
      * @return Key.
      */
-    public String key() {
+    @Override public String key() {
         return key;
     }
 
@@ -65,6 +66,16 @@
     }
 
     /** {@inheritDoc} */
+    @Override public int size() throws IgniteCheckedException {
+        return 4 + value().length;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int headerSize() {
+        return 0;
+    }
+
+    /** {@inheritDoc} */
     @Override
     public void link(long link) {
         this.link = link;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java
index 445522b..00db5cd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/metastorage/MetastorageTree.java
@@ -28,6 +28,8 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
+import org.jetbrains.annotations.Nullable;
 
 /**
  *
@@ -40,12 +42,15 @@
     private MetastorageRowStore rowStore;
 
     /**
-     * @param pageMem
-     * @param wal
-     * @param globalRmvId
-     * @param metaPageId
-     * @param reuseList
-     * @throws IgniteCheckedException
+     * @param pageMem Page memory instance.
+     * @param wal WAL manager.
+     * @param globalRmvId Global remove ID.
+     * @param metaPageId Meta page ID.
+     * @param reuseList Reuse list.
+     * @param rowStore Row store.
+     * @param initNew Init new flag, if {@code true}, then new tree will be allocated.
+     * @param failureProcessor To call if the tree is corrupted.
+     * @throws IgniteCheckedException If failed to initialize.
      */
     public MetastorageTree(int cacheId,
         PageMemory pageMem,
@@ -54,9 +59,10 @@
         ReuseList reuseList,
         MetastorageRowStore rowStore,
         long metaPageId,
-        boolean initNew) throws IgniteCheckedException {
+        boolean initNew,
+        @Nullable FailureProcessor failureProcessor) throws IgniteCheckedException {
         super("Metastorage", cacheId, pageMem, wal,
-            globalRmvId, metaPageId, reuseList, MetastorageInnerIO.VERSIONS, MetastoreLeafIO.VERSIONS);
+            globalRmvId, metaPageId, reuseList, MetastorageInnerIO.VERSIONS, MetastoreLeafIO.VERSIONS, failureProcessor);
 
         this.rowStore = rowStore;
 
@@ -65,7 +71,7 @@
 
     /** {@inheritDoc} */
     @Override protected int compare(BPlusIO<MetastorageSearchRow> io, long pageAddr, int idx,
-        MetastorageSearchRow row) throws IgniteCheckedException {
+        MetastorageSearchRow row) {
 
         String key = ((DataLinkIO)io).getKey(pageAddr, idx);
 
@@ -73,7 +79,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected MetastorageDataRow getRow(BPlusIO<MetastorageSearchRow> io, long pageAddr, int idx,
+    @Override public MetastorageDataRow getRow(BPlusIO<MetastorageSearchRow> io, long pageAddr, int idx,
         Object x) throws IgniteCheckedException {
         long link = ((DataLinkIO)io).getLink(pageAddr, idx);
         String key = ((DataLinkIO)io).getKey(pageAddr, idx);
@@ -132,7 +138,7 @@
 
         /** {@inheritDoc} */
         @Override public void storeByOffset(long pageAddr, int off,
-            MetastorageSearchRow row) throws IgniteCheckedException {
+            MetastorageSearchRow row) {
             assert row.link() != 0;
 
             PageUtils.putLong(pageAddr, off, row.link());
@@ -146,7 +152,7 @@
 
         /** {@inheritDoc} */
         @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<MetastorageSearchRow> srcIo, long srcPageAddr,
-            int srcIdx) throws IgniteCheckedException {
+            int srcIdx) {
             int srcOff = srcIo.offset(srcIdx);
             int dstOff = offset(dstIdx);
 
@@ -162,7 +168,7 @@
 
         /** {@inheritDoc} */
         @Override public MetastorageSearchRow getLookupRow(BPlusTree<MetastorageSearchRow, ?> tree, long pageAddr,
-            int idx) throws IgniteCheckedException {
+            int idx) {
             long link = getLink(pageAddr, idx);
             String key = getKey(pageAddr, idx);
 
@@ -207,7 +213,7 @@
 
         /** {@inheritDoc} */
         @Override public void storeByOffset(long pageAddr, int off,
-            MetastorageSearchRow row) throws IgniteCheckedException {
+            MetastorageSearchRow row) {
             assert row.link() != 0;
 
             PageUtils.putLong(pageAddr, off, row.link());
@@ -221,7 +227,7 @@
 
         /** {@inheritDoc} */
         @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<MetastorageSearchRow> srcIo, long srcPageAddr,
-            int srcIdx) throws IgniteCheckedException {
+            int srcIdx) {
             int srcOff = srcIo.offset(srcIdx);
             int dstOff = offset(dstIdx);
 
@@ -237,7 +243,7 @@
 
         /** {@inheritDoc} */
         @Override public MetastorageSearchRow getLookupRow(BPlusTree<MetastorageSearchRow, ?> tree, long pageAddr,
-            int idx) throws IgniteCheckedException {
+            int idx) {
             long link = getLink(pageAddr, idx);
             String key = getKey(pageAddr, idx);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java
index ed0d304..af204dd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java
@@ -23,7 +23,7 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.pagemem.FullPageId;
 import org.apache.ignite.internal.pagemem.PageMemory;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.util.GridMultiCollectionWrapper;
 import org.jetbrains.annotations.Nullable;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java
index 1524150..ba565c9 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java
@@ -52,7 +52,7 @@
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord;
@@ -778,9 +778,13 @@
 
                 ByteBuffer buf = wrapPointer(pageAddr, pageSize());
 
+                long actualPageId = 0;
+
                 try {
                     storeMgr.read(grpId, pageId, buf);
 
+                    actualPageId = PageIO.getPageId(buf);
+
                     memMetrics.onPageRead();
                 }
                 catch (IgniteDataIntegrityViolationException ignore) {
@@ -794,7 +798,8 @@
                     memMetrics.onPageRead();
                 }
                 finally {
-                    rwLock.writeUnlock(lockedPageAbsPtr + PAGE_LOCK_OFFSET, OffheapReadWriteLock.TAG_LOCK_ALWAYS);
+                    rwLock.writeUnlock(lockedPageAbsPtr + PAGE_LOCK_OFFSET,
+                        actualPageId == 0 ? OffheapReadWriteLock.TAG_LOCK_ALWAYS : PageIdUtils.tag(actualPageId));
                 }
             }
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java
index d5f4bd5..2828c43 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java
@@ -16,13 +16,18 @@
 */
 package org.apache.ignite.internal.processors.cache.persistence.pagemem;
 
+import java.util.Collection;
+import java.util.concurrent.ConcurrentLinkedQueue;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.locks.LockSupport;
 import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker;
 import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier;
 import org.apache.ignite.internal.util.typedef.internal.U;
 
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_STARVATION_CHECK_INTERVAL;
+
 /**
  * Throttles threads that generate dirty pages during ongoing checkpoint.
  * Designed to avoid zero dropdowns that can happen if checkpoint buffer is overflowed.
@@ -46,6 +51,9 @@
     /** Backoff ratio. Each next park will be this times longer. */
     private static final double BACKOFF_RATIO = 1.05;
 
+    /** Checkpoint buffer fullfill upper bound. */
+    private static final float CP_BUF_FILL_THRESHOLD = 2f / 3;
+
     /** Counter for dirty pages ratio throttling. */
     private final AtomicInteger notInCheckpointBackoffCntr = new AtomicInteger(0);
 
@@ -55,6 +63,9 @@
     /** Logger. */
     private IgniteLogger log;
 
+    /** Currently parking threads. */
+    private final Collection<Thread> parkThrds = new ConcurrentLinkedQueue<>();
+
     /**
      * @param pageMemory Page memory.
      * @param cpProgress Database manager.
@@ -85,7 +96,7 @@
         boolean shouldThrottle = false;
 
         if (isPageInCheckpoint) {
-            int checkpointBufLimit = pageMemory.checkpointBufferPagesSize() * 2 / 3;
+            int checkpointBufLimit = (int)(pageMemory.checkpointBufferPagesSize() * CP_BUF_FILL_THRESHOLD);
 
             shouldThrottle = pageMemory.checkpointBufferPagesCount() > checkpointBufLimit;
         }
@@ -126,10 +137,19 @@
                     + " for timeout(ms)=" + (throttleParkTimeNs / 1_000_000));
             }
 
+            if (isPageInCheckpoint)
+                parkThrds.add(Thread.currentThread());
+
             LockSupport.parkNanos(throttleParkTimeNs);
         }
-        else
-            cntr.set(0);
+        else {
+            int oldCntr = cntr.getAndSet(0);
+
+            if (isPageInCheckpoint && oldCntr != 0) {
+                parkThrds.forEach(LockSupport::unpark);
+                parkThrds.clear();
+            }
+        }
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java
index 53a8017..e6aab79 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java
@@ -17,14 +17,19 @@
 
 package org.apache.ignite.internal.processors.cache.persistence.pagemem;
 
+import org.apache.ignite.IgniteSystemProperties;
+
 import java.util.concurrent.TimeUnit;
 
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_THROTTLE_LOG_THRESHOLD;
+
 /**
  * Throttling policy, encapsulates logic of delaying write operations.
  */
 public interface PagesWriteThrottlePolicy {
     /** Max park time. */
-    public long LOGGING_THRESHOLD = TimeUnit.SECONDS.toNanos(10);
+    public long LOGGING_THRESHOLD = TimeUnit.SECONDS.toNanos(IgniteSystemProperties.getInteger
+            (IGNITE_THROTTLE_LOG_THRESHOLD, 10));
 
     /**
      * Callback to apply throttling delay.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java
index e30de5a..55c0ef2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java
@@ -27,7 +27,11 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.failure.FailureContext;
+import org.apache.ignite.failure.FailureType;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.UnregisteredBinaryTypeException;
+import org.apache.ignite.internal.UnregisteredClassException;
 import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
@@ -53,6 +57,8 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseBag;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
 import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandlerWrapper;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
 import org.apache.ignite.internal.util.GridArrays;
 import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.IgniteTree;
@@ -65,6 +71,7 @@
 import org.apache.ignite.lang.IgniteInClosure;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_BPLUS_TREE_LOCK_RETRIES;
 import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Bool.DONE;
 import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Bool.FALSE;
 import static org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree.Bool.READY;
@@ -84,6 +91,9 @@
     /** */
     private static final Object[] EMPTY = {};
 
+    /** Wrapper for tree pages operations. Noop by default. Override for test purposes. */
+    public static volatile PageHandlerWrapper<Result> pageHndWrapper = (tree, hnd) -> hnd;
+
     /** */
     private static volatile boolean interrupted;
 
@@ -92,7 +102,7 @@
 
     /** */
     private static final int LOCK_RETRIES = IgniteSystemProperties.getInteger(
-        IgniteSystemProperties.IGNITE_BPLUS_TREE_LOCK_RETRIES, IGNITE_BPLUS_TREE_LOCK_RETRIES_DEFAULT);
+        IGNITE_BPLUS_TREE_LOCK_RETRIES, IGNITE_BPLUS_TREE_LOCK_RETRIES_DEFAULT);
 
     /** */
     private final AtomicBoolean destroyed = new AtomicBoolean(false);
@@ -124,6 +134,9 @@
     /** */
     private volatile TreeMetaData treeMeta;
 
+    /** Failure processor. */
+    private final FailureProcessor failureProcessor;
+
     /** */
     private final GridTreePrinter<Long> treePrinter = new GridTreePrinter<Long>() {
         /** */
@@ -219,7 +232,7 @@
     };
 
     /** */
-    private final GetPageHandler<Get> askNeighbor = new AskNeighbor();
+    private final PageHandler<Get, Result> askNeighbor;
 
     /**
      *
@@ -250,12 +263,12 @@
     }
 
     /** */
-    private final GetPageHandler<Get> search = new Search();
+    private final PageHandler<Get, Result> search;
 
     /**
      *
      */
-    private class Search extends GetPageHandler<Get> {
+    public class Search extends GetPageHandler<Get> {
         /** {@inheritDoc} */
         @Override public Result run0(long pageId, long page, long pageAddr, BPlusIO<L> io, Get g, int lvl)
             throws IgniteCheckedException {
@@ -339,12 +352,12 @@
     }
 
     /** */
-    private final GetPageHandler<Put> replace = new Replace();
+    private final PageHandler<Put, Result> replace;
 
     /**
      *
      */
-    private class Replace extends GetPageHandler<Put> {
+    public class Replace extends GetPageHandler<Put> {
         /** {@inheritDoc} */
         @SuppressWarnings("unchecked")
         @Override public Result run0(long pageId, long page, long pageAddr, BPlusIO<L> io, Put p, int lvl)
@@ -401,12 +414,12 @@
     }
 
     /** */
-    private final GetPageHandler<Put> insert = new Insert();
+    private final PageHandler<Put, Result> insert;
 
     /**
      *
      */
-    private class Insert extends GetPageHandler<Put> {
+    public class Insert extends GetPageHandler<Put> {
         /** {@inheritDoc} */
         @Override public Result run0(long pageId, long page, long pageAddr, BPlusIO<L> io, Put p, int lvl)
             throws IgniteCheckedException {
@@ -450,7 +463,7 @@
     }
 
     /** */
-    private final GetPageHandler<Remove> rmvFromLeaf = new RemoveFromLeaf();
+    private final PageHandler<Remove, Result> rmvFromLeaf;
 
     /**
      *
@@ -525,7 +538,7 @@
     }
 
     /** */
-    private final GetPageHandler<Remove> lockBackAndRmvFromLeaf = new LockBackAndRmvFromLeaf();
+    private final PageHandler<Remove, Result> lockBackAndRmvFromLeaf;
 
     /**
      *
@@ -550,7 +563,7 @@
     }
 
     /** */
-    private final GetPageHandler<Remove> lockBackAndTail = new LockBackAndTail();
+    private final PageHandler<Remove, Result> lockBackAndTail;
 
     /**
      *
@@ -574,7 +587,7 @@
     }
 
     /** */
-    private final GetPageHandler<Remove> lockTailForward = new LockTailForward();
+    private final PageHandler<Remove, Result> lockTailForward;
 
     /**
      *
@@ -590,7 +603,7 @@
     }
 
     /** */
-    private final GetPageHandler<Remove> lockTail = new LockTail();
+    private final PageHandler<Remove, Result> lockTail;
 
     /**
      *
@@ -720,6 +733,7 @@
      * @param reuseList Reuse list.
      * @param innerIos Inner IO versions.
      * @param leafIos Leaf IO versions.
+     * @param failureProcessor if the tree is corrupted.
      * @throws IgniteCheckedException If failed.
      */
     protected BPlusTree(
@@ -731,9 +745,10 @@
         long metaPageId,
         ReuseList reuseList,
         IOVersions<? extends BPlusInnerIO<L>> innerIos,
-        IOVersions<? extends BPlusLeafIO<L>> leafIos
+        IOVersions<? extends BPlusLeafIO<L>> leafIos,
+        @Nullable FailureProcessor failureProcessor
     ) throws IgniteCheckedException {
-        this(name, cacheId, pageMem, wal, globalRmvId, metaPageId, reuseList);
+        this(name, cacheId, pageMem, wal, globalRmvId, metaPageId, reuseList, failureProcessor);
         setIos(innerIos, leafIos);
     }
 
@@ -745,6 +760,7 @@
      * @param globalRmvId Remove ID.
      * @param metaPageId Meta page ID.
      * @param reuseList Reuse list.
+     * @param failureProcessor if the tree is corrupted.
      * @throws IgniteCheckedException If failed.
      */
     protected BPlusTree(
@@ -754,7 +770,8 @@
         IgniteWriteAheadLogManager wal,
         AtomicLong globalRmvId,
         long metaPageId,
-        ReuseList reuseList
+        ReuseList reuseList,
+        @Nullable FailureProcessor failureProcessor
     ) throws IgniteCheckedException {
         super(cacheId, pageMem, wal);
 
@@ -770,6 +787,18 @@
         this.name = name;
         this.reuseList = reuseList;
         this.globalRmvId = globalRmvId;
+        this.failureProcessor = failureProcessor;
+
+        // Initialize page handlers.
+        askNeighbor = (PageHandler<Get, Result>) pageHndWrapper.wrap(this, new AskNeighbor());
+        search = (PageHandler<Get, Result>) pageHndWrapper.wrap(this, new Search());
+        lockTail = (PageHandler<Remove, Result>) pageHndWrapper.wrap(this, new LockTail());
+        lockTailForward = (PageHandler<Remove, Result>) pageHndWrapper.wrap(this, new LockTailForward());
+        lockBackAndTail = (PageHandler<Remove, Result>) pageHndWrapper.wrap(this, new LockBackAndTail());
+        lockBackAndRmvFromLeaf = (PageHandler<Remove, Result>) pageHndWrapper.wrap(this, new LockBackAndRmvFromLeaf());
+        rmvFromLeaf = (PageHandler<Remove, Result>) pageHndWrapper.wrap(this, new RemoveFromLeaf());
+        insert = (PageHandler<Put, Result>) pageHndWrapper.wrap(this, new Insert());
+        replace = (PageHandler<Put, Result>) pageHndWrapper.wrap(this, new Replace());
     }
 
     /**
@@ -901,12 +930,13 @@
 
     /**
      * @param upper Upper bound.
+     * @param c Filter closure.
      * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
      * @return Cursor.
      * @throws IgniteCheckedException If failed.
      */
-    private GridCursor<T> findLowerUnbounded(L upper, Object x) throws IgniteCheckedException {
-        ForwardCursor cursor = new ForwardCursor(null, upper, x);
+    private GridCursor<T> findLowerUnbounded(L upper, TreeRowClosure<L, T> c, Object x) throws IgniteCheckedException {
+        ForwardCursor cursor = new ForwardCursor(null, upper, c, x);
 
         long firstPageId;
 
@@ -924,7 +954,7 @@
             long pageAddr = readLock(firstPageId, firstPage); // We always merge pages backwards, the first page is never removed.
 
             try {
-                cursor.init(pageAddr, io(pageAddr), 0);
+                cursor.init(pageAddr, io(pageAddr), -1);
             }
             finally {
                 readUnlock(firstPageId, firstPage, pageAddr);
@@ -951,14 +981,26 @@
     }
 
     /** {@inheritDoc} */
-    public final GridCursor<T> find(L lower, L upper, Object x) throws IgniteCheckedException {
+    @Override public final GridCursor<T> find(L lower, L upper, Object x) throws IgniteCheckedException {
+        return find(lower, upper, null, x);
+    }
+
+    /**
+     * @param lower Lower bound inclusive or {@code null} if unbounded.
+     * @param upper Upper bound inclusive or {@code null} if unbounded.
+     * @param c Filter closure.
+     * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+     * @return Cursor.
+     * @throws IgniteCheckedException If failed.
+     */
+    public final GridCursor<T> find(L lower, L upper, TreeRowClosure<L, T> c, Object x) throws IgniteCheckedException {
         checkDestroyed();
 
         try {
             if (lower == null)
-                return findLowerUnbounded(upper, x);
+                return findLowerUnbounded(upper, c, x);
 
-            ForwardCursor cursor = new ForwardCursor(lower, upper, x);
+            ForwardCursor cursor = new ForwardCursor(lower, upper, c, x);
 
             cursor.find();
 
@@ -967,6 +1009,57 @@
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
         }
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
+        }
+        finally {
+            checkDestroyed();
+        }
+    }
+
+    /**
+     * @param lower Lower bound inclusive.
+     * @param upper Upper bound inclusive.
+     * @param c Closure applied for all found items, iteration is stopped if closure returns {@code false}.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void iterate(L lower, L upper, TreeRowClosure<L, T> c) throws IgniteCheckedException {
+        checkDestroyed();
+
+        try {
+            ClosureCursor cursor = new ClosureCursor(lower, upper, c);
+
+            cursor.iterate();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteCheckedException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
+        }
+        catch (RuntimeException e) {
+            throw new IgniteException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
+        }
+        catch (AssertionError e) {
+            throw new AssertionError("Assertion error on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
+        }
+        finally {
+            checkDestroyed();
+        }
+    }
+
+    /**
+     * @param lower Lower bound inclusive.
+     * @param upper Upper bound inclusive.
+     * @param c Closure applied for all found items.
+     * @throws IgniteCheckedException If failed.
+     */
+    public void visit(L lower, L upper, TreeVisitorClosure<L, T> c) throws IgniteCheckedException {
+        checkDestroyed();
+
+        try {
+           new TreeVisitor(lower, upper, c).visit();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteCheckedException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
+        }
         catch (RuntimeException e) {
             throw new IgniteException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e);
         }
@@ -980,50 +1073,109 @@
 
     /** {@inheritDoc} */
     @Override public T findFirst() throws IgniteCheckedException {
+        return findFirst(null);
+    }
+
+    /**
+     * Returns a value mapped to the lowest key, or {@code null} if tree is empty or no entry matches the passed filter.
+     * @param filter Filter closure.
+     * @return Value.
+     * @throws IgniteCheckedException If failed.
+     */
+    public T findFirst(TreeRowClosure<L, T> filter) throws IgniteCheckedException {
         checkDestroyed();
 
         try {
-            long firstPageId;
+            for (;;) {
+                long curPageId;
 
-            long metaPage = acquirePage(metaPageId);
-            try {
-                firstPageId = getFirstPageId(metaPageId, metaPage, 0);
-            }
-            finally {
-                releasePage(metaPageId, metaPage);
-            }
-
-            long page = acquirePage(firstPageId);
-
-            try {
-                long pageAddr = readLock(firstPageId, page);
+                long metaPage = acquirePage(metaPageId);
 
                 try {
-                    BPlusIO<L> io = io(pageAddr);
-
-                    int cnt = io.getCount(pageAddr);
-
-                    if (cnt == 0)
-                        return null;
-
-                    return getRow(io, pageAddr, 0);
+                    curPageId = getFirstPageId(metaPageId, metaPage, 0); // Level 0 is always at the bottom.
                 }
                 finally {
-                    readUnlock(firstPageId, page, pageAddr);
+                    releasePage(metaPageId, metaPage);
                 }
-            }
-            finally {
-                releasePage(firstPageId, page);
+
+                long curPage = acquirePage(curPageId);
+                try {
+                    long curPageAddr = readLock(curPageId, curPage);
+
+                    if (curPageAddr == 0)
+                        continue; // The first page has gone: restart scan.
+
+                    try {
+                        BPlusIO<L> io = io(curPageAddr);
+
+                        assert io.isLeaf();
+
+                        for (;;) {
+                            int cnt = io.getCount(curPageAddr);
+
+                            for (int i = 0; i < cnt; ++i) {
+                                if (filter == null || filter.apply(this, io, curPageAddr, i))
+                                    return getRow(io, curPageAddr, i);
+                            }
+
+                            long nextPageId = io.getForward(curPageAddr);
+
+                            if (nextPageId == 0)
+                                return null;
+
+                            long nextPage = acquirePage(nextPageId);
+
+                            try {
+                                long nextPageAddr = readLock(nextPageId, nextPage);
+
+                                // In the current implementation the next page can't change when the current page is locked.
+                                assert nextPageAddr != 0 : nextPageAddr;
+
+                                try {
+                                    long pa = curPageAddr;
+                                    curPageAddr = 0; // Set to zero to avoid double unlocking in finalizer.
+
+                                    readUnlock(curPageId, curPage, pa);
+
+                                    long p = curPage;
+                                    curPage = 0; // Set to zero to avoid double release in finalizer.
+
+                                    releasePage(curPageId, p);
+
+                                    curPageId = nextPageId;
+                                    curPage = nextPage;
+                                    curPageAddr = nextPageAddr;
+
+                                    nextPage = 0;
+                                    nextPageAddr = 0;
+                                }
+                                finally {
+                                    if (nextPageAddr != 0)
+                                        readUnlock(nextPageId, nextPage, nextPageAddr);
+                                }
+                            }
+                            finally {
+                                if (nextPage != 0)
+                                    releasePage(nextPageId, nextPage);
+                            }
+                        }
+                    }
+                    finally {
+                        if (curPageAddr != 0)
+                            readUnlock(curPageId, curPage, curPageAddr);
+                    }
+                }
+                finally {
+                    if (curPage != 0)
+                        releasePage(curPageId, curPage);
+                }
             }
         }
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on first row lookup", e);
         }
-        catch (RuntimeException e) {
-            throw new IgniteException("Runtime failure on first row lookup", e);
-        }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on first row lookup", e);
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on first row lookup", e);
         }
         finally {
             checkDestroyed();
@@ -1033,23 +1185,33 @@
     /** {@inheritDoc} */
     @SuppressWarnings("unchecked")
     @Override public T findLast() throws IgniteCheckedException {
+        return findLast(null);
+    }
+
+    /**
+     * Returns a value mapped to the greatest key, or {@code null} if tree is empty or no entry matches the passed filter.
+     * @param c Filter closure.
+     * @return Value.
+     * @throws IgniteCheckedException If failed.
+     */
+    public T findLast(final TreeRowClosure<L, T> c) throws IgniteCheckedException {
         checkDestroyed();
 
         try {
-            GetOne g = new GetOne(null, null, true);
-            doFind(g);
+            if (c == null) {
+                GetOne g = new GetOne(null, null, null, true);
+                doFind(g);
 
-            return (T)g.row;
+                return (T)g.row;
+            } else
+                return new GetLast(c).find();
         }
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on last row lookup", e);
         }
-        catch (RuntimeException e) {
+        catch (RuntimeException | AssertionError e) {
             throw new IgniteException("Runtime failure on last row lookup", e);
         }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on last row lookup", e);
-        }
         finally {
             checkDestroyed();
         }
@@ -1058,15 +1220,25 @@
     /**
      * @param row Lookup row for exact match.
      * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+     * @return Found result or {@code null}
+     * @throws IgniteCheckedException If failed.
+     */
+    public final <R> R findOne(L row, Object x) throws IgniteCheckedException {
+        return findOne(row, null, x);
+    }
+
+    /**
+     * @param row Lookup row for exact match.
+     * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
      * @return Found result or {@code null}.
      * @throws IgniteCheckedException If failed.
      */
     @SuppressWarnings("unchecked")
-    public final <R> R findOne(L row, Object x) throws IgniteCheckedException {
+    public final <R> R findOne(L row, TreeRowClosure<L, T> c, Object x) throws IgniteCheckedException {
         checkDestroyed();
 
         try {
-            GetOne g = new GetOne(row, x, false);
+            GetOne g = new GetOne(row, c, x, false);
 
             doFind(g);
 
@@ -1075,11 +1247,8 @@
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on lookup row: " + row, e);
         }
-        catch (RuntimeException e) {
-            throw new IgniteException("Runtime failure on lookup row: " + row, e);
-        }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on lookup row: " + row, e);
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on lookup row: " + row, e);
         }
         finally {
             checkDestroyed();
@@ -1093,7 +1262,7 @@
      */
     @SuppressWarnings("unchecked")
     @Override public final T findOne(L row) throws IgniteCheckedException {
-        return findOne(row, null);
+        return findOne(row, null, null);
     }
 
     /**
@@ -1633,14 +1802,14 @@
                 }
             }
         }
+        catch (UnregisteredClassException | UnregisteredBinaryTypeException e) {
+            throw e;
+        }
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on search row: " + row, e);
         }
-        catch (RuntimeException e) {
-            throw new IgniteException("Runtime failure on search row: " + row, e);
-        }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on search row: " + row, e);
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on search row: " + row, e);
         }
         finally {
             x.releaseAll();
@@ -1796,11 +1965,8 @@
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on search row: " + row, e);
         }
-        catch (RuntimeException e) {
-            throw new IgniteException("Runtime failure on search row: " + row, e);
-        }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on search row: " + row, e);
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on search row: " + row, e);
         }
         finally {
             r.releaseAll();
@@ -2115,11 +2281,8 @@
         catch (IgniteCheckedException e) {
             throw new IgniteCheckedException("Runtime failure on row: " + row, e);
         }
-        catch (RuntimeException e) {
-            throw new IgniteException("Runtime failure on row: " + row, e);
-        }
-        catch (AssertionError e) {
-            throw new AssertionError("Assertion error on row: " + row, e);
+        catch (RuntimeException | AssertionError e) {
+            throw new CorruptedTreeException("Runtime failure on row: " + row, e);
         }
         finally {
             checkDestroyed();
@@ -2377,6 +2540,88 @@
         }
     }
 
+
+    /**
+     * @param c Get.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void doVisit(TreeVisitor c) throws IgniteCheckedException {
+        for (;;) { // Go down with retries.
+            c.init();
+
+            switch (visitDown(c, c.rootId, 0L, c.rootLvl)) {
+                case RETRY:
+                case RETRY_ROOT:
+                    checkInterrupted();
+
+                    continue;
+
+                default:
+                    return;
+            }
+        }
+    }
+
+    /**
+     * @param v Tree visitor.
+     * @param pageId Page ID.
+     * @param fwdId Expected forward page ID.
+     * @param lvl Level.
+     * @return Result code.
+     * @throws IgniteCheckedException If failed.
+     */
+    private Result visitDown(final TreeVisitor v, final long pageId, final long fwdId, final int lvl)
+            throws IgniteCheckedException {
+        long page = acquirePage(pageId);
+
+        try {
+            for (;;) {
+                v.checkLockRetry();
+
+                // Init args.
+                v.pageId = pageId;
+                v.fwdId = fwdId;
+
+                Result res = read(pageId, page, search, v, lvl, RETRY);
+
+                switch (res) {
+                    case GO_DOWN:
+                    case GO_DOWN_X:
+                        assert v.pageId != pageId;
+                        assert v.fwdId != fwdId || fwdId == 0;
+
+                        // Go down recursively.
+                        res = visitDown(v, v.pageId, v.fwdId, lvl - 1);
+
+                        switch (res) {
+                            case RETRY:
+                                checkInterrupted();
+
+                                continue; // The child page got split, need to reread our page.
+
+                            default:
+                                return res;
+                        }
+
+                    case NOT_FOUND:
+                        assert lvl == 0 : lvl;
+
+                        return v.init(pageId, page, fwdId);
+
+                    case FOUND:
+                        throw new IllegalStateException(); // Must never be called because we always have a shift.
+
+                    default:
+                        return res;
+                }
+            }
+        }
+        finally {
+            if (v.canRelease(pageId, lvl))
+                releasePage(pageId, page);
+        }
+    }
+
     /**
      * @param io IO.
      * @param pageAddr Page address.
@@ -2410,7 +2655,7 @@
     /**
      * Get operation.
      */
-    private abstract class Get {
+    public abstract class Get {
         /** */
         long rmvId;
 
@@ -2525,7 +2770,7 @@
          * @param lvl Level.
          * @return {@code true} If we can release the given page.
          */
-        boolean canRelease(long pageId, int lvl) {
+        public boolean canRelease(long pageId, int lvl) {
             return pageId != 0L;
         }
 
@@ -2560,12 +2805,28 @@
         /**
          * @throws IgniteCheckedException If the operation can not be retried.
          */
-        final void checkLockRetry() throws IgniteCheckedException {
-            if (lockRetriesCnt == 0)
-                throw new IgniteCheckedException("Maximum of retries " + getLockRetries() + " reached.");
+        void checkLockRetry() throws IgniteCheckedException {
+            if (lockRetriesCnt == 0) {
+                IgniteCheckedException e = new IgniteCheckedException("Maximum number of retries " +
+                    getLockRetries() + " reached for " + getClass().getSimpleName() + " operation " +
+                    "(the tree may be corrupted). Increase " + IGNITE_BPLUS_TREE_LOCK_RETRIES + " system property " +
+                    "if you regularly see this message (current value is " + getLockRetries() + ").");
+
+                if (failureProcessor != null)
+                    failureProcessor.process(new FailureContext(FailureType.CRITICAL_ERROR, e));
+
+                throw e;
+            }
 
             lockRetriesCnt--;
         }
+
+        /**
+         * @return Operation row.
+         */
+        public L row() {
+            return row;
+        }
     }
 
     /**
@@ -2575,15 +2836,20 @@
         /** */
         Object x;
 
+        /** */
+        TreeRowClosure<L, T> c;
+
         /**
          * @param row Row.
+         * @param c Closure filter.
          * @param x Implementation specific argument.
          * @param findLast Ignore row passed, find last row
          */
-        private GetOne(L row, Object x, boolean findLast) {
+        private GetOne(L row, TreeRowClosure<L,T> c, Object x, boolean findLast) {
             super(row, findLast);
 
             this.x = x;
+            this.c = c;
         }
 
         /** {@inheritDoc} */
@@ -2593,7 +2859,7 @@
             if (lvl != 0 && !canGetRowFromInner)
                 return false;
 
-            row = getRow(io, pageAddr, idx, x);
+            row = c == null || c.apply(BPlusTree.this, io,pageAddr, idx) ? getRow(io, pageAddr, idx, x) : null;
 
             return true;
         }
@@ -2604,14 +2870,14 @@
      */
     private final class GetCursor extends Get {
         /** */
-        ForwardCursor cursor;
+        AbstractForwardCursor cursor;
 
         /**
          * @param lower Lower bound.
          * @param shift Shift.
          * @param cursor Cursor.
          */
-        GetCursor(L lower, int shift, ForwardCursor cursor) {
+        GetCursor(L lower, int shift, AbstractForwardCursor cursor) {
             super(lower, false);
 
             assert shift != 0; // Either handle range of equal rows or find a greater row after concurrent merge.
@@ -2637,9 +2903,373 @@
     }
 
     /**
+     * Get a cursor for range.
+     */
+    private final class TreeVisitor extends Get {
+        /** */
+        long nextPageId;
+
+        /** */
+        L upper;
+
+        /** */
+        TreeVisitorClosure<L, T> p;
+
+        /** */
+        private boolean dirty;
+
+        /** */
+        private boolean writing;
+
+        /**
+         * @param lower Lower bound.
+         */
+        TreeVisitor(L lower, L upper, TreeVisitorClosure<L, T> p) {
+            super(lower, false);
+
+            this.shift = -1;
+            this.upper = upper;
+            this.p = p;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean found(BPlusIO<L> io, long pageAddr, int idx, int lvl) throws IgniteCheckedException {
+            throw new IllegalStateException(); // Must never be called because we always have a shift.
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean notFound(BPlusIO<L> io, long pageAddr, int idx, int lvl) throws IgniteCheckedException {
+            if (lvl != 0)
+                return false;
+
+            if (!(writing = (p.state() & TreeVisitorClosure.CAN_WRITE) != 0))
+                init(pageAddr, io, idx);
+
+            return true;
+        }
+
+        Result init(long pageId, long page, long fwdId) throws IgniteCheckedException {
+            // Init args.
+            this.pageId = pageId;
+            this.fwdId = fwdId;
+
+            if (writing) {
+                long pageAddr = writeLock(pageId, page);
+
+                if (pageAddr == 0)
+                    return RETRY;
+
+                try {
+                    BPlusIO<L> io = io(pageAddr);
+
+                    // Check triangle invariant.
+                    if (io.getForward(pageAddr) != fwdId)
+                        return RETRY;
+
+                    init(pageAddr, io, -1);
+                } finally {
+                    unlock(pageId, page, pageAddr);
+                }
+            }
+
+            return NOT_FOUND;
+        }
+
+        /**
+         * @param pageAddr Page address.
+         * @param io IO.
+         * @param startIdx Start index.
+         * @throws IgniteCheckedException If failed.
+         */
+        private void init(long pageAddr, BPlusIO<L> io, int startIdx) throws IgniteCheckedException {
+            nextPageId = 0;
+
+            int cnt = io.getCount(pageAddr);
+
+            if (cnt != 0)
+                visit(pageAddr, io, startIdx, cnt);
+        }
+
+        /**
+         * @param pageAddr Page address.
+         * @param io IO.
+         * @param startIdx Start index.
+         * @param cnt Number of rows in the buffer.
+         * @throws IgniteCheckedException If failed.
+         */
+        @SuppressWarnings("unchecked")
+        private void visit(long pageAddr, BPlusIO<L> io, int startIdx, int cnt)
+                throws IgniteCheckedException {
+            assert io.isLeaf() : io;
+            assert cnt != 0 : cnt; // We can not see empty pages (empty tree handled in init).
+            assert startIdx >= -1 : startIdx;
+            assert cnt >= startIdx;
+
+            checkDestroyed();
+
+            nextPageId = io.getForward(pageAddr);
+
+            if (startIdx == -1)
+                startIdx = findLowerBound(pageAddr, io, cnt);
+
+            if (cnt == startIdx)
+                return; // Go to the next page;
+
+            cnt = findUpperBound(pageAddr, io, startIdx, cnt);
+
+            for (int i = startIdx; i < cnt; i++) {
+                int state = p.visit(BPlusTree.this, io, pageAddr, i, wal);
+
+                boolean stop = (state & TreeVisitorClosure.STOP) != 0;
+
+                if (writing)
+                    dirty = dirty || (state & TreeVisitorClosure.DIRTY) != 0;
+
+                if (stop) {
+                    nextPageId = 0; // The End.
+
+                    return;
+                }
+            }
+
+            if (nextPageId != 0) {
+                row = io.getLookupRow(BPlusTree.this, pageAddr, cnt - 1); // Need save last row.
+
+                shift = 1;
+            }
+        }
+
+        /**
+         * @param pageAddr Page address.
+         * @param io IO.
+         * @param cnt Count.
+         * @return Adjusted to lower bound start index.
+         * @throws IgniteCheckedException If failed.
+         */
+        private int findLowerBound(long pageAddr, BPlusIO<L> io, int cnt) throws IgniteCheckedException {
+            assert io.isLeaf();
+
+            // Compare with the first row on the page.
+            int cmp = compare(0, io, pageAddr, 0, row);
+
+            if (cmp < 0 || (cmp == 0 && shift == 1)) {
+                int idx = findInsertionPoint(0, io, pageAddr, 0, cnt, row, shift);
+
+                assert idx < 0;
+
+                return fix(idx);
+            }
+
+            return 0;
+        }
+
+        /**
+         * @param pageAddr Page address.
+         * @param io IO.
+         * @param low Start index.
+         * @param cnt Number of rows in the buffer.
+         * @return Corrected number of rows with respect to upper bound.
+         * @throws IgniteCheckedException If failed.
+         */
+        private int findUpperBound(long pageAddr, BPlusIO<L> io, int low, int cnt) throws IgniteCheckedException {
+            assert io.isLeaf();
+
+            // Compare with the last row on the page.
+            int cmp = compare(0, io, pageAddr, cnt - 1, upper);
+
+            if (cmp > 0) {
+                int idx = findInsertionPoint(0, io, pageAddr, low, cnt, upper, 1);
+
+                assert idx < 0;
+
+                cnt = fix(idx);
+
+                nextPageId = 0; // The End.
+            }
+
+            return cnt;
+        }
+
+        /**
+         * @throws IgniteCheckedException If failed.
+         */
+        private void nextPage() throws IgniteCheckedException {
+            for (;;) {
+                if (nextPageId == 0)
+                    return;
+
+                long pageId = nextPageId;
+                long page = acquirePage(pageId);
+                try {
+                    long pageAddr = lock(pageId, page); // Doing explicit null check.
+
+                    // If concurrent merge occurred we have to reinitialize cursor from the last returned row.
+                    if (pageAddr == 0L)
+                        break;
+
+                    try {
+                        BPlusIO<L> io = io(pageAddr);
+
+                        visit(pageAddr, io, -1, io.getCount(pageAddr));
+                    }
+                    finally {
+                        unlock(pageId, page, pageAddr);
+                    }
+                }
+                finally {
+                    releasePage(pageId, page);
+                }
+            }
+
+            doVisit(this); // restart from last read row
+        }
+
+        private void unlock(long pageId, long page, long pageAddr) {
+            if (writing) {
+                writeUnlock(pageId, page, pageAddr, dirty);
+
+                dirty = false; // reset dirty flag
+            }
+            else
+                readUnlock(pageId, page, pageAddr);
+        }
+
+        private long lock(long pageId, long page) {
+            if (writing = ((p.state() & TreeVisitorClosure.CAN_WRITE) != 0))
+                return writeLock(pageId, page);
+            else
+                return readLock(pageId, page);
+        }
+
+        /**
+         * @throws IgniteCheckedException If failed.
+         */
+        private void visit() throws IgniteCheckedException {
+            doVisit(this);
+
+            while (nextPageId != 0)
+                nextPage();
+        }
+    }
+
+    /**
+     * Get the last item in the tree which matches the passed filter.
+     */
+    private final class GetLast extends Get {
+        private final TreeRowClosure<L, T> c;
+        private boolean retry = true;
+        private long lastPageId;
+        private T row0;
+
+        /**
+         * @param c Filter closure.
+         */
+        public GetLast(TreeRowClosure<L, T> c) {
+            super(null, true);
+
+            assert c != null;
+
+            this.c = c;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean found(BPlusIO<L> io, long pageAddr, int idx, int lvl) throws IgniteCheckedException {
+            if (lvl != 0)
+                return false;
+
+            for (int i = idx; i >= 0; i--) {
+                if (c.apply(BPlusTree.this, io, pageAddr, i)) {
+                    retry = false;
+                    row0 = getRow(io, pageAddr, i);
+
+                    return true;
+                }
+            }
+
+            if(pageId == rootId)
+                retry = false; // We are at the root page, there are no other leafs.
+
+            if (retry) {
+                findLast = false;
+
+                // Restart from an item before the first item in the leaf (last item on the previous leaf).
+                row0 = getRow(io, pageAddr, 0);
+                shift = -1;
+
+                lastPageId = pageId; // Track leafs to detect a loop over the first leaf in the tree.
+            }
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean notFound(BPlusIO<L> io, long pageAddr, int idx, int lvl) throws IgniteCheckedException {
+            if (lvl != 0)
+                return false;
+
+            if(io.getCount(pageAddr) == 0) {
+                // it's an empty tree
+                retry = false;
+
+                return true;
+            }
+
+            if (idx == 0 && lastPageId == pageId) {
+                // not found
+                retry = false;
+                row0 = null;
+
+                return true;
+            }
+            else {
+                for (int i = idx; i >= 0; i--) {
+                    if (c.apply(BPlusTree.this, io, pageAddr, i)) {
+                        retry = false;
+                        row0 = getRow(io, pageAddr, i);
+
+                        break;
+                    }
+                }
+            }
+
+            if (retry) {
+                // Restart from an item before the first item in the leaf (last item on the previous leaf).
+                row0 = getRow(io, pageAddr, 0);
+
+                lastPageId = pageId; // Track leafs to detect a loop over the first leaf in the tree.
+            }
+
+            return true;
+        }
+
+        /**
+         * @return Last item in the tree.
+         * @throws IgniteCheckedException If failure.
+         */
+        public T find() throws IgniteCheckedException {
+            while (retry) {
+                row = row0;
+
+                doFind(this);
+            }
+
+            return row0;
+        }
+    }
+
+    /**
      * Put operation.
      */
-    private final class Put extends Get {
+    public final class Put extends Get {
+        /** Mark of NULL value of page id. It means valid value can't be equal this value. */
+        private static final long NULL_PAGE_ID = 0L;
+
+        /** Mark of NULL value of page. */
+        private static final long NULL_PAGE = 0L;
+
+        /** Mark of NULL value of page address. */
+        private static final long NULL_PAGE_ADDRESS = 0L;
+
         /** Right child page ID for split row. */
         long rightId;
 
@@ -2647,9 +3277,8 @@
         T oldRow;
 
         /**
-         * This page is kept locked after split until insert to the upper level will not be finished.
-         * It is needed because split row will be "in flight" and if we'll release tail, remove on
-         * split row may fail.
+         * This page is kept locked after split until insert to the upper level will not be finished. It is needed
+         * because split row will be "in flight" and if we'll release tail, remove on split row may fail.
          */
         long tailId;
 
@@ -2710,10 +3339,10 @@
          * @param tailPageAddr Tail page address
          */
         private void tail(long tailId, long tailPage, long tailPageAddr) {
-            assert (tailId == 0L) == (tailPage == 0L);
-            assert (tailPage == 0L) == (tailPageAddr == 0L);
+            assert (tailId == NULL_PAGE_ID) == (tailPage == NULL_PAGE);
+            assert (tailPage == NULL_PAGE) == (tailPageAddr == NULL_PAGE_ADDRESS);
 
-            if (this.tailPage != 0L)
+            if (this.tailPage != NULL_PAGE)
                 writeUnlockAndClose(this.tailId, this.tailPage, this.tailAddr, null);
 
             this.tailId = tailId;
@@ -2722,8 +3351,8 @@
         }
 
         /** {@inheritDoc} */
-        @Override boolean canRelease(long pageId, int lvl) {
-            return pageId != 0L && tailId != pageId;
+        @Override public boolean canRelease(long pageId, int lvl) {
+            return pageId != NULL_PAGE_ID && tailId != pageId;
         }
 
         /**
@@ -2733,7 +3362,7 @@
             row = null;
             rightId = 0;
 
-            tail(0L, 0L, 0L);
+            tail(NULL_PAGE_ID, NULL_PAGE, NULL_PAGE_ADDRESS);
         }
 
         /** {@inheritDoc} */
@@ -2804,7 +3433,7 @@
 
                 long fwdPageAddr = writeLock(fwdId, fwdPage); // Initial write, no need to check for concurrent modification.
 
-                assert fwdPageAddr != 0L;
+                assert fwdPageAddr != NULL_PAGE_ADDRESS;
 
                 // TODO GG-11640 log a correct forward page record.
                 final Boolean fwdPageWalPlc = Boolean.TRUE;
@@ -2852,7 +3481,7 @@
 
                             long newRootAddr = writeLock(newRootId, newRootPage); // Initial write.
 
-                            assert newRootAddr != 0L;
+                            assert newRootAddr != NULL_PAGE_ADDRESS;
 
                             // Never write full new root page, because it is known to be new.
                             final Boolean newRootPageWalPlc = Boolean.FALSE;
@@ -2969,12 +3598,19 @@
 
             return write(pageId, page, replace, this, lvl, RETRY);
         }
+
+        /** {@inheritDoc} */
+        @Override void checkLockRetry() throws IgniteCheckedException {
+            //non null tailId means that lock on tail page still hold and we can't fail with exception.
+            if (tailId == NULL_PAGE_ID)
+                super.checkLockRetry();
+        }
     }
 
     /**
      * Invoke operation.
      */
-    private final class Invoke extends Get {
+    public final class Invoke extends Get {
         /** */
         Object x;
 
@@ -3112,7 +3748,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override boolean canRelease(long pageId, int lvl) {
+        @Override public boolean canRelease(long pageId, int lvl) {
             if (pageId == 0L)
                 return false;
 
@@ -4089,7 +4725,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override boolean canRelease(long pageId, int lvl) {
+        @Override public boolean canRelease(long pageId, int lvl) {
             return pageId != 0L && !isTail(pageId, lvl);
         }
 
@@ -4478,7 +5114,7 @@
      * @return Full detached data row.
      * @throws IgniteCheckedException If failed.
      */
-    protected final T getRow(BPlusIO<L> io, long pageAddr, int idx) throws IgniteCheckedException {
+    public final T getRow(BPlusIO<L> io, long pageAddr, int idx) throws IgniteCheckedException {
         return getRow(io, pageAddr, idx, null);
     }
 
@@ -4492,54 +5128,60 @@
      * @return Data row.
      * @throws IgniteCheckedException If failed.
      */
-    protected abstract T getRow(BPlusIO<L> io, long pageAddr, int idx, Object x) throws IgniteCheckedException;
+    public abstract T getRow(BPlusIO<L> io, long pageAddr, int idx, Object x) throws IgniteCheckedException;
 
     /**
-     * Forward cursor.
+     *
      */
     @SuppressWarnings("unchecked")
-    private final class ForwardCursor implements GridCursor<T> {
+    private abstract class AbstractForwardCursor {
         /** */
-        private T[] rows = (T[])EMPTY;
+        long nextPageId;
 
         /** */
-        private int row = -1;
-
-        /** */
-        private long nextPageId;
-
-        /** */
-        private L lowerBound;
+        L lowerBound;
 
         /** */
         private int lowerShift = -1; // Initially it is -1 to handle multiple equal rows.
 
         /** */
-        private final L upperBound;
-
-        /** */
-        private final Object x;
+        final L upperBound;
 
         /**
          * @param lowerBound Lower bound.
          * @param upperBound Upper bound.
          */
-        ForwardCursor(L lowerBound, L upperBound) {
+        AbstractForwardCursor(L lowerBound, L upperBound) {
             this.lowerBound = lowerBound;
             this.upperBound = upperBound;
-            this.x = null;
         }
 
         /**
-         * @param lowerBound Lower bound.
-         * @param upperBound Upper bound.
-         * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+         *
          */
-        ForwardCursor(L lowerBound, L upperBound, Object x) {
-            this.lowerBound = lowerBound;
-            this.upperBound = upperBound;
-            this.x = x;
-        }
+        abstract void init0();
+
+        /**
+         * @param pageAddr Page address.
+         * @param io IO.
+         * @param startIdx Start index.
+         * @param cnt Number of rows in the buffer.
+         * @return {@code true} If we were able to fetch rows from this page.
+         * @throws IgniteCheckedException If failed.
+         */
+        abstract boolean fillFromBuffer0(long pageAddr, BPlusIO<L> io, int startIdx, int cnt)
+            throws IgniteCheckedException;
+
+        /**
+         * @return {@code True} If we have rows to return after reading the next page.
+         * @throws IgniteCheckedException If failed.
+         */
+        abstract boolean reinitialize0() throws IgniteCheckedException;
+
+        /**
+         * @param readDone {@code True} if traversed all rows.
+         */
+        abstract void onNotFound(boolean readDone);
 
         /**
          * @param pageAddr Page address.
@@ -4547,9 +5189,10 @@
          * @param startIdx Start index.
          * @throws IgniteCheckedException If failed.
          */
-        private void init(long pageAddr, BPlusIO<L> io, int startIdx) throws IgniteCheckedException {
+        final void init(long pageAddr, BPlusIO<L> io, int startIdx) throws IgniteCheckedException {
             nextPageId = 0;
-            row = -1;
+
+            init0();
 
             int cnt = io.getCount(pageAddr);
 
@@ -4557,16 +5200,10 @@
             if (cnt == 0) {
                 assert io.getForward(pageAddr) == 0L;
 
-                rows = null;
+                onNotFound(true);
             }
-            else if (!fillFromBuffer(pageAddr, io, startIdx, cnt)) {
-                if (rows != EMPTY) {
-                    assert rows.length > 0; // Otherwise it makes no sense to create an array.
-
-                    // Fake clear.
-                    rows[0] = null;
-                }
-            }
+            else if (!fillFromBuffer(pageAddr, io, startIdx, cnt))
+                onNotFound(false);
         }
 
         /**
@@ -4576,7 +5213,7 @@
          * @return Adjusted to lower bound start index.
          * @throws IgniteCheckedException If failed.
          */
-        private int findLowerBound(long pageAddr, BPlusIO<L> io, int cnt) throws IgniteCheckedException {
+        final int findLowerBound(long pageAddr, BPlusIO<L> io, int cnt) throws IgniteCheckedException {
             assert io.isLeaf();
 
             // Compare with the first row on the page.
@@ -4601,7 +5238,7 @@
          * @return Corrected number of rows with respect to upper bound.
          * @throws IgniteCheckedException If failed.
          */
-        private int findUpperBound(long pageAddr, BPlusIO<L> io, int low, int cnt) throws IgniteCheckedException {
+        final int findUpperBound(long pageAddr, BPlusIO<L> io, int low, int cnt) throws IgniteCheckedException {
             assert io.isLeaf();
 
             // Compare with the last row on the page.
@@ -4633,84 +5270,20 @@
             throws IgniteCheckedException {
             assert io.isLeaf() : io;
             assert cnt != 0 : cnt; // We can not see empty pages (empty tree handled in init).
-            assert startIdx >= 0 : startIdx;
+            assert startIdx >= 0 || startIdx == -1: startIdx;
             assert cnt >= startIdx;
 
             checkDestroyed();
 
             nextPageId = io.getForward(pageAddr);
 
-            if (lowerBound != null && startIdx == 0)
-                startIdx = findLowerBound(pageAddr, io, cnt);
-
-            if (upperBound != null && cnt != startIdx)
-                cnt = findUpperBound(pageAddr, io, startIdx, cnt);
-
-            cnt -= startIdx;
-
-            if (cnt == 0)
-                return false;
-
-            if (rows == EMPTY)
-                rows = (T[])new Object[cnt];
-
-            int foundCnt = 0;
-
-            for (int i = 0; i < cnt; i++) {
-                T r = getRow(io, pageAddr, startIdx + i, x);
-
-                if (r != null)
-                    rows = GridArrays.set(rows, foundCnt++, r);
-            }
-
-            if (foundCnt == 0) {
-                rows = (T[])EMPTY;
-
-                return false;
-            }
-
-            GridArrays.clearTail(rows, foundCnt);
-
-            return true;
-        }
-
-        /** {@inheritDoc} */
-        @SuppressWarnings("SimplifiableIfStatement")
-        @Override public boolean next() throws IgniteCheckedException {
-            if (rows == null)
-                return false;
-
-            if (++row < rows.length && rows[row] != null) {
-                clearLastRow(); // Allow to GC the last returned row.
-
-                return true;
-            }
-
-            return nextPage();
-        }
-
-        /**
-         * @return Cleared last row.
-         */
-        private T clearLastRow() {
-            if (row == 0)
-                return null;
-
-            int last = row - 1;
-
-            T r = rows[last];
-
-            assert r != null;
-
-            rows[last] = null;
-
-            return r;
+            return fillFromBuffer0(pageAddr, io, startIdx, cnt);
         }
 
         /**
          * @throws IgniteCheckedException If failed.
          */
-        private void find() throws IgniteCheckedException {
+        final void find() throws IgniteCheckedException {
             assert lowerBound != null;
 
             doFind(new GetCursor(lowerBound, lowerShift, this));
@@ -4726,21 +5299,20 @@
             // to the previous lower bound.
             find();
 
-            return next();
+            return reinitialize0();
         }
 
         /**
+         * @param lastRow Last read row (to be used as new lower bound).
          * @return {@code true} If we have rows to return after reading the next page.
          * @throws IgniteCheckedException If failed.
          */
-        private boolean nextPage() throws IgniteCheckedException {
-            updateLowerBound(clearLastRow());
-
-            row = 0;
+        final boolean nextPage(L lastRow) throws IgniteCheckedException {
+            updateLowerBound(lastRow);
 
             for (;;) {
                 if (nextPageId == 0) {
-                    rows = null;
+                    onNotFound(true);
 
                     return false; // Done.
                 }
@@ -4757,7 +5329,7 @@
                     try {
                         BPlusIO<L> io = io(pageAddr);
 
-                        if (fillFromBuffer(pageAddr, io, 0, io.getCount(pageAddr)))
+                        if (fillFromBuffer(pageAddr, io, -1, io.getCount(pageAddr)))
                             return true;
 
                         // Continue fetching forward.
@@ -4778,12 +5350,239 @@
         /**
          * @param lower New exact lower bound.
          */
-        private void updateLowerBound(T lower) {
+        private void updateLowerBound(L lower) {
             if (lower != null) {
                 lowerShift = 1; // Now we have the full row an need to avoid duplicates.
                 lowerBound = lower; // Move the lower bound forward for further concurrent merge retries.
             }
         }
+    }
+
+    /**
+     * Closure cursor.
+     */
+    @SuppressWarnings("unchecked")
+    private final class ClosureCursor extends AbstractForwardCursor {
+        /** */
+        private final TreeRowClosure<L, T> p;
+
+        /** */
+        private L lastRow;
+
+        /**
+         * @param lowerBound Lower bound.
+         * @param upperBound Upper bound.
+         * @param p Row predicate.
+         */
+        ClosureCursor(L lowerBound, L upperBound, TreeRowClosure<L, T> p) {
+            super(lowerBound, upperBound);
+
+            assert lowerBound != null;
+            assert upperBound != null;
+            assert p != null;
+
+            this.p = p;
+        }
+
+        /** {@inheritDoc} */
+        @Override void init0() {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean fillFromBuffer0(long pageAddr, BPlusIO<L> io, int startIdx, int cnt)
+            throws IgniteCheckedException {
+            if (startIdx == -1)
+                startIdx = findLowerBound(pageAddr, io, cnt);
+
+            if (cnt == startIdx)
+                return false;
+
+            for (int i = startIdx; i < cnt; i++) {
+                int cmp = compare(0, io, pageAddr, i, upperBound);
+
+                if (cmp > 0) {
+                    nextPageId = 0; // The End.
+
+                    return false;
+                }
+
+                boolean stop = !p.apply(BPlusTree.this, io, pageAddr, i);
+
+                if (stop) {
+                    nextPageId = 0; // The End.
+
+                    return true;
+                }
+            }
+
+            if (nextPageId != 0)
+                lastRow = io.getLookupRow(BPlusTree.this, pageAddr, cnt - 1); // Need save last row.
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean reinitialize0() throws IgniteCheckedException {
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override void onNotFound(boolean readDone) {
+            nextPageId = 0;
+        }
+
+        /**
+         * @throws IgniteCheckedException If failed.
+         */
+        private void iterate() throws IgniteCheckedException {
+            find();
+
+            if (nextPageId == 0) {
+                return;
+            }
+
+            for (;;) {
+                L lastRow0 = lastRow;
+
+                lastRow = null;
+
+                nextPage(lastRow0);
+
+                if (nextPageId == 0)
+                    return;
+            }
+        }
+    }
+
+    /**
+     * Forward cursor.
+     */
+    @SuppressWarnings("unchecked")
+    private final class ForwardCursor extends AbstractForwardCursor implements GridCursor<T> {
+        /** */
+        final Object x;
+
+        /** */
+        private T[] rows = (T[])EMPTY;
+
+        /** */
+        private int row = -1;
+
+        /** */
+        private final TreeRowClosure<L, T> c;
+
+        /**
+         * @param lowerBound Lower bound.
+         * @param upperBound Upper bound.
+         * @param c Filter closure.
+         * @param x Implementation specific argument, {@code null} always means that we need to return full detached data row.
+         */
+        ForwardCursor(L lowerBound, L upperBound, TreeRowClosure<L, T> c, Object x) {
+            super(lowerBound, upperBound);
+
+            this.c = c;
+            this.x = x;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean fillFromBuffer0(long pageAddr, BPlusIO<L> io, int startIdx, int cnt) throws IgniteCheckedException {
+            if (startIdx == -1) {
+                if (lowerBound != null)
+                    startIdx = findLowerBound(pageAddr, io, cnt);
+                else
+                    startIdx = 0;
+            }
+
+            if (upperBound != null && cnt != startIdx)
+                cnt = findUpperBound(pageAddr, io, startIdx, cnt);
+
+            int cnt0 = cnt - startIdx;
+
+            if (cnt0 == 0)
+                return false;
+
+            if (rows == EMPTY)
+                rows = (T[])new Object[cnt0];
+
+            int resCnt = 0;
+
+            for (int idx = startIdx; idx < cnt; idx++) {
+                if (c == null || c.apply(BPlusTree.this, io, pageAddr, idx))
+                    rows = GridArrays.set(rows, resCnt++, getRow(io, pageAddr, idx, x));
+            }
+
+            if (resCnt == 0) {
+                rows = (T[])EMPTY;
+
+                return false;
+            }
+
+            GridArrays.clearTail(rows, resCnt);
+
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override boolean reinitialize0() throws IgniteCheckedException {
+            return next();
+        }
+
+        /** {@inheritDoc} */
+        @Override void onNotFound(boolean readDone) {
+            if (readDone)
+                rows = null;
+            else {
+                if (rows != EMPTY) {
+                    assert rows.length > 0; // Otherwise it makes no sense to create an array.
+
+                    // Fake clear.
+                    rows[0] = null;
+                }
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override void init0() {
+            row = -1;
+        }
+
+        /** {@inheritDoc} */
+        @SuppressWarnings("SimplifiableIfStatement")
+        @Override public boolean next() throws IgniteCheckedException {
+            if (rows == null)
+                return false;
+
+            if (++row < rows.length && rows[row] != null) {
+                clearLastRow(); // Allow to GC the last returned row.
+
+                return true;
+            }
+
+            T lastRow = clearLastRow();
+
+            row = 0;
+
+            return nextPage(lastRow);
+        }
+
+        /**
+         * @return Cleared last row.
+         */
+        private T clearLastRow() {
+            if (row == 0)
+                return null;
+
+            int last = row - 1;
+
+            T r = rows[last];
+
+            assert r != null;
+
+            rows[last] = null;
+
+            return r;
+        }
 
         /** {@inheritDoc} */
         @Override public T get() {
@@ -4863,7 +5662,7 @@
     /**
      * Operation result.
      */
-    enum Result {
+    public enum Result {
         /** */
         GO_DOWN,
 
@@ -4920,6 +5719,36 @@
     }
 
     /**
+     * A generic visitor-style interface for performing inspection/modification operations on the tree.
+     */
+    public interface TreeVisitorClosure<L, T extends L> {
+        /** */
+        int STOP = 0x01;
+        /** */
+        int CAN_WRITE = STOP << 1;
+        /** */
+        int DIRTY = CAN_WRITE << 1;
+
+        /**
+         * Performs inspection or operation on a specified row.
+         *
+         * @param tree The tree.
+         * @param io Th tree IO object.
+         * @param pageAddr The page address.
+         * @param idx The item index.
+         * @return state bitset.
+         * @throws IgniteCheckedException If failed.
+         */
+        public int visit(BPlusTree<L, T> tree, BPlusIO<L> io, long pageAddr, int idx, IgniteWriteAheadLogManager wal)
+            throws IgniteCheckedException;
+
+        /**
+         * @return state bitset.
+         */
+        public int state();
+    }
+
+    /**
      * @return Return number of retries.
      */
     protected int getLockRetries() {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java
new file mode 100644
index 0000000..a6bfb1f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/CorruptedTreeException.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.tree;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Exception to distinguish {@link BPlusTree} tree broken invariants.
+ */
+public class CorruptedTreeException extends IgniteCheckedException implements InvalidEnvironmentException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * @param msg Message.
+     * @param cause Cause.
+     */
+    public CorruptedTreeException(String msg, @Nullable Throwable cause) {
+        super(msg, cause);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java
index 50b5779..4a12045 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/AbstractDataPageIO.java
@@ -81,6 +81,7 @@
     public static final int MIN_DATA_PAGE_OVERHEAD = ITEMS_OFF + ITEM_SIZE + PAYLOAD_LEN_SIZE + LINK_SIZE;
 
     /**
+     * @param type Page type.
      * @param ver Page format version.
      */
     protected AbstractDataPageIO(int type, int ver) {
@@ -502,6 +503,23 @@
 
     /**
      * @param pageAddr Page address.
+     * @param itemId Item to position on.
+     * @param pageSize Page size.
+     * @param reqLen Required payload length.
+     * @return Offset to start of actual fragment data.
+     */
+    public int getPayloadOffset(final long pageAddr, final int itemId, final int pageSize, int reqLen) {
+        int dataOff = getDataOffset(pageAddr, itemId, pageSize);
+
+        int payloadSize = getPageEntrySize(pageAddr, dataOff, 0);
+
+        assert payloadSize >= reqLen : payloadSize;
+
+        return dataOff + PAYLOAD_LEN_SIZE + (isFragmented(pageAddr, dataOff) ? LINK_SIZE : 0);
+    }
+
+    /**
+     * @param pageAddr Page address.
      * @param idx Item index.
      * @return Item.
      */
@@ -982,6 +1000,16 @@
         int payloadSize = payload != null ? payload.length :
             Math.min(rowSize - written, getFreeSpace(pageAddr));
 
+        if (row != null) {
+            int remain = rowSize - written - payloadSize;
+            int hdrSize = row.headerSize();
+
+            // We need page header (i.e. MVCC info) is located entirely on the very first page in chain.
+            // So we force moving it to the next page if it could not fit entirely on this page.
+            if (remain > 0 && remain < hdrSize)
+                payloadSize -= hdrSize - remain;
+        }
+
         int fullEntrySize = getPageEntrySize(payloadSize, SHOW_PAYLOAD_LEN | SHOW_LINK | SHOW_ITEM);
         int dataOff = getDataOffsetForWrite(pageAddr, fullEntrySize, directCnt, indirectCnt, pageSize);
 
@@ -1227,13 +1255,6 @@
     }
 
     /**
-     * @param row Row.
-     * @return Row size in page.
-     * @throws IgniteCheckedException if failed.
-     */
-    public abstract int getRowSize(T row) throws IgniteCheckedException;
-
-    /**
      * Defines closure interface for applying computations to data page items.
      *
      * @param <T> Closure return type.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java
index afa3c9a..623951b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java
@@ -77,7 +77,7 @@
      * @return Number of levels in this tree.
      */
     public int getLevelsCount(long pageAddr) {
-        return PageUtils.getByte(pageAddr, LVLS_OFF);
+        return Byte.toUnsignedInt(PageUtils.getByte(pageAddr, LVLS_OFF));
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java
index 8a04749..87e37f6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/DataPageIO.java
@@ -21,16 +21,27 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.processors.cache.CacheObject;
-import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.util.GridStringBuilder;
 
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.CACHE_ID;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.EXPIRE_TIME;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.KEY;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.MVCC_INFO;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.VALUE;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.EntryPart.VERSION;
+
 /**
  * Data pages IO.
  */
 public class DataPageIO extends AbstractDataPageIO<CacheDataRow> {
     /** */
+    public static final int MVCC_INFO_SIZE = 40;
+    
+    /** */
     public static final IOVersions<DataPageIO> VERSIONS = new IOVersions<>(
         new DataPageIO(1)
     );
@@ -43,21 +54,74 @@
     }
 
     /** {@inheritDoc} */
-    @Override
-    protected void writeFragmentData(
-        final CacheDataRow row,
-        final ByteBuffer buf,
-        final int rowOff,
-        final int payloadSize
-    ) throws IgniteCheckedException {
+    @Override protected void writeRowData(long pageAddr, int dataOff, int payloadSize, CacheDataRow row,
+        boolean newRow) throws IgniteCheckedException {
+        long addr = pageAddr + dataOff;
+
+        int cacheIdSize = row.cacheId() != 0 ? 4 : 0;
+        int mvccInfoSize = row.mvccCoordinatorVersion() > 0 ? MVCC_INFO_SIZE : 0;
+
+        if (newRow) {
+            PageUtils.putShort(addr, 0, (short)payloadSize);
+            addr += 2;
+
+            if (mvccInfoSize > 0) {
+                assert MvccUtils.mvccVersionIsValid(row.mvccCoordinatorVersion(), row.mvccCounter(), row.mvccOperationCounter());
+
+                // xid_min.
+                PageUtils.putLong(addr, 0, row.mvccCoordinatorVersion());
+                PageUtils.putLong(addr, 8, row.mvccCounter());
+                PageUtils.putInt(addr, 16, row.mvccOperationCounter() | (row.mvccTxState() << MVCC_HINTS_BIT_OFF));
+
+                assert row.newMvccCoordinatorVersion() == 0
+                    || MvccUtils.mvccVersionIsValid(row.newMvccCoordinatorVersion(), row.newMvccCounter(), row.newMvccOperationCounter());
+
+                // xid_max.
+                PageUtils.putLong(addr, 20, row.newMvccCoordinatorVersion());
+                PageUtils.putLong(addr, 28, row.newMvccCounter());
+                PageUtils.putInt(addr, 36, row.newMvccOperationCounter() | (row.newMvccTxState() << MVCC_HINTS_BIT_OFF));
+
+                addr += mvccInfoSize;
+            }
+
+            if (cacheIdSize != 0) {
+                PageUtils.putInt(addr, 0, row.cacheId());
+
+                addr += cacheIdSize;
+            }
+
+            addr += row.key().putValue(addr);
+        }
+        else
+            addr += (2 + mvccInfoSize + cacheIdSize  + row.key().valueBytesLength(null));
+
+        addr += row.value().putValue(addr);
+
+        CacheVersionIO.write(addr, row.version(), false);
+        addr += CacheVersionIO.size(row.version(), false);
+
+        PageUtils.putLong(addr, 0, row.expireTime());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void writeFragmentData(CacheDataRow row, ByteBuffer buf, int rowOff,
+        int payloadSize) throws IgniteCheckedException {
         final int keySize = row.key().valueBytesLength(null);
+
         final int valSize = row.value().valueBytesLength(null);
 
-        int written = writeFragment(row, buf, rowOff, payloadSize, EntryPart.CACHE_ID, keySize, valSize);
-        written += writeFragment(row, buf, rowOff + written, payloadSize - written, EntryPart.KEY, keySize, valSize);
-        written += writeFragment(row, buf, rowOff + written, payloadSize - written, EntryPart.EXPIRE_TIME, keySize, valSize);
-        written += writeFragment(row, buf, rowOff + written, payloadSize - written, EntryPart.VALUE, keySize, valSize);
-        written += writeFragment(row, buf, rowOff + written, payloadSize - written, EntryPart.VERSION, keySize, valSize);
+        int written = writeFragment(row, buf, rowOff, payloadSize,
+            MVCC_INFO, keySize, valSize);
+        written += writeFragment(row, buf, rowOff + written, payloadSize - written,
+            CACHE_ID, keySize, valSize);
+        written += writeFragment(row, buf, rowOff + written, payloadSize - written,
+            KEY, keySize, valSize);
+        written += writeFragment(row, buf, rowOff + written, payloadSize - written,
+            EXPIRE_TIME, keySize, valSize);
+        written += writeFragment(row, buf, rowOff + written, payloadSize - written,
+            VALUE, keySize, valSize);
+        written += writeFragment(row, buf, rowOff + written, payloadSize - written,
+            VERSION, keySize, valSize);
 
         assert written == payloadSize;
     }
@@ -65,9 +129,13 @@
     /**
      * Try to write fragment data.
      *
+     * @param row Row.
+     * @param buf Byte buffer.
      * @param rowOff Offset in row data bytes.
      * @param payloadSize Data length that should be written in this fragment.
      * @param type Type of the part of entry.
+     * @param keySize Key size.
+     * @param valSize Value size.
      * @return Actually written data.
      * @throws IgniteCheckedException If fail.
      */
@@ -87,35 +155,42 @@
         final int curLen;
 
         int cacheIdSize = row.cacheId() == 0 ? 0 : 4;
+        int mvccInfoSize = row.mvccCoordinatorVersion() > 0 ? MVCC_INFO_SIZE : 0;
 
         switch (type) {
-            case CACHE_ID:
+            case MVCC_INFO:
                 prevLen = 0;
-                curLen = cacheIdSize;
+                curLen = mvccInfoSize;
+
+                break;
+
+            case CACHE_ID:
+                prevLen = mvccInfoSize;
+                curLen = mvccInfoSize + cacheIdSize;
 
                 break;
 
             case KEY:
-                prevLen = cacheIdSize;
-                curLen = cacheIdSize + keySize;
+                prevLen = mvccInfoSize + cacheIdSize;
+                curLen = mvccInfoSize + cacheIdSize + keySize;
 
                 break;
 
             case EXPIRE_TIME:
-                prevLen = cacheIdSize + keySize;
-                curLen = cacheIdSize + keySize + 8;
+                prevLen = mvccInfoSize + cacheIdSize + keySize;
+                curLen = mvccInfoSize + cacheIdSize + keySize + 8;
 
                 break;
 
             case VALUE:
-                prevLen = cacheIdSize + keySize + 8;
-                curLen = cacheIdSize + keySize + valSize + 8;
+                prevLen = mvccInfoSize + cacheIdSize + keySize + 8;
+                curLen = mvccInfoSize + cacheIdSize + keySize + valSize + 8;
 
                 break;
 
             case VERSION:
-                prevLen = cacheIdSize + keySize + valSize + 8;
-                curLen = cacheIdSize + keySize + valSize + CacheVersionIO.size(row.version(), false) + 8;
+                prevLen = mvccInfoSize + cacheIdSize + keySize + valSize + 8;
+                curLen = mvccInfoSize + cacheIdSize + keySize + valSize + CacheVersionIO.size(row.version(), false) + 8;
 
                 break;
 
@@ -128,13 +203,22 @@
 
         final int len = Math.min(curLen - rowOff, payloadSize);
 
-        if (type == EntryPart.EXPIRE_TIME)
+        if (type == EXPIRE_TIME)
             writeExpireTimeFragment(buf, row.expireTime(), rowOff, len, prevLen);
-        else if (type == EntryPart.CACHE_ID)
+        else if (type == CACHE_ID)
             writeCacheIdFragment(buf, row.cacheId(), rowOff, len, prevLen);
-        else if (type != EntryPart.VERSION) {
+        else if (type == MVCC_INFO)
+            writeMvccInfoFragment(buf,
+                row.mvccCoordinatorVersion(),
+                row.mvccCounter(),
+                row.mvccOperationCounter() | (row.mvccTxState() << MVCC_HINTS_BIT_OFF),
+                row.newMvccCoordinatorVersion(),
+                row.newMvccCounter(),
+                row.newMvccOperationCounter() | (row.newMvccTxState() << MVCC_HINTS_BIT_OFF),
+                len);
+        else if (type != VERSION) {
             // Write key or value.
-            final CacheObject co = type == EntryPart.KEY ? row.key() : row.value();
+            final CacheObject co = type == KEY ? row.key() : row.value();
 
             co.putValue(buf, rowOff - prevLen, len);
         }
@@ -145,6 +229,236 @@
     }
 
     /**
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @param ver New version.
+     */
+    public void updateVersion(long pageAddr, int dataOff, MvccVersion ver) {
+        long addr = pageAddr + dataOff;
+
+        updateVersion(addr, ver.coordinatorVersion(), ver.counter(), ver.operationCounter());
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param itemId Item ID.
+     * @param pageSize Page size.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Operation counter.
+     */
+    public void updateVersion(long pageAddr, int itemId, int pageSize, long mvccCrd, long mvccCntr, int mvccOpCntr) {
+        int dataOff = getDataOffset(pageAddr, itemId, pageSize);
+
+        long addr = pageAddr + dataOff + (isFragmented(pageAddr, dataOff) ? 10 : 2);
+
+        updateVersion(addr, mvccCrd, mvccCntr, mvccOpCntr);
+    }
+
+    /**
+     * @param addr Address.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     */
+    private void updateVersion(long addr, long mvccCrd, long mvccCntr, int mvccOpCntr) {
+        PageUtils.putLong(addr, 0, mvccCrd);
+        PageUtils.putLong(addr, 8, mvccCntr);
+        PageUtils.putInt(addr, 16, mvccOpCntr);
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @param newVer New version.
+     */
+    public void updateNewVersion(long pageAddr, int dataOff, MvccVersion newVer, byte newTxState) {
+        long addr = pageAddr + dataOff;
+
+        updateNewVersion(addr, newVer.coordinatorVersion(), newVer.counter(),
+            (newVer.operationCounter() & ~MVCC_HINTS_MASK) | ((int)newTxState << MVCC_HINTS_BIT_OFF));
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param itemId Item ID.
+     * @param pageSize Page size.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Operation counter.
+     */
+    public void updateNewVersion(long pageAddr, int itemId, int pageSize, long mvccCrd, long mvccCntr, int mvccOpCntr) {
+        int dataOff = getDataOffset(pageAddr, itemId, pageSize);
+
+        long addr = pageAddr + dataOff + (isFragmented(pageAddr, dataOff) ? 10 : 2);
+
+        updateNewVersion(addr, mvccCrd, mvccCntr, mvccOpCntr);
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param itemId Item ID.
+     * @param pageSize Page size.
+     * @param txState Tx state hint.
+     */
+    public void updateTxState(long pageAddr, int itemId, int pageSize, byte txState) {
+        int dataOff = getDataOffset(pageAddr, itemId, pageSize);
+
+        long addr = pageAddr + dataOff + (isFragmented(pageAddr, dataOff) ? 10 : 2);
+
+        int opCntr = mvccOperationCounter(addr, 0);
+
+        mvccOperationCounter(addr, 0, ((int)txState << MVCC_HINTS_BIT_OFF) | (opCntr & ~MVCC_HINTS_MASK));
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param itemId Item ID.
+     * @param pageSize Page size.
+     * @param txState Tx state hint.
+     */
+    public void updateNewTxState(long pageAddr, int itemId, int pageSize, byte txState) {
+        int dataOff = getDataOffset(pageAddr, itemId, pageSize);
+
+        long addr = pageAddr + dataOff + (isFragmented(pageAddr, dataOff) ? 10 : 2);
+
+        int opCntr = newMvccOperationCounter(addr, 0);
+
+        newMvccOperationCounter(addr, 0, ((int)txState << MVCC_HINTS_BIT_OFF) | (opCntr & ~MVCC_HINTS_MASK));
+    }
+
+    /**
+     * Marks row removed.
+     *
+     * @param addr Address.
+     * @param mvccCrd Mvcc coordinator.
+     * @param mvccCntr Mvcc counter.
+     */
+    private void updateNewVersion(long addr, long mvccCrd, long mvccCntr, int mvccOpCntr) {
+        // Skip xid_min.
+        addr += 20;
+
+        PageUtils.putLong(addr, 0, mvccCrd);
+        PageUtils.putLong(addr, 8, mvccCntr);
+        PageUtils.putInt(addr, 16, mvccOpCntr);
+    }
+
+    /**
+     * Returns MVCC coordinator number.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return MVCC coordinator number.
+     */
+    public long mvccCoordinator(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        return PageUtils.getLong(addr, 0);
+    }
+
+    /**
+     * Returns MVCC counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return MVCC counter value.
+     */
+    public long mvccCounter(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        return PageUtils.getLong(addr, 8);
+    }
+
+    /**
+     * Returns MVCC operation counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return MVCC counter value.
+     */
+    public int mvccOperationCounter(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        return PageUtils.getInt(addr, 16);
+    }
+
+    /**
+     * Sets MVCC operation counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @param opCntr MVCC counter value.
+     */
+    public void mvccOperationCounter(long pageAddr, int dataOff, int opCntr) {
+        long addr = pageAddr + dataOff;
+
+        PageUtils.putInt(addr, 16, opCntr);
+    }
+
+    /**
+     * Returns new MVCC coordinator number.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return New MVCC coordinator number.
+     */
+    public long newMvccCoordinator(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        // Skip xid_min.
+        addr += 20;
+
+        return PageUtils.getLong(addr, 0);
+    }
+
+    /**
+     * Returns new MVCC counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return New MVCC counter value.
+     */
+    public long newMvccCounter(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        // Skip xid_min.
+        addr += 20;
+
+        return PageUtils.getLong(addr, 8);
+    }
+
+    /**
+     * Returns MVCC operation counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @return MVCC counter value.
+     */
+    public int newMvccOperationCounter(long pageAddr, int dataOff) {
+        long addr = pageAddr + dataOff;
+
+        // Skip xid_min.
+        addr += 20;
+
+        return PageUtils.getInt(addr, 16);
+    }
+
+    /**
+     * Sets MVCC new operation counter value.
+     *
+     * @param pageAddr Page address.
+     * @param dataOff Data offset.
+     * @param opCntr MVCC operation counter value.
+     */
+    public void newMvccOperationCounter(long pageAddr, int dataOff, int opCntr) {
+        long addr = pageAddr + dataOff;
+
+        // Skip xid_min.
+        addr += 20;
+
+        PageUtils.putInt(addr, 16, opCntr);
+    }
+
+    /**
      * @param buf Byte buffer.
      * @param ver Version.
      * @param rowOff Row offset.
@@ -181,7 +495,6 @@
      */
     private void writeExpireTimeFragment(ByteBuffer buf, long expireTime, int rowOff, int len, int prevLen) {
         int size = 8;
-
         if (size <= len)
             buf.putLong(expireTime);
         else {
@@ -222,9 +535,48 @@
     }
 
     /**
+     * @param buf Byte buffer.
+     * @param mvccCrd Coordinator version.
+     * @param mvccCntr Counter.
+     * @param mvccOpCntr Operation counter.
+     * @param newMvccCrd New coordinator version.
+     * @param newMvccCntr New counter version.
+     * @param newMvccOpCntr New operation counter.
+     * @param len Length.
+     */
+    private void writeMvccInfoFragment(ByteBuffer buf, long mvccCrd, long mvccCntr, int mvccOpCntr, long newMvccCrd,
+        long newMvccCntr, int newMvccOpCntr, int len) {
+        if (mvccCrd == 0)
+            return;
+
+        assert len >= MVCC_INFO_SIZE : "Mvcc info should fit on the one page!";
+
+        assert MvccUtils.mvccVersionIsValid(mvccCrd, mvccCntr, mvccOpCntr);
+
+        // xid_min.
+        buf.putLong(mvccCrd);
+        buf.putLong(mvccCntr);
+        buf.putInt(mvccOpCntr);
+
+        assert newMvccCrd == 0 || MvccUtils.mvccVersionIsValid(newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+        // xid_max.
+        buf.putLong(newMvccCrd);
+        buf.putLong(newMvccCntr);
+        buf.putInt(newMvccOpCntr);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void printPage(long addr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException {
+        sb.a("DataPageIO [\n");
+        printPageLayout(addr, pageSize, sb);
+        sb.a("\n]");
+    }
+
+    /**
      *
      */
-    private enum EntryPart {
+    enum EntryPart {
         /** */
         KEY,
 
@@ -238,83 +590,9 @@
         EXPIRE_TIME,
 
         /** */
-        CACHE_ID
-    }
+        CACHE_ID,
 
-    /** {@inheritDoc} */
-    @Override
-    protected void writeRowData(
-        long pageAddr,
-        int dataOff,
-        int payloadSize,
-        CacheDataRow row,
-        boolean newRow
-    ) throws IgniteCheckedException {
-        long addr = pageAddr + dataOff;
-
-        int cacheIdSize = row.cacheId() != 0 ? 4 : 0;
-
-        if (newRow) {
-            PageUtils.putShort(addr, 0, (short)payloadSize);
-            addr += 2;
-
-            if (cacheIdSize != 0) {
-                PageUtils.putInt(addr, 0, row.cacheId());
-
-                addr += cacheIdSize;
-            }
-
-            addr += row.key().putValue(addr);
-        }
-        else
-            addr += (2 + cacheIdSize + row.key().valueBytesLength(null));
-
-        addr += row.value().putValue(addr);
-
-        CacheVersionIO.write(addr, row.version(), false);
-        addr += CacheVersionIO.size(row.version(), false);
-
-        PageUtils.putLong(addr, 0, row.expireTime());
-    }
-
-    /** {@inheritDoc} */
-    @Override
-    protected void writeRowData(
-        long pageAddr,
-        int dataOff,
-        byte[] payload
-    ) {
-        PageUtils.putShort(pageAddr, dataOff, (short)payload.length);
-        dataOff += 2;
-
-        PageUtils.putBytes(pageAddr, dataOff, payload);
-    }
-
-    /** {@inheritDoc} */
-    @Override public int getRowSize(CacheDataRow row) throws IgniteCheckedException {
-        return getRowSize(row, row.cacheId() != 0);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void printPage(long addr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException {
-        sb.a("DataPageIO [\n");
-        printPageLayout(addr, pageSize, sb);
-        sb.a("\n]");
-    }
-
-    /**
-     * @param row Row.
-     * @param withCacheId If {@code true} adds cache ID size.
-     * @return Entry size on page.
-     * @throws IgniteCheckedException If failed.
-     */
-    public static int getRowSize(CacheDataRow row, boolean withCacheId) throws IgniteCheckedException {
-        KeyCacheObject key = row.key();
-        CacheObject val = row.value();
-
-        int keyLen = key.valueBytesLength(null);
-        int valLen = val.valueBytesLength(null);
-
-        return keyLen + valLen + CacheVersionIO.size(row.version(), false) + 8 + (withCacheId ? 4 : 0);
+        /** */
+        MVCC_INFO
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/IOVersions.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/IOVersions.java
index d74d344..9dcad9b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/IOVersions.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/IOVersions.java
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.internal.processors.cache.persistence.tree.io;
 
+import org.apache.ignite.internal.util.typedef.internal.S;
+
 /**
  * Registry for IO versions.
  */
@@ -99,4 +101,9 @@
 
         return res;
     }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(IOVersions.class, this);
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java
index 4534bb5..f167d0c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java
@@ -25,6 +25,8 @@
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogInnerIO;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxLogLeafIO;
 import org.apache.ignite.internal.processors.cache.persistence.IndexStorageImpl;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO;
@@ -33,10 +35,14 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener;
 import org.apache.ignite.internal.processors.cache.tree.CacheIdAwareDataInnerIO;
 import org.apache.ignite.internal.processors.cache.tree.CacheIdAwareDataLeafIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccCacheIdAwareDataInnerIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccCacheIdAwareDataLeafIO;
 import org.apache.ignite.internal.processors.cache.tree.CacheIdAwarePendingEntryInnerIO;
 import org.apache.ignite.internal.processors.cache.tree.CacheIdAwarePendingEntryLeafIO;
 import org.apache.ignite.internal.processors.cache.tree.DataInnerIO;
 import org.apache.ignite.internal.processors.cache.tree.DataLeafIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataInnerIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataLeafIO;
 import org.apache.ignite.internal.processors.cache.tree.PendingEntryInnerIO;
 import org.apache.ignite.internal.processors.cache.tree.PendingEntryLeafIO;
 import org.apache.ignite.internal.util.GridStringBuilder;
@@ -87,6 +93,12 @@
     /** */
     private static IOVersions<? extends BPlusLeafIO<?>> h2LeafIOs;
 
+    /** */
+    private static IOVersions<? extends BPlusInnerIO<?>> h2MvccInnerIOs;
+
+    /** */
+    private static IOVersions<? extends BPlusLeafIO<?>> h2MvccLeafIOs;
+
     /** Maximum payload size. */
     public static final short MAX_PAYLOAD_SIZE = 2048;
 
@@ -97,6 +109,12 @@
     private static List<IOVersions<? extends BPlusLeafIO<?>>> h2ExtraLeafIOs = new ArrayList<>(MAX_PAYLOAD_SIZE);
 
     /** */
+    private static List<IOVersions<? extends BPlusInnerIO<?>>> h2ExtraMvccInnerIOs = new ArrayList<>(MAX_PAYLOAD_SIZE);
+
+    /** */
+    private static List<IOVersions<? extends BPlusLeafIO<?>>> h2ExtraMvccLeafIOs = new ArrayList<>(MAX_PAYLOAD_SIZE);
+
+    /** */
     public static final int TYPE_OFF = 0;
 
     /** */
@@ -129,6 +147,12 @@
     /** */
     public static final int COMMON_HEADER_END = RESERVED_3_OFF + 8; // 40=type(2)+ver(2)+crc(4)+pageId(8)+rotatedIdPart(1)+reserved(1+2+4+2*8)
 
+    /** */
+    public static final int MVCC_HINTS_MASK = 0xC0000000;
+
+    /** */
+    public static final int MVCC_HINTS_BIT_OFF = 30;
+
     /* All the page types. */
 
     /** */
@@ -200,19 +224,55 @@
     /** */
     public static final short T_DATA_REF_METASTORAGE_LEAF = 23;
 
+    /** */
+    public static final short T_DATA_REF_MVCC_INNER = 24;
+
+    /** */
+    public static final short T_DATA_REF_MVCC_LEAF = 25;
+
+    /** */
+    public static final short T_CACHE_ID_DATA_REF_MVCC_INNER = 26;
+
+    /** */
+    public static final short T_CACHE_ID_DATA_REF_MVCC_LEAF = 27;
+
+    /** */
+    public static final short T_H2_MVCC_REF_LEAF = 28;
+
+    /** */
+    public static final short T_H2_MVCC_REF_INNER = 29;
+
+    /** */
+    public static final short T_TX_LOG_LEAF = 30;
+
+    /** */
+    public static final short T_TX_LOG_INNER = 31;
+
     /** Index for payload == 1. */
-    public static final short T_H2_EX_REF_LEAF_START = 10000;
+    public static final short T_H2_EX_REF_LEAF_START = 10_000;
 
     /** */
     public static final short T_H2_EX_REF_LEAF_END = T_H2_EX_REF_LEAF_START + MAX_PAYLOAD_SIZE - 1;
 
     /** */
-    public static final short T_H2_EX_REF_INNER_START = 20000;
+    public static final short T_H2_EX_REF_INNER_START = 20_000;
 
     /** */
     public static final short T_H2_EX_REF_INNER_END = T_H2_EX_REF_INNER_START + MAX_PAYLOAD_SIZE - 1;
 
     /** */
+    public static final short T_H2_EX_REF_MVCC_LEAF_START = 23_000;
+
+    /** */
+    public static final short T_H2_EX_REF_MVCC_LEAF_END = T_H2_EX_REF_MVCC_LEAF_START + MAX_PAYLOAD_SIZE - 1;
+
+    /** */
+    public static final short T_H2_EX_REF_MVCC_INNER_START = 26_000;
+
+    /** */
+    public static final short T_H2_EX_REF_MVCC_INNER_END = T_H2_EX_REF_MVCC_INNER_START + MAX_PAYLOAD_SIZE - 1;
+
+    /** */
     private final int ver;
 
     /** */
@@ -363,13 +423,19 @@
      *
      * @param innerIOs Inner IO versions.
      * @param leafIOs Leaf IO versions.
+     * @param mvccInnerIOs Inner IO versions with mvcc enabled.
+     * @param mvccLeafIOs Leaf IO versions with mvcc enabled.
      */
     public static void registerH2(
         IOVersions<? extends BPlusInnerIO<?>> innerIOs,
-        IOVersions<? extends BPlusLeafIO<?>> leafIOs
+        IOVersions<? extends BPlusLeafIO<?>> leafIOs,
+        IOVersions<? extends BPlusInnerIO<?>> mvccInnerIOs,
+        IOVersions<? extends BPlusLeafIO<?>> mvccLeafIOs
     ) {
         h2InnerIOs = innerIOs;
         h2LeafIOs = leafIOs;
+        h2MvccInnerIOs = mvccInnerIOs;
+        h2MvccLeafIOs = mvccLeafIOs;
     }
 
     /**
@@ -377,8 +443,10 @@
      *
      * @param innerExtIOs Extra versions.
      */
-    public static void registerH2ExtraInner(IOVersions<? extends BPlusInnerIO<?>> innerExtIOs) {
-        h2ExtraInnerIOs.add(innerExtIOs);
+    public static void registerH2ExtraInner(IOVersions<? extends BPlusInnerIO<?>> innerExtIOs, boolean mvcc) {
+        List<IOVersions<? extends BPlusInnerIO<?>>> ios = mvcc ? h2ExtraMvccInnerIOs : h2ExtraInnerIOs;
+
+        ios.add(innerExtIOs);
     }
 
     /**
@@ -386,24 +454,30 @@
      *
      * @param leafExtIOs Extra versions.
      */
-    public static void registerH2ExtraLeaf(IOVersions<? extends BPlusLeafIO<?>> leafExtIOs) {
-        h2ExtraLeafIOs.add(leafExtIOs);
+    public static void registerH2ExtraLeaf(IOVersions<? extends BPlusLeafIO<?>> leafExtIOs, boolean mvcc) {
+        List<IOVersions<? extends BPlusLeafIO<?>>> ios = mvcc ? h2ExtraMvccLeafIOs : h2ExtraLeafIOs;
+
+        ios.add(leafExtIOs);
     }
 
     /**
      * @param idx Index.
      * @return IOVersions for given idx.
      */
-    public static IOVersions<? extends BPlusInnerIO<?>> getInnerVersions(int idx) {
-        return h2ExtraInnerIOs.get(idx);
+    public static IOVersions<? extends BPlusInnerIO<?>> getInnerVersions(int idx, boolean mvcc) {
+        List<IOVersions<? extends BPlusInnerIO<?>>> ios = mvcc ? h2ExtraMvccInnerIOs : h2ExtraInnerIOs;
+
+        return ios.get(idx);
     }
 
     /**
      * @param idx Index.
      * @return IOVersions for given idx.
      */
-    public static IOVersions<? extends BPlusLeafIO<?>> getLeafVersions(int idx) {
-        return h2ExtraLeafIOs.get(idx);
+    public static IOVersions<? extends BPlusLeafIO<?>> getLeafVersions(int idx, boolean mvcc) {
+        List<IOVersions<? extends BPlusLeafIO<?>>> ios = mvcc ? h2ExtraMvccLeafIOs : h2ExtraLeafIOs;
+
+        return ios.get(idx);
     }
 
     /**
@@ -525,13 +599,18 @@
      */
     @SuppressWarnings("unchecked")
     public static <Q extends BPlusIO<?>> Q getBPlusIO(int type, int ver) throws IgniteCheckedException {
-
         if (type >= T_H2_EX_REF_LEAF_START && type <= T_H2_EX_REF_LEAF_END)
             return (Q)h2ExtraLeafIOs.get(type - T_H2_EX_REF_LEAF_START).forVersion(ver);
 
         if (type >= T_H2_EX_REF_INNER_START && type <= T_H2_EX_REF_INNER_END)
             return (Q)h2ExtraInnerIOs.get(type - T_H2_EX_REF_INNER_START).forVersion(ver);
 
+        if (type >= T_H2_EX_REF_MVCC_LEAF_START && type <= T_H2_EX_REF_MVCC_LEAF_END)
+            return (Q)h2ExtraMvccLeafIOs.get(type - T_H2_EX_REF_MVCC_LEAF_START).forVersion(ver);
+
+        if (type >= T_H2_EX_REF_MVCC_INNER_START && type <= T_H2_EX_REF_MVCC_INNER_END)
+            return (Q)h2ExtraMvccInnerIOs.get(type - T_H2_EX_REF_MVCC_INNER_START).forVersion(ver);
+
         switch (type) {
             case T_H2_REF_INNER:
                 if (h2InnerIOs == null)
@@ -545,6 +624,24 @@
 
                 return (Q)h2LeafIOs.forVersion(ver);
 
+            case T_H2_MVCC_REF_INNER:
+                if (h2MvccInnerIOs == null)
+                    break;
+
+                return (Q)h2MvccInnerIOs.forVersion(ver);
+
+            case T_H2_MVCC_REF_LEAF:
+                if (h2MvccLeafIOs == null)
+                    break;
+
+                return (Q)h2MvccLeafIOs.forVersion(ver);
+
+            case T_TX_LOG_INNER:
+                return (Q)TxLogInnerIO.VERSIONS.forVersion(ver);
+
+            case T_TX_LOG_LEAF:
+                return (Q)TxLogLeafIO.VERSIONS.forVersion(ver);
+
             case T_DATA_REF_INNER:
                 return (Q)DataInnerIO.VERSIONS.forVersion(ver);
 
@@ -557,6 +654,18 @@
             case T_CACHE_ID_AWARE_DATA_REF_LEAF:
                 return (Q)CacheIdAwareDataLeafIO.VERSIONS.forVersion(ver);
 
+            case T_CACHE_ID_DATA_REF_MVCC_INNER:
+                return (Q) MvccCacheIdAwareDataInnerIO.VERSIONS.forVersion(ver);
+
+            case T_CACHE_ID_DATA_REF_MVCC_LEAF:
+                return (Q) MvccCacheIdAwareDataLeafIO.VERSIONS.forVersion(ver);
+
+            case T_DATA_REF_MVCC_INNER:
+                return (Q)MvccDataInnerIO.VERSIONS.forVersion(ver);
+
+            case T_DATA_REF_MVCC_LEAF:
+                return (Q)MvccDataLeafIO.VERSIONS.forVersion(ver);
+
             case T_METASTORE_INNER:
                 return (Q)IndexStorageImpl.MetaStoreInnerIO.VERSIONS.forVersion(ver);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/SimpleDataPageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/SimpleDataPageIO.java
index f02e3a0..14489b7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/SimpleDataPageIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/SimpleDataPageIO.java
@@ -113,12 +113,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public int getRowSize(MetastorageDataRow row) throws IgniteCheckedException {
-        return 4 + row.value().length;
-    }
-
-
-    /** {@inheritDoc} */
     @Override protected void printPage(long addr, int pageSize, GridStringBuilder sb) throws IgniteCheckedException {
         sb.a("SimpleDataPageIO [\n");
         printPageLayout(addr, pageSize, sb);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java
index a52038a..98c6f1f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandler.java
@@ -151,16 +151,18 @@
         int intArg,
         R lockFailed
     ) throws IgniteCheckedException {
-        long pageAddr = readLock(pageMem, cacheId, pageId, page, lsnr);
+        long pageAddr = 0L;
 
-        if (pageAddr == 0L)
-            return lockFailed;
         try {
+            if ((pageAddr = readLock(pageMem, cacheId, pageId, page, lsnr)) == 0L)
+                return lockFailed;
+
             PageIO io = PageIO.getPageIO(pageAddr);
             return h.run(cacheId, pageId, page, pageAddr, io, null, arg, intArg);
         }
         finally {
-            readUnlock(pageMem, cacheId, pageId, page, pageAddr, lsnr);
+            if (pageAddr != 0L)
+                readUnlock(pageMem, cacheId, pageId, page, pageAddr, lsnr);
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandlerWrapper.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandlerWrapper.java
new file mode 100644
index 0000000..495eba0
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/util/PageHandlerWrapper.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.tree.util;
+
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+
+/**
+ * Wrapper factory for {@link PageHandler} instances.
+ *
+ * @param <R> Result type of actual {@link PageHandler} class.
+ */
+public interface PageHandlerWrapper<R> {
+    /**
+     * Wraps given {@code hnd}.
+     *
+     * @param tree Instance of {@link BPlusTree} where given {@code} is used.
+     * @param hnd Page handler to wrap.
+     * @return Wrapped version of given {@code hnd}.
+     */
+    public PageHandler<?, R> wrap(BPlusTree<?, ?> tree, PageHandler<?, R> hnd);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java
index 01b0933..aa8eb31 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java
@@ -36,6 +36,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.SegmentHeader;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
 import org.apache.ignite.internal.util.typedef.P2;
+import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -43,8 +44,8 @@
 import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader;
 
 /**
- * Iterator over WAL segments. This abstract class provides most functionality for reading records in log.
- * Subclasses are to override segment switching functionality
+ * Iterator over WAL segments. This abstract class provides most functionality for reading records in log. Subclasses
+ * are to override segment switching functionality
  */
 public abstract class AbstractWalRecordsIterator
     extends GridCloseableIteratorAdapter<IgniteBiTuple<WALPointer, WALRecord>> implements WALIterator {
@@ -52,14 +53,14 @@
     private static final long serialVersionUID = 0L;
 
     /**
-     * Current record preloaded, to be returned on next()<br>
-     * Normally this should be not null because advance() method should already prepare some value<br>
+     * Current record preloaded, to be returned on next()<br> Normally this should be not null because advance() method
+     * should already prepare some value<br>
      */
     protected IgniteBiTuple<WALPointer, WALRecord> curRec;
 
     /**
-     * Current WAL segment absolute index. <br>
-     * Determined as lowest number of file at start, is changed during advance segment
+     * Current WAL segment absolute index. <br> Determined as lowest number of file at start, is changed during advance
+     * segment
      */
     protected long curWalSegmIdx = -1;
 
@@ -162,6 +163,13 @@
                 }
             }
             catch (WalSegmentTailReachedException e) {
+                AbstractReadFileHandle currWalSegment = this.currWalSegment;
+
+                IgniteCheckedException e0 = validateTailReachedException(e, currWalSegment);
+
+                if (e0 != null)
+                    throw e0;
+
                 log.warning(e.getMessage());
 
                 curRec = null;
@@ -172,6 +180,20 @@
     }
 
     /**
+     * @param tailReachedException Tail reached exception.
+     * @param currWalSegment Current WAL segment read handler.
+     * @return If need to throw exception after validation.
+     */
+    protected IgniteCheckedException validateTailReachedException(
+        WalSegmentTailReachedException tailReachedException,
+        AbstractReadFileHandle currWalSegment
+    ) {
+        return !currWalSegment.workDir() ? new IgniteCheckedException(
+            "WAL tail reached in archive directory, " +
+                "WAL segment file is corrupted.", tailReachedException) : null;
+    }
+
+    /**
      * Closes and returns WAL segment (if any)
      *
      * @return closed handle
@@ -188,16 +210,16 @@
     }
 
     /**
-     * Switches records iterator to the next WAL segment
-     * as result of this method, new reference to segment should be returned.
-     * Null for current handle means stop of iteration.
+     * Switches records iterator to the next WAL segment as result of this method, new reference to segment should be
+     * returned. Null for current handle means stop of iteration.
      *
      * @param curWalSegment current open WAL segment or null if there is no open segment yet
      * @return new WAL segment to read or null for stop iteration
      * @throws IgniteCheckedException if reading failed
      */
     protected abstract AbstractReadFileHandle advanceSegment(
-        @Nullable final AbstractReadFileHandle curWalSegment) throws IgniteCheckedException;
+        @Nullable final AbstractReadFileHandle curWalSegment
+    ) throws IgniteCheckedException;
 
     /**
      * Switches to new record.
@@ -205,7 +227,7 @@
      * @param hnd currently opened read handle.
      * @return next advanced record.
      */
-    private IgniteBiTuple<WALPointer, WALRecord> advanceRecord(
+    protected IgniteBiTuple<WALPointer, WALRecord> advanceRecord(
         @Nullable final AbstractReadFileHandle hnd
     ) throws IgniteCheckedException {
         if (hnd == null)
@@ -222,8 +244,11 @@
             return new IgniteBiTuple<>((WALPointer)actualFilePtr, postProcessRecord(rec));
         }
         catch (IOException | IgniteCheckedException e) {
-            if (e instanceof WalSegmentTailReachedException)
-                throw (WalSegmentTailReachedException)e;
+            if (e instanceof WalSegmentTailReachedException) {
+                throw new WalSegmentTailReachedException(
+                    "WAL segment tail reached. [idx=" + hnd.idx() +
+                        ", isWorkDir=" + hnd.workDir() + ", serVer=" + hnd.ser() + "]", e);
+            }
 
             if (!(e instanceof SegmentEofException) && !(e instanceof EOFException)) {
                 IgniteCheckedException e0 = handleRecordException(e, actualFilePtr);
@@ -237,8 +262,8 @@
     }
 
     /**
-     * Performs final conversions with record loaded from WAL.
-     * To be overridden by subclasses if any processing required.
+     * Performs final conversions with record loaded from WAL. To be overridden by subclasses if any processing
+     * required.
      *
      * @param rec record to post process.
      * @return post processed record.
@@ -252,11 +277,13 @@
      *
      * @param e problem from records reading
      * @param ptr file pointer was accessed
-     *
-     * @return {@code null} if the error was handled and we can go ahead,
-     *  {@code IgniteCheckedException} if the error was not handled, and we should stop the iteration.
+     * @return {@code null} if the error was handled and we can go ahead, {@code IgniteCheckedException} if the error
+     * was not handled, and we should stop the iteration.
      */
-    protected IgniteCheckedException handleRecordException(@NotNull final Exception e, @Nullable final FileWALPointer ptr) {
+    protected IgniteCheckedException handleRecordException(
+        @NotNull final Exception e,
+        @Nullable final FileWALPointer ptr
+    ) {
         if (log.isInfoEnabled())
             log.info("Stopping WAL iteration due to an exception: " + e.getMessage() + ", ptr=" + ptr);
 
@@ -264,9 +291,76 @@
     }
 
     /**
+     * Assumes fileIO will be closed in this method in case of error occurred.
+     *
+     * @param desc File descriptor.
+     * @param start Optional start pointer. Null means read from the beginning.
+     * @param fileIO fileIO associated with file descriptor
+     * @param segmentHeader read segment header from fileIO
+     * @return Initialized file read header.
+     * @throws IgniteCheckedException If initialized failed due to another unexpected error.
+     */
+    protected AbstractReadFileHandle initReadHandle(
+        @NotNull final AbstractFileDescriptor desc,
+        @Nullable final FileWALPointer start,
+        @NotNull final FileIO fileIO,
+        @NotNull final SegmentHeader segmentHeader
+    ) throws IgniteCheckedException {
+        try {
+            final boolean isCompacted = segmentHeader.isCompacted();
+
+            if (isCompacted)
+                serializerFactory.skipPositionCheck(true);
+
+            FileInput in = new FileInput(fileIO, buf);
+
+            if (start != null && desc.idx() == start.index()) {
+                if (isCompacted) {
+                    if (start.fileOffset() != 0)
+                        serializerFactory.recordDeserializeFilter(new StartSeekingFilter(start));
+                }
+                else {
+                    // Make sure we skip header with serializer version.
+                    long startOff = Math.max(start.fileOffset(), fileIO.position());
+
+                    in.seek(startOff);
+                }
+            }
+
+            int serVer = segmentHeader.getSerializerVersion();
+
+            return createReadFileHandle(fileIO, desc.idx(), serializerFactory.createSerializer(serVer), in);
+        }
+        catch (SegmentEofException | EOFException ignore) {
+            try {
+                fileIO.close();
+            }
+            catch (IOException ce) {
+                throw new IgniteCheckedException(ce);
+            }
+
+            return null;
+        }
+        catch (IgniteCheckedException e) {
+            U.closeWithSuppressingException(fileIO, e);
+
+            throw e;
+        }
+        catch (IOException e) {
+            U.closeWithSuppressingException(fileIO, e);
+
+            throw new IgniteCheckedException(
+                "Failed to initialize WAL segment after reading segment header: " + desc.file().getAbsolutePath(), e);
+        }
+    }
+
+    /**
+     * Assumes file descriptor will be opened in this method. The caller of this method must be responsible for closing
+     * opened file descriptor File descriptor will be closed ONLY in case of error occurred.
+     *
      * @param desc File descriptor.
      * @param start Optional start pointer. Null means read from the beginning
-     * @return Initialized file handle.
+     * @return Initialized file read header.
      * @throws FileNotFoundException If segment file is missing.
      * @throws IgniteCheckedException If initialized failed due to another unexpected error.
      */
@@ -274,35 +368,15 @@
         @NotNull final AbstractFileDescriptor desc,
         @Nullable final FileWALPointer start
     ) throws IgniteCheckedException, FileNotFoundException {
+        FileIO fileIO = null;
+
         try {
-            FileIO fileIO = desc.isCompressed() ? new UnzipFileIO(desc.file()) : ioFactory.create(desc.file());
+            fileIO = desc.isCompressed() ? new UnzipFileIO(desc.file()) : ioFactory.create(desc.file());
+
+            SegmentHeader segmentHeader;
 
             try {
-                SegmentHeader segmentHeader = readSegmentHeader(fileIO, curWalSegmIdx);
-
-                boolean isCompacted = segmentHeader.isCompacted();
-
-                if (isCompacted)
-                    serializerFactory.skipPositionCheck(true);
-
-                FileInput in = new FileInput(fileIO, buf);
-
-                if (start != null && desc.idx() == start.index()) {
-                    if (isCompacted) {
-                        if (start.fileOffset() != 0)
-                            serializerFactory.recordDeserializeFilter(new StartSeekingFilter(start));
-                    }
-                    else {
-                        // Make sure we skip header with serializer version.
-                        long startOff = Math.max(start.fileOffset(), fileIO.position());
-
-                        in.seek(startOff);
-                    }
-                }
-
-                int serVer = segmentHeader.getSerializerVersion();
-
-                return createReadFileHandle(fileIO, desc.idx(), serializerFactory.createSerializer(serVer), in);
+                segmentHeader = readSegmentHeader(fileIO, curWalSegmIdx);
             }
             catch (SegmentEofException | EOFException ignore) {
                 try {
@@ -315,20 +389,21 @@
                 return null;
             }
             catch (IOException | IgniteCheckedException e) {
-                try {
-                    fileIO.close();
-                }
-                catch (IOException ce) {
-                    e.addSuppressed(ce);
-                }
+                U.closeWithSuppressingException(fileIO, e);
 
                 throw e;
             }
+
+            return initReadHandle(desc, start, fileIO, segmentHeader);
         }
         catch (FileNotFoundException e) {
+            U.closeQuiet(fileIO);
+
             throw e;
         }
         catch (IOException e) {
+            U.closeQuiet(fileIO);
+
             throw new IgniteCheckedException(
                 "Failed to initialize WAL segment: " + desc.file().getAbsolutePath(), e);
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java
new file mode 100644
index 0000000..a73248a
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileDescriptor.java
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.wal;
+
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.util.typedef.internal.SB;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+import java.io.File;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+/**
+ * WAL file descriptor.
+ */
+public class FileDescriptor implements Comparable<FileDescriptor>, AbstractWalRecordsIterator.AbstractFileDescriptor {
+
+    /** file extension of WAL segment. */
+    private static final String WAL_SEGMENT_FILE_EXT = ".wal";
+
+    /** Length of WAL segment file name. */
+    private static final int WAL_SEGMENT_FILE_NAME_LENGTH = 16;
+
+    /** File represented by this class. */
+    protected final File file;
+
+    /** Absolute WAL segment file index. */
+    protected final long idx;
+
+    /**
+     * Creates file descriptor. Index is restored from file name.
+     *
+     * @param file WAL segment file.
+     */
+    public FileDescriptor(@NotNull File file) {
+        this(file, null);
+    }
+
+    /**
+     * @param file WAL segment file.
+     * @param idx Absolute WAL segment file index. For null value index is restored from file name.
+     */
+    public FileDescriptor(@NotNull File file, @Nullable Long idx) {
+        this.file = file;
+
+        String fileName = file.getName();
+
+        assert fileName.contains(WAL_SEGMENT_FILE_EXT);
+
+        this.idx = idx == null ? Long.parseLong(fileName.substring(0, WAL_SEGMENT_FILE_NAME_LENGTH)) : idx;
+    }
+
+    /**
+     * @param segment Segment index.
+     * @return Segment file name.
+     */
+    public static String fileName(long segment) {
+        SB b = new SB();
+
+        String segmentStr = Long.toString(segment);
+
+        for (int i = segmentStr.length(); i < WAL_SEGMENT_FILE_NAME_LENGTH; i++)
+            b.a('0');
+
+        b.a(segmentStr).a(WAL_SEGMENT_FILE_EXT);
+
+        return b.toString();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int compareTo(@NotNull FileDescriptor o) {
+        return Long.compare(idx, o.idx);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+
+        if (!(o instanceof FileDescriptor))
+            return false;
+
+        FileDescriptor that = (FileDescriptor)o;
+
+        return idx == that.idx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return (int)(idx ^ (idx >>> 32));
+    }
+
+    /**
+     * @return Absolute WAL segment file index
+     */
+    public long getIdx() {
+        return idx;
+    }
+
+    /**
+     * @return absolute pathname string of this file descriptor pathname.
+     */
+    public String getAbsolutePath() {
+        return file.getAbsolutePath();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isCompressed() {
+        return file.getName().endsWith(FilePageStoreManager.ZIP_SUFFIX);
+    }
+
+    /** {@inheritDoc} */
+    @Override public File file() {
+        return file;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long idx() {
+        return idx;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java
index 712dca0..907a311 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java
@@ -59,6 +59,7 @@
 import java.util.concurrent.locks.LockSupport;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
+import java.util.stream.Stream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 import java.util.zip.ZipOutputStream;
@@ -69,6 +70,7 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.WALMode;
 import org.apache.ignite.events.WalSegmentArchivedEvent;
+import org.apache.ignite.events.WalSegmentCompactedEvent;
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.failure.FailureType;
 import org.apache.ignite.internal.GridKernalContext;
@@ -76,7 +78,6 @@
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord;
@@ -88,11 +89,13 @@
 import org.apache.ignite.internal.processors.cache.WalStateManager.WALDisableContext;
 import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings;
-import org.apache.ignite.internal.processors.cache.persistence.wal.AbstractWalRecordsIterator.AbstractFileDescriptor;
+import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException;
 import org.apache.ignite.internal.processors.cache.persistence.wal.crc.PureJavaCrc32;
 import org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord;
 import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer;
@@ -109,7 +112,7 @@
 import org.apache.ignite.internal.util.typedef.CIX1;
 import org.apache.ignite.internal.util.typedef.CO;
 import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.SB;
+import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.util.worker.GridWorker;
 import org.apache.ignite.lang.IgniteBiTuple;
@@ -123,10 +126,14 @@
 import static java.nio.file.StandardOpenOption.CREATE;
 import static java.nio.file.StandardOpenOption.READ;
 import static java.nio.file.StandardOpenOption.WRITE;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE;
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_MMAP;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SEGMENT_SYNC_TIMEOUT;
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SERIALIZER_VERSION;
 import static org.apache.ignite.configuration.WALMode.LOG_ONLY;
 import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED;
+import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_COMPACTED;
 import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR;
 import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION;
 import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.SWITCH_SEGMENT_RECORD;
@@ -135,12 +142,15 @@
 import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader;
 import static org.apache.ignite.internal.util.IgniteUtils.findField;
 import static org.apache.ignite.internal.util.IgniteUtils.findNonPublicMethod;
+import static org.apache.ignite.internal.util.IgniteUtils.sleep;
 
 /**
  * File WAL manager.
  */
 @SuppressWarnings("IfMayBeConditional")
 public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter implements IgniteWriteAheadLogManager {
+    /** Dfault wal segment sync timeout. */
+    public static final long DFLT_WAL_SEGMENT_SYNC_TIMEOUT = 500L;
     /** {@link MappedByteBuffer#force0(java.io.FileDescriptor, long, long)}. */
     private static final Method force0 = findNonPublicMethod(
         MappedByteBuffer.class, "force0",
@@ -164,9 +174,6 @@
     /** */
     private static final FileDescriptor[] EMPTY_DESCRIPTORS = new FileDescriptor[0];
 
-    /** WAL segment file extension. */
-    private static final String WAL_SEGMENT_FILE_EXT = ".wal";
-
     /** */
     private static final byte[] FILL_BUF = new byte[1024 * 1024];
 
@@ -231,6 +238,19 @@
     private static final AtomicLongFieldUpdater<FileWriteHandle> WRITTEN_UPD =
         AtomicLongFieldUpdater.newUpdater(FileWriteHandle.class, "written");
 
+    /**
+     * Percentage of archive size for checkpoint trigger. Need for calculate max size of WAL after last checkpoint.
+     * Checkpoint should be triggered when max size of WAL after last checkpoint more than maxWallArchiveSize * thisValue
+     */
+    private static final double CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE =
+        IgniteSystemProperties.getDouble(IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE, 0.25);
+
+    /**
+     * Percentage of WAL archive size to calculate threshold since which removing of old archive should be started.
+     */
+    private static final double THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE =
+        IgniteSystemProperties.getDouble(IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE, 0.5);
+
     /** Interrupted flag. */
     private final ThreadLocal<Boolean> interrupted = new ThreadLocal<Boolean>() {
         @Override protected Boolean initialValue() {
@@ -244,6 +264,15 @@
     /** WAL segment size in bytes. . This is maximum value, actual segments may be shorter. */
     private final long maxWalSegmentSize;
 
+    /**
+     * Maximum number of allowed segments without checkpoint. If we have their more checkpoint should be triggered.
+     * It is simple way to calculate WAL size without checkpoint instead fair WAL size calculating.
+     */
+    private final long maxSegCountWithoutCheckpoint;
+
+    /** Size of wal archive since which removing of old archive should be started */
+    private final long allowedThresholdWalArchiveSize;
+
     /** */
     private final WALMode mode;
 
@@ -298,8 +327,8 @@
         AtomicReferenceFieldUpdater.newUpdater(FileWriteAheadLogManager.class, FileWriteHandle.class, "currHnd");
 
     /**
-     * File archiver moves segments from work directory to archive. Locked segments may be kept not moved until
-     * release. For mode archive and work folders set to equal value, archiver is not created.
+     * File archiver moves segments from work directory to archive. Locked segments may be kept not moved until release.
+     * For mode archive and work folders set to equal value, archiver is not created.
      */
     @Nullable private volatile FileArchiver archiver;
 
@@ -351,6 +380,9 @@
      */
     @Nullable private volatile IgniteInClosure<FileIO> createWalFileListener;
 
+    /** Wal segment sync worker. */
+    private WalSegmentSyncer walSegmentSyncWorker;
+
     /**
      * @param ctx Kernal context.
      */
@@ -370,6 +402,12 @@
         alwaysWriteFullPages = dsCfg.isAlwaysWriteFullPages();
         ioFactory = new RandomAccessFileIOFactory();
         walAutoArchiveAfterInactivity = dsCfg.getWalAutoArchiveAfterInactivity();
+
+        maxSegCountWithoutCheckpoint =
+            (long)((dsCfg.getMaxWalArchiveSize() * CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) / dsCfg.getWalSegmentSize());
+
+        allowedThresholdWalArchiveSize = (long)(dsCfg.getMaxWalArchiveSize() * THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE);
+
         evt = ctx.event();
         failureProcessor = ctx.failure();
     }
@@ -453,7 +491,10 @@
 
             walDisableContext = cctx.walState().walDisableContext();
 
-            if (mode != WALMode.NONE) {
+            if (mode != WALMode.NONE && mode != WALMode.FSYNC) {
+                walSegmentSyncWorker = new WalSegmentSyncer(igCfg.getIgniteInstanceName(),
+                    cctx.kernalContext().log(WalSegmentSyncer.class));
+
                 if (log.isInfoEnabled())
                     log.info("Started write-ahead log manager [mode=" + mode + ']');
             }
@@ -495,7 +536,7 @@
             String segmentName = FileDescriptor.fileName(i);
 
             File file = new File(walArchiveDir, segmentName);
-            File fileZip = new File(walArchiveDir, segmentName + ".zip");
+            File fileZip = new File(walArchiveDir, segmentName + FilePageStoreManager.ZIP_SUFFIX);
 
             if (file.exists())
                 res.add(file);
@@ -551,6 +592,9 @@
             if (currHnd != null)
                 currHnd.close(false);
 
+            if (walSegmentSyncWorker != null)
+                walSegmentSyncWorker.shutdown();
+
             if (walWriter != null)
                 walWriter.shutdown();
 
@@ -583,6 +627,9 @@
                 new IgniteThread(archiver).start();
             }
 
+            if (walSegmentSyncWorker != null)
+                new IgniteThread(walSegmentSyncWorker).start();
+
             if (compressor != null)
                 compressor.start();
         }
@@ -742,7 +789,7 @@
         rec.size(serializer.size(rec));
 
         while (true) {
-            if (rec.rollOver()){
+            if (rec.rollOver()) {
                 assert cctx.database().checkpointLockIsHeldByThread();
 
                 long idx = currWrHandle.idx;
@@ -864,7 +911,7 @@
     private boolean hasIndex(long absIdx) {
         String segmentName = FileDescriptor.fileName(absIdx);
 
-        String zipSegmentName = FileDescriptor.fileName(absIdx) + ".zip";
+        String zipSegmentName = FileDescriptor.fileName(absIdx) + FilePageStoreManager.ZIP_SUFFIX;
 
         boolean inArchive = new File(walArchiveDir, segmentName).exists() ||
             new File(walArchiveDir, zipSegmentName).exists();
@@ -940,9 +987,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void allowCompressionUntil(WALPointer ptr) {
+    @Override public void notchLastCheckpointPtr(WALPointer ptr) {
         if (compressor != null)
-            compressor.allowCompressionUntil(((FileWALPointer)ptr).index());
+            compressor.keepUncompressedIdxFrom(((FileWALPointer)ptr).index());
     }
 
     /** {@inheritDoc} */
@@ -965,6 +1012,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public long lastCompactedSegment() {
+        return compressor != null ? compressor.lastCompressedIdx : -1L;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean reserved(WALPointer ptr) {
         FileWALPointer fPtr = (FileWALPointer)ptr;
 
@@ -1118,6 +1170,9 @@
 
             next.writeHeader();
 
+            if (next.idx - lashCheckpointFileIdx() >= maxSegCountWithoutCheckpoint)
+                cctx.database().forceCheckpoint("too big size of WAL without checkpoint");
+
             boolean swapped = CURR_HND_UPD.compareAndSet(this, hnd, next);
 
             assert swapped : "Concurrent updates on rollover are not allowed";
@@ -1135,6 +1190,15 @@
     }
 
     /**
+     * Give last checkpoint file idx.
+     */
+    private long lashCheckpointFileIdx() {
+        WALPointer lastCheckpointMark = cctx.database().lastCheckpointMarkWalPointer();
+
+        return lastCheckpointMark == null ? 0 : ((FileWALPointer)lastCheckpointMark).index();
+    }
+
+    /**
      * @param lastReadPtr Last read WAL file pointer.
      * @return Initialized file write handle.
      * @throws StorageException If failed to initialize WAL write handle.
@@ -1274,7 +1338,6 @@
                         serializer,
                         rbuf);
 
-
                     if (interrupted)
                         Thread.currentThread().interrupt();
 
@@ -1344,7 +1407,7 @@
 
         File[] allFiles = walWorkDir.listFiles(WAL_SEGMENT_FILE_FILTER);
 
-        if(isArchiverEnabled())
+        if (isArchiverEnabled())
             if (allFiles.length != 0 && allFiles.length > dsCfg.getWalSegments())
                 throw new StorageException("Failed to initialize wal (work directory contains " +
                     "incorrect number of segments) [cur=" + allFiles.length + ", expected=" + dsCfg.getWalSegments() + ']');
@@ -1420,7 +1483,7 @@
 
             if (failureProcessor != null)
                 failureProcessor.process(new FailureContext(FailureType.CRITICAL_ERROR, ex));
-            
+
             throw ex;
         }
     }
@@ -1435,7 +1498,7 @@
         if (log.isDebugEnabled())
             log.debug("Creating new file [exists=" + file.exists() + ", file=" + file.getAbsolutePath() + ']');
 
-        File tmp = new File(file.getParent(), file.getName() + ".tmp");
+        File tmp = new File(file.getParent(), file.getName() + FilePageStoreManager.TMP_SUFFIX);
 
         formatFile(tmp);
 
@@ -1475,6 +1538,39 @@
         return new File(walWorkDir, FileDescriptor.fileName(segmentIdx));
     }
 
+    /**
+     * Files from archive WAL directory.
+     */
+    private FileDescriptor[] walArchiveFiles() {
+        return scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER));
+    }
+
+    /** {@inheritDoc} */
+    @Override public long maxArchivedSegmentToDelete() {
+        //When maxWalArchiveSize==MAX_VALUE deleting files is not permit.
+        if (dsCfg.getMaxWalArchiveSize() == Long.MAX_VALUE)
+            return -1;
+
+        FileDescriptor[] archivedFiles = walArchiveFiles();
+
+        Long totalArchiveSize = Stream.of(archivedFiles)
+            .map(desc -> desc.file().length())
+            .reduce(0L, Long::sum);
+
+        if (archivedFiles.length == 0 || totalArchiveSize < allowedThresholdWalArchiveSize)
+            return -1;
+
+        long sizeOfOldestArchivedFiles = 0;
+
+        for (FileDescriptor desc : archivedFiles) {
+            sizeOfOldestArchivedFiles += desc.file().length();
+
+            if (totalArchiveSize - sizeOfOldestArchivedFiles < allowedThresholdWalArchiveSize)
+                return desc.getIdx();
+        }
+
+        return archivedFiles[archivedFiles.length - 1].getIdx();
+    }
 
     /**
      * @return Sorted WAL files descriptors.
@@ -1817,7 +1913,7 @@
 
             String name = FileDescriptor.fileName(absIdx);
 
-            File dstTmpFile = new File(walArchiveDir, name + ".tmp");
+            File dstTmpFile = new File(walArchiveDir, name + FilePageStoreManager.TMP_SUFFIX);
 
             File dstFile = new File(walArchiveDir, name);
 
@@ -1896,7 +1992,7 @@
         private volatile long lastCompressedIdx = -1L;
 
         /** All segments prior to this (inclusive) can be compressed. */
-        private volatile long lastAllowedToCompressIdx = -1L;
+        private volatile long minUncompressedIdxToKeep = -1L;
 
         /**
          *
@@ -1925,10 +2021,10 @@
         }
 
         /**
-         * @param lastCpStartIdx Segment index to allow compression until (exclusively).
+         * @param idx Minimum raw segment index that should be preserved from deletion.
          */
-        synchronized void allowCompressionUntil(long lastCpStartIdx) {
-            lastAllowedToCompressIdx = lastCpStartIdx - 1;
+        synchronized void keepUncompressedIdxFrom(long idx) {
+            minUncompressedIdxToKeep = idx;
 
             notify();
         }
@@ -1951,7 +2047,7 @@
                 if (stopped)
                     return -1;
 
-                while (segmentToCompress > Math.min(lastAllowedToCompressIdx, archivedMonitor.lastArchivedAbsoluteIndex())) {
+                while (segmentToCompress > archivedMonitor.lastArchivedAbsoluteIndex()) {
                     wait();
 
                     if (stopped)
@@ -1988,7 +2084,7 @@
                 if (segmentReservedOrLocked(desc.idx))
                     return;
 
-                if (desc.idx < lastCompressedIdx && duplicateIndices.contains(desc.idx)) {
+                if (desc.idx < minUncompressedIdxToKeep && duplicateIndices.contains(desc.idx)) {
                     if (!desc.file.delete())
                         U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " +
                             desc.file.getAbsolutePath() + ", exists: " + desc.file.exists());
@@ -2010,9 +2106,10 @@
                     if (currReservedSegment == -1)
                         continue;
 
-                    File tmpZip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment) + ".zip" + ".tmp");
+                    File tmpZip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment)
+                        + FilePageStoreManager.ZIP_SUFFIX + FilePageStoreManager.TMP_SUFFIX);
 
-                    File zip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment) + ".zip");
+                    File zip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment) + FilePageStoreManager.ZIP_SUFFIX);
 
                     File raw = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment));
                     if (!Files.exists(raw.toPath()))
@@ -2026,6 +2123,14 @@
                         try (FileIO f0 = ioFactory.create(zip, CREATE, READ, WRITE)) {
                             f0.force();
                         }
+
+                        if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED)) {
+                            evt.record(new WalSegmentCompactedEvent(
+                                cctx.discovery().localNode(),
+                                currReservedSegment,
+                                zip.getAbsoluteFile())
+                            );
+                        }
                     }
 
                     lastCompressedIdx = currReservedSegment;
@@ -2163,8 +2268,10 @@
                         if (isCancelled())
                             break;
 
-                        File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) + ".zip");
-                        File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) + ".tmp");
+                        File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress)
+                            + FilePageStoreManager.ZIP_SUFFIX);
+                        File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress)
+                            + FilePageStoreManager.TMP_SUFFIX);
                         File unzip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress));
 
                         try (ZipInputStream zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zip)));
@@ -2336,98 +2443,6 @@
     }
 
     /**
-     * WAL file descriptor.
-     */
-    public static class FileDescriptor implements
-        Comparable<FileDescriptor>, AbstractFileDescriptor {
-        /** */
-        protected final File file;
-
-        /** Absolute WAL segment file index */
-        protected final long idx;
-
-        /**
-         * Creates file descriptor. Index is restored from file name
-         *
-         * @param file WAL segment file.
-         */
-        public FileDescriptor(@NotNull File file) {
-            this(file, null);
-        }
-
-        /**
-         * @param file WAL segment file.
-         * @param idx Absolute WAL segment file index. For null value index is restored from file name
-         */
-        public FileDescriptor(@NotNull File file, @Nullable Long idx) {
-            this.file = file;
-
-            String fileName = file.getName();
-
-            assert fileName.contains(WAL_SEGMENT_FILE_EXT);
-
-            this.idx = idx == null ? Long.parseLong(fileName.substring(0, 16)) : idx;
-        }
-
-        /**
-         * @param segment Segment index.
-         * @return Segment file name.
-         */
-        public static String fileName(long segment) {
-            SB b = new SB();
-
-            String segmentStr = Long.toString(segment);
-
-            for (int i = segmentStr.length(); i < 16; i++)
-                b.a('0');
-
-            b.a(segmentStr).a(WAL_SEGMENT_FILE_EXT);
-
-            return b.toString();
-        }
-
-        /** {@inheritDoc} */
-        @Override public int compareTo(@NotNull FileDescriptor o) {
-            return Long.compare(idx, o.idx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean equals(Object o) {
-            if (this == o)
-                return true;
-
-            if (!(o instanceof FileDescriptor))
-                return false;
-
-            FileDescriptor that = (FileDescriptor)o;
-
-            return idx == that.idx;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            return (int)(idx ^ (idx >>> 32));
-        }
-
-        /**
-         * @return True if segment is ZIP compressed.
-         */
-        @Override public boolean isCompressed() {
-            return file.getName().endsWith(".zip");
-        }
-
-        /** {@inheritDoc} */
-        @Override public File file() {
-            return file;
-        }
-
-        /** {@inheritDoc} */
-        @Override public long idx() {
-            return idx;
-        }
-    }
-
-    /**
      *
      */
     private abstract static class FileHandle {
@@ -2584,7 +2599,6 @@
             this.buf = buf;
         }
 
-
         /**
          * Write serializer version to current handle.
          */
@@ -2669,7 +2683,7 @@
          *
          * @param ptr Pointer.
          */
-        private void flushOrWait(FileWALPointer ptr) {
+        private void flushOrWait(FileWALPointer ptr) throws IgniteCheckedException {
             if (ptr != null) {
                 // If requested obsolete file index, it must be already flushed by close.
                 if (ptr.index() != idx)
@@ -2682,7 +2696,7 @@
         /**
          * @param ptr Pointer.
          */
-        private void flush(FileWALPointer ptr) {
+        private void flush(FileWALPointer ptr) throws IgniteCheckedException {
             if (ptr == null) { // Unconditional flush.
                 walWriter.flushAll();
 
@@ -3009,7 +3023,8 @@
          * @param serializerFactory Serializer factory.
          * @param archiver File Archiver.
          * @param decompressor Decompressor.
-         *@param log Logger  @throws IgniteCheckedException If failed to initialize WAL segment.
+         * @param log Logger
+         * @throws IgniteCheckedException If failed to initialize WAL segment.
          */
         private RecordsIterator(
             GridCacheSharedContext cctx,
@@ -3051,7 +3066,8 @@
 
             if (!desc.file().exists()) {
                 FileDescriptor zipFile = new FileDescriptor(
-                    new File(walArchiveDir, FileDescriptor.fileName(desc.idx()) + ".zip"));
+                    new File(walArchiveDir, FileDescriptor.fileName(desc.idx())
+                        + FilePageStoreManager.ZIP_SUFFIX));
 
                 if (!zipFile.file.exists()) {
                     throw new FileNotFoundException("Both compressed and raw segment files are missing in archive " +
@@ -3188,6 +3204,50 @@
             return nextHandle;
         }
 
+        /** {@inheritDoc} */
+        @Override protected IgniteCheckedException handleRecordException(
+            @NotNull Exception e,
+            @Nullable FileWALPointer ptr) {
+
+            if (e instanceof IgniteCheckedException)
+                if (X.hasCause(e, IgniteDataIntegrityViolationException.class))
+                    // This means that there is no explicit last sengment, so we iterate unil the very end.
+                    if (end == null) {
+                        long nextWalSegmentIdx = curWalSegmIdx + 1;
+
+                        // Check that we should not look this segment up in archive directory.
+                        // Basically the same check as in "advanceSegment" method.
+                        if (archiver != null)
+                            if (!canReadArchiveOrReserveWork(nextWalSegmentIdx))
+                                try {
+                                    long workIdx = nextWalSegmentIdx % dsCfg.getWalSegments();
+
+                                    FileDescriptor fd = new FileDescriptor(
+                                        new File(walWorkDir, FileDescriptor.fileName(workIdx)),
+                                        nextWalSegmentIdx
+                                    );
+
+                                    try {
+                                        ReadFileHandle nextHandle = initReadHandle(fd, null);
+
+                                        // "nextHandle == null" is true only if current segment is the last one in the
+                                        // whole history. Only in such case we ignore crc validation error and just stop
+                                        // as if we reached the end of the WAL.
+                                        if (nextHandle == null)
+                                            return null;
+                                    }
+                                    catch (IgniteCheckedException | FileNotFoundException initReadHandleException) {
+                                        e.addSuppressed(initReadHandleException);
+                                    }
+                                }
+                                finally {
+                                    releaseWorkSegment(nextWalSegmentIdx);
+                                }
+                    }
+
+            return super.handleRecordException(e, ptr);
+        }
+
         /**
          * @param absIdx Absolute index to check.
          * @return <ul><li> {@code True} if we can safely read the archive,  </li> <li>{@code false} if the segment has
@@ -3260,8 +3320,6 @@
 
         /** {@inheritDoc} */
         @Override protected void body() {
-            Throwable err = null;
-
             try {
                 while (!isCancelled()) {
                     while (waiters.isEmpty()) {
@@ -3320,7 +3378,7 @@
                             writeBuffer(seg.position(), seg.buffer());
                         }
                         catch (Throwable e) {
-                            log.error("Exception in WAL writer thread: ", e);
+                            log.error("Exception in WAL writer thread:", e);
 
                             err = e;
                         }
@@ -3384,21 +3442,21 @@
         /**
          * Forces all made changes to the file.
          */
-        void force() {
+        void force() throws IgniteCheckedException {
             flushBuffer(FILE_FORCE);
         }
 
         /**
          * Closes file.
          */
-        void close() {
+        void close() throws IgniteCheckedException {
             flushBuffer(FILE_CLOSE);
         }
 
         /**
          * Flushes all data from the buffer.
          */
-        void flushAll() {
+        void flushAll() throws IgniteCheckedException {
             flushBuffer(UNCONDITIONAL_FLUSH);
         }
 
@@ -3406,7 +3464,7 @@
          * @param expPos Expected position.
          */
         @SuppressWarnings("ForLoopReplaceableByForEach")
-        void flushBuffer(long expPos) {
+        void flushBuffer(long expPos) throws IgniteCheckedException {
             if (mmap)
                 return;
 
@@ -3432,6 +3490,11 @@
                 if (val == Long.MIN_VALUE) {
                     waiters.remove(t);
 
+                    Throwable walWriterError = walWriter.err;
+
+                    if (walWriterError != null)
+                        throw new IgniteCheckedException("Flush buffer failed.", walWriterError);
+
                     return;
                 }
                 else
@@ -3505,6 +3568,48 @@
     }
 
     /**
+     * Syncs WAL segment file.
+     */
+    private class WalSegmentSyncer extends GridWorker {
+        /** Sync timeout. */
+        long syncTimeout;
+
+        /**
+         * @param igniteInstanceName Ignite instance name.
+         * @param log Logger.
+         */
+        public WalSegmentSyncer(String igniteInstanceName, IgniteLogger log) {
+            super(igniteInstanceName, "wal-segment-syncer", log);
+
+            syncTimeout = Math.max(IgniteSystemProperties.getLong(IGNITE_WAL_SEGMENT_SYNC_TIMEOUT,
+                DFLT_WAL_SEGMENT_SYNC_TIMEOUT), 100L);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+            while (!isCancelled()) {
+                sleep(syncTimeout);
+
+                try {
+                    flush(null, true);
+                }
+                catch (IgniteCheckedException e) {
+                    U.error(log, "Exception when flushing WAL.", e);
+                }
+            }
+        }
+
+        /** Shutted down the worker. */
+        private void shutdown() {
+            synchronized (this) {
+                U.cancel(this);
+            }
+
+            U.join(this, log);
+        }
+    }
+
+    /**
      * Scans provided folder for a WAL segment files
      * @param walFilesDir directory to scan
      * @return found WAL file descriptors
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java
index 7521f73..6a816a5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FsyncModeFileWriteAheadLogManager.java
@@ -55,6 +55,7 @@
 import java.util.concurrent.locks.LockSupport;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
+import java.util.stream.Stream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipInputStream;
 import java.util.zip.ZipOutputStream;
@@ -66,13 +67,13 @@
 import org.apache.ignite.configuration.WALMode;
 import org.apache.ignite.events.EventType;
 import org.apache.ignite.events.WalSegmentArchivedEvent;
+import org.apache.ignite.events.WalSegmentCompactedEvent;
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.managers.eventstorage.GridEventStorageManager;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.MarshalledRecord;
@@ -83,9 +84,12 @@
 import org.apache.ignite.internal.processors.cache.WalStateManager.WALDisableContext;
 import org.apache.ignite.internal.processors.cache.persistence.DataStorageMetricsImpl;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings;
+import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException;
 import org.apache.ignite.internal.processors.cache.persistence.wal.crc.PureJavaCrc32;
 import org.apache.ignite.internal.processors.cache.persistence.wal.record.HeaderRecord;
 import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer;
@@ -101,8 +105,8 @@
 import org.apache.ignite.internal.util.typedef.CIX1;
 import org.apache.ignite.internal.util.typedef.CO;
 import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.SB;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.util.worker.GridWorker;
 import org.apache.ignite.lang.IgniteBiTuple;
@@ -116,7 +120,10 @@
 import static java.nio.file.StandardOpenOption.CREATE;
 import static java.nio.file.StandardOpenOption.READ;
 import static java.nio.file.StandardOpenOption.WRITE;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE;
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SERIALIZER_VERSION;
+import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_COMPACTED;
 import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR;
 import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION;
 import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader;
@@ -129,9 +136,6 @@
     public static final FileDescriptor[] EMPTY_DESCRIPTORS = new FileDescriptor[0];
 
     /** */
-    public static final String WAL_SEGMENT_FILE_EXT = ".wal";
-
-    /** */
     private static final byte[] FILL_BUF = new byte[1024 * 1024];
 
     /** Pattern for segment file names */
@@ -185,12 +189,34 @@
     /** Latest serializer version to use. */
     private static final int LATEST_SERIALIZER_VERSION = 2;
 
+    /**
+     * Percentage of archive size for checkpoint trigger. Need for calculate max size of WAL after last checkpoint.
+     * Checkpoint should be triggered when max size of WAL after last checkpoint more than maxWallArchiveSize * thisValue
+     */
+    private static final double CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE =
+        IgniteSystemProperties.getDouble(IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE, 0.25);
+
+    /**
+     * Percentage of WAL archive size to calculate threshold since which removing of old archive should be started.
+     */
+    private static final double THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE =
+        IgniteSystemProperties.getDouble(IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE, 0.5);
+
     /** */
     private final boolean alwaysWriteFullPages;
 
     /** WAL segment size in bytes */
     private final long maxWalSegmentSize;
 
+    /**
+     * Maximum number of allowed segments without checkpoint. If we have their more checkpoint should be triggered.
+     * It is simple way to calculate wal size without checkpoint instead fair wal size calculating.
+     */
+    private final long maxSegCountWithoutCheckpoint;
+
+    /** Size of wal archive since which removing of old archive should be started */
+    private final long allowedThresholdWalArchiveSize;
+
     /** */
     private final WALMode mode;
 
@@ -319,6 +345,11 @@
         walAutoArchiveAfterInactivity = dsCfg.getWalAutoArchiveAfterInactivity();
         evt = ctx.event();
 
+        maxSegCountWithoutCheckpoint =
+            (long)((dsCfg.getMaxWalArchiveSize() * CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE) / dsCfg.getWalSegmentSize());
+
+        allowedThresholdWalArchiveSize = (long)(dsCfg.getMaxWalArchiveSize() * THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE);
+
         assert mode == WALMode.FSYNC : dsCfg;
     }
 
@@ -587,10 +618,10 @@
         List<File> res = new ArrayList<>();
 
         for (long i = low.index(); i < high.index(); i++) {
-            String segmentName = FileWriteAheadLogManager.FileDescriptor.fileName(i);
+            String segmentName = FileDescriptor.fileName(i);
 
             File file = new File(walArchiveDir, segmentName);
-            File fileZip = new File(walArchiveDir, segmentName + ".zip");
+            File fileZip = new File(walArchiveDir, segmentName + FilePageStoreManager.ZIP_SUFFIX);
 
             if (file.exists())
                 res.add(file);
@@ -787,7 +818,7 @@
     private boolean hasIndex(long absIdx) {
         String segmentName = FileDescriptor.fileName(absIdx);
 
-        String zipSegmentName = FileDescriptor.fileName(absIdx) + ".zip";
+        String zipSegmentName = FileDescriptor.fileName(absIdx) + FilePageStoreManager.ZIP_SUFFIX;
 
         boolean inArchive = new File(walArchiveDir, segmentName).exists() ||
             new File(walArchiveDir, zipSegmentName).exists();
@@ -848,9 +879,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void allowCompressionUntil(WALPointer ptr) {
+    @Override public void notchLastCheckpointPtr(WALPointer ptr) {
         if (compressor != null)
-            compressor.allowCompressionUntil(((FileWALPointer)ptr).index());
+            compressor.keepUncompressedIdxFrom(((FileWALPointer)ptr).index());
     }
 
     /** {@inheritDoc} */
@@ -867,12 +898,51 @@
         return res >= 0 ? res : 0;
     }
 
+    /**
+     * Files from archive WAL directory.
+     */
+    private FileDescriptor[] walArchiveFiles() {
+        return scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER));
+    }
+
+    /** {@inheritDoc} */
+    @Override public long maxArchivedSegmentToDelete() {
+        //When maxWalArchiveSize==MAX_VALUE deleting files is not permit.
+        if (dsCfg.getMaxWalArchiveSize() == Long.MAX_VALUE)
+            return -1;
+
+        FileDescriptor[] archivedFiles = walArchiveFiles();
+
+        Long totalArchiveSize = Stream.of(archivedFiles)
+            .map(desc -> desc.file().length())
+            .reduce(0L, Long::sum);
+
+        if (archivedFiles.length == 0 || totalArchiveSize < allowedThresholdWalArchiveSize)
+            return -1;
+
+        long sizeOfOldestArchivedFiles = 0;
+
+        for (FileDescriptor desc : archivedFiles) {
+            sizeOfOldestArchivedFiles += desc.file().length();
+
+            if (totalArchiveSize - sizeOfOldestArchivedFiles < allowedThresholdWalArchiveSize)
+                return desc.getIdx();
+        }
+
+        return archivedFiles[archivedFiles.length - 1].getIdx();
+    }
+
     /** {@inheritDoc} */
     @Override public long lastArchivedSegment() {
         return archiver.lastArchivedAbsoluteIndex();
     }
 
     /** {@inheritDoc} */
+    @Override public long lastCompactedSegment() {
+        return compressor != null ? compressor.lastCompressedIdx : -1L;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean reserved(WALPointer ptr) {
         FileWALPointer fPtr = (FileWALPointer)ptr;
 
@@ -1051,6 +1121,9 @@
 
             FileWriteHandle next = initNextWriteHandle(cur.idx);
 
+            if (next.idx - lashCheckpointFileIdx() >= maxSegCountWithoutCheckpoint)
+                cctx.database().forceCheckpoint("too big size of WAL without checkpoint");
+
             boolean swapped = currentHndUpd.compareAndSet(this, hnd, next);
 
             assert swapped : "Concurrent updates on rollover are not allowed";
@@ -1068,6 +1141,15 @@
     }
 
     /**
+     * Give last checkpoint file idx
+     */
+    private long lashCheckpointFileIdx() {
+        WALPointer lastCheckpointMark = cctx.database().lastCheckpointMarkWalPointer();
+
+        return lastCheckpointMark == null ? 0 : ((FileWALPointer)lastCheckpointMark).index();
+    }
+
+    /**
      * @param lastReadPtr Last read WAL file pointer.
      * @return Initialized file write handle.
      * @throws StorageException If failed to initialize WAL write handle.
@@ -1266,7 +1348,7 @@
         if (log.isDebugEnabled())
             log.debug("Creating new file [exists=" + file.exists() + ", file=" + file.getAbsolutePath() + ']');
 
-        File tmp = new File(file.getParent(), file.getName() + ".tmp");
+        File tmp = new File(file.getParent(), file.getName() + FilePageStoreManager.TMP_SUFFIX);
 
         formatFile(tmp);
 
@@ -1661,7 +1743,7 @@
 
             String name = FileDescriptor.fileName(absIdx);
 
-            File dstTmpFile = new File(walArchiveDir, name + ".tmp");
+            File dstTmpFile = new File(walArchiveDir, name + FilePageStoreManager.TMP_SUFFIX);
 
             File dstFile = new File(walArchiveDir, name);
 
@@ -1739,7 +1821,7 @@
         private volatile long lastCompressedIdx = -1L;
 
         /** All segments prior to this (inclusive) can be compressed. */
-        private volatile long lastAllowedToCompressIdx = -1L;
+        private volatile long minUncompressedIdxToKeep = -1L;
 
         /**
          *
@@ -1768,10 +1850,10 @@
         }
 
         /**
-         * @param lastCpStartIdx Segment index to allow compression until (exclusively).
+         * @param idx Minimum raw segment index that should be preserved from deletion.
          */
-        synchronized void allowCompressionUntil(long lastCpStartIdx) {
-            lastAllowedToCompressIdx = lastCpStartIdx - 1;
+        synchronized void keepUncompressedIdxFrom(long idx) {
+            minUncompressedIdxToKeep = idx;
 
             notify();
         }
@@ -1794,7 +1876,7 @@
                 if (stopped)
                     return -1;
 
-                while (segmentToCompress > Math.min(lastAllowedToCompressIdx, archiver.lastArchivedAbsoluteIndex())) {
+                while (segmentToCompress > archiver.lastArchivedAbsoluteIndex()) {
                     wait();
 
                     if (stopped)
@@ -1813,19 +1895,19 @@
          * Deletes raw WAL segments if they aren't locked and already have compressed copies of themselves.
          */
         private void deleteObsoleteRawSegments() {
-            FsyncModeFileWriteAheadLogManager.FileDescriptor[] descs = scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER));
+            FileDescriptor[] descs = scan(walArchiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER));
 
             Set<Long> indices = new HashSet<>();
             Set<Long> duplicateIndices = new HashSet<>();
 
-            for (FsyncModeFileWriteAheadLogManager.FileDescriptor desc : descs) {
+            for (FileDescriptor desc : descs) {
                 if (!indices.add(desc.idx))
                     duplicateIndices.add(desc.idx);
             }
 
             FileArchiver archiver0 = archiver;
 
-            for (FsyncModeFileWriteAheadLogManager.FileDescriptor desc : descs) {
+            for (FileDescriptor desc : descs) {
                 if (desc.isCompressed())
                     continue;
 
@@ -1833,7 +1915,7 @@
                 if (archiver0 != null && archiver0.reserved(desc.idx))
                     return;
 
-                if (desc.idx < lastCompressedIdx && duplicateIndices.contains(desc.idx)) {
+                if (desc.idx < minUncompressedIdxToKeep && duplicateIndices.contains(desc.idx)) {
                     if (!desc.file.delete())
                         U.warn(log, "Failed to remove obsolete WAL segment (make sure the process has enough rights): " +
                             desc.file.getAbsolutePath() + ", exists: " + desc.file.exists());
@@ -1855,11 +1937,13 @@
                     if (currReservedSegment == -1)
                         continue;
 
-                    File tmpZip = new File(walArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(currReservedSegment) + ".zip" + ".tmp");
+                    File tmpZip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment)
+                        + FilePageStoreManager.ZIP_SUFFIX + FilePageStoreManager.TMP_SUFFIX);
 
-                    File zip = new File(walArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(currReservedSegment) + ".zip");
+                    File zip = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment)
+                        + FilePageStoreManager.ZIP_SUFFIX);
 
-                    File raw = new File(walArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(currReservedSegment));
+                    File raw = new File(walArchiveDir, FileDescriptor.fileName(currReservedSegment));
                     if (!Files.exists(raw.toPath()))
                         throw new IgniteCheckedException("WAL archive segment is missing: " + raw);
 
@@ -1871,6 +1955,14 @@
                         try (FileIO f0 = ioFactory.create(zip, CREATE, READ, WRITE)) {
                             f0.force();
                         }
+
+                        if (evt.isRecordable(EVT_WAL_SEGMENT_COMPACTED)) {
+                            evt.record(new WalSegmentCompactedEvent(
+                                cctx.discovery().localNode(),
+                                currReservedSegment,
+                                zip.getAbsoluteFile())
+                            );
+                        }
                     }
 
                     lastCompressedIdx = currReservedSegment;
@@ -1986,8 +2078,10 @@
                         if (isCancelled())
                             break;
 
-                        File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) + ".zip");
-                        File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress) + ".tmp");
+                        File zip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress)
+                            + FilePageStoreManager.ZIP_SUFFIX);
+                        File unzipTmp = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress)
+                            + FilePageStoreManager.TMP_SUFFIX);
                         File unzip = new File(walArchiveDir, FileDescriptor.fileName(segmentToDecompress));
 
                         try (ZipInputStream zis = new ZipInputStream(new BufferedInputStream(new FileInputStream(zip)));
@@ -2179,126 +2273,6 @@
     }
 
     /**
-     * WAL file descriptor.
-     */
-    public static class FileDescriptor implements Comparable<FileDescriptor>, AbstractWalRecordsIterator.AbstractFileDescriptor {
-        /** */
-        protected final File file;
-
-        /** Absolute WAL segment file index */
-        protected final long idx;
-
-        /**
-         * Creates file descriptor. Index is restored from file name
-         *
-         * @param file WAL segment file.
-         */
-        public FileDescriptor(@NotNull File file) {
-            this(file, null);
-        }
-
-        /**
-         * @param file WAL segment file.
-         * @param idx Absolute WAL segment file index. For null value index is restored from file name
-         */
-        public FileDescriptor(@NotNull File file, @Nullable Long idx) {
-            this.file = file;
-
-            String fileName = file.getName();
-
-            assert fileName.contains(WAL_SEGMENT_FILE_EXT);
-
-            this.idx = idx == null ? Long.parseLong(fileName.substring(0, 16)) : idx;
-        }
-
-        /**
-         * @param segment Segment index.
-         * @return Segment file name.
-         */
-        public static String fileName(long segment) {
-            SB b = new SB();
-
-            String segmentStr = Long.toString(segment);
-
-            for (int i = segmentStr.length(); i < 16; i++)
-                b.a('0');
-
-            b.a(segmentStr).a(WAL_SEGMENT_FILE_EXT);
-
-            return b.toString();
-        }
-
-        /**
-         * @param segment Segment number as integer.
-         * @return Segment number as aligned string.
-         */
-        private static String segmentNumber(long segment) {
-            SB b = new SB();
-
-            String segmentStr = Long.toString(segment);
-
-            for (int i = segmentStr.length(); i < 16; i++)
-                b.a('0');
-
-            b.a(segmentStr);
-
-            return b.toString();
-        }
-
-        /** {@inheritDoc} */
-        @Override public int compareTo(FileDescriptor o) {
-            return Long.compare(idx, o.idx);
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean equals(Object o) {
-            if (this == o)
-                return true;
-
-            if (!(o instanceof FileDescriptor))
-                return false;
-
-            FileDescriptor that = (FileDescriptor)o;
-
-            return idx == that.idx;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            return (int)(idx ^ (idx >>> 32));
-        }
-
-        /**
-         * @return Absolute WAL segment file index
-         */
-        public long getIdx() {
-            return idx;
-        }
-
-        /**
-         * @return absolute pathname string of this file descriptor pathname.
-         */
-        public String getAbsolutePath() {
-            return file.getAbsolutePath();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean isCompressed() {
-            return file.getName().endsWith(".zip");
-        }
-
-        /** {@inheritDoc} */
-        @Override public File file() {
-            return file;
-        }
-
-        /** {@inheritDoc} */
-        @Override public long idx() {
-            return idx;
-        }
-    }
-
-    /**
      *
      */
     private abstract static class FileHandle {
@@ -3179,7 +3153,8 @@
 
             if (!desc.file().exists()) {
                 FileDescriptor zipFile = new FileDescriptor(
-                        new File(walArchiveDir, FileDescriptor.fileName(desc.idx()) + ".zip"));
+                        new File(walArchiveDir, FileDescriptor.fileName(desc.idx())
+                            + FilePageStoreManager.ZIP_SUFFIX));
 
                 if (!zipFile.file.exists()) {
                     throw new FileNotFoundException("Both compressed and raw segment files are missing in archive " +
@@ -3314,6 +3289,50 @@
             return nextHandle;
         }
 
+        /** {@inheritDoc} */
+        @Override protected IgniteCheckedException handleRecordException(
+            @NotNull Exception e,
+            @Nullable FileWALPointer ptr) {
+
+            if (e instanceof IgniteCheckedException)
+                if (X.hasCause(e, IgniteDataIntegrityViolationException.class))
+                    // This means that there is no explicit last sengment, so we iterate unil the very end.
+                    if (end == null) {
+                        long nextWalSegmentIdx = curWalSegmIdx + 1;
+
+                        // Check that we should not look this segment up in archive directory.
+                        // Basically the same check as in "advanceSegment" method.
+                        if (archiver != null)
+                            if (!canReadArchiveOrReserveWork(nextWalSegmentIdx))
+                                try {
+                                    long workIdx = nextWalSegmentIdx % dsCfg.getWalSegments();
+
+                                    FileDescriptor fd = new FileDescriptor(
+                                        new File(walWorkDir, FileDescriptor.fileName(workIdx)),
+                                        nextWalSegmentIdx
+                                    );
+
+                                    try {
+                                        ReadFileHandle nextHandle = initReadHandle(fd, null);
+
+                                        // "nextHandle == null" is true only if current segment is the last one in the
+                                        // whole history. Only in such case we ignore crc validation error and just stop
+                                        // as if we reached the end of the WAL.
+                                        if (nextHandle == null)
+                                            return null;
+                                    }
+                                    catch (IgniteCheckedException | FileNotFoundException initReadHandleException) {
+                                        e.addSuppressed(initReadHandleException);
+                                    }
+                                }
+                                finally {
+                                    releaseWorkSegment(nextWalSegmentIdx);
+                                }
+                    }
+
+            return super.handleRecordException(e, ptr);
+        }
+
         /**
          * @param absIdx Absolute index to check.
          * @return <ul><li> {@code True} if we can safely read the archive,  </li> <li>{@code false} if the segment has
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SegmentedRingByteBuffer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SegmentedRingByteBuffer.java
index 43fe71a..7cfeb98 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SegmentedRingByteBuffer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SegmentedRingByteBuffer.java
@@ -207,7 +207,7 @@
 
     /**
      * @param size Amount of bytes for reserve.
-     * @param safe Safe ьщву.
+     * @param safe Safe mode.
      */
     private WriteSegment offer0(int size, boolean safe) {
         if (size > cap)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SingleSegmentLogicalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SingleSegmentLogicalRecordsIterator.java
index f688bb4..a42eb89 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SingleSegmentLogicalRecordsIterator.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/SingleSegmentLogicalRecordsIterator.java
@@ -100,8 +100,8 @@
         else {
             segmentInitialized = true;
 
-            FileWriteAheadLogManager.FileDescriptor fd = new FileWriteAheadLogManager.FileDescriptor(
-                new File(archiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(curWalSegmIdx)));
+            FileDescriptor fd = new FileDescriptor(
+                new File(archiveDir, FileDescriptor.fileName(curWalSegmIdx)));
 
             try {
                 return initReadHandle(fd, null);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java
index 2bfc22d..aaff33a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/IgniteWalIteratorFactory.java
@@ -43,9 +43,9 @@
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
 import org.apache.ignite.internal.processors.cache.persistence.file.UnzipFileIO;
 import org.apache.ignite.internal.processors.cache.persistence.wal.ByteBufferExpander;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileInput;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
-import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.FileDescriptor;
 import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.A;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -108,6 +108,26 @@
      * This method may be used for work folder, file indexes are scanned from the file context.
      * In this mode only provided WAL segments will be scanned. New WAL files created during iteration will be ignored.
      *
+     * @param replayFrom File WAL pointer for start replay.
+     * @param filesOrDirs files to scan. A file can be the path to '.wal' file, or directory with '.wal' files.
+     * Order is not important, but it is significant to provide all segments without omissions.
+     * Path should not contain special symbols. Special symbols should be already masked.
+     * @return closable WAL records iterator, should be closed when non needed.
+     * @throws IgniteCheckedException if failed to read files
+     * @throws IllegalArgumentException If parameter validation failed.
+     */
+    public WALIterator iterator(
+        @NotNull FileWALPointer replayFrom,
+        @NotNull File... filesOrDirs
+    ) throws IgniteCheckedException, IllegalArgumentException {
+        return iterator(new IteratorParametersBuilder().from(replayFrom).filesOrDirs(filesOrDirs));
+    }
+
+    /**
+     * Creates iterator for file by file scan mode.
+     * This method may be used for work folder, file indexes are scanned from the file context.
+     * In this mode only provided WAL segments will be scanned. New WAL files created during iteration will be ignored.
+     *
      * @param filesOrDirs paths to scan. A path can be direct to '.wal' file, or directory with '.wal' files.
      * Order is not important, but it is significant to provide all segments without omissions.
      * Path should not contain special symbols. Special symbols should be already masked.
@@ -122,6 +142,26 @@
     }
 
     /**
+     * Creates iterator for file by file scan mode.
+     * This method may be used for work folder, file indexes are scanned from the file context.
+     * In this mode only provided WAL segments will be scanned. New WAL files created during iteration will be ignored.
+     *
+     * @param replayFrom File WAL pointer for start replay.
+     * @param filesOrDirs paths to scan. A path can be direct to '.wal' file, or directory with '.wal' files.
+     * Order is not important, but it is significant to provide all segments without omissions.
+     * Path should not contain special symbols. Special symbols should be already masked.
+     * @return closable WAL records iterator, should be closed when non needed.
+     * @throws IgniteCheckedException If failed to read files.
+     * @throws IllegalArgumentException If parameter validation failed.
+     */
+    public WALIterator iterator(
+        @NotNull FileWALPointer replayFrom,
+        @NotNull String... filesOrDirs
+    ) throws IgniteCheckedException, IllegalArgumentException {
+        return iterator(new IteratorParametersBuilder().from(replayFrom).filesOrDirs(filesOrDirs));
+    }
+
+    /**
      * @param iteratorParametersBuilder Iterator parameters builder.
      * @return closable WAL records iterator, should be closed when non needed
      */
@@ -133,11 +173,10 @@
         return new StandaloneWalRecordsIterator(log,
             prepareSharedCtx(iteratorParametersBuilder),
             iteratorParametersBuilder.ioFactory,
-            resolveWalFiles(
-                iteratorParametersBuilder.filesOrDirs,
-                iteratorParametersBuilder
-            ),
+            resolveWalFiles(iteratorParametersBuilder),
             iteratorParametersBuilder.filter,
+            iteratorParametersBuilder.lowBound,
+            iteratorParametersBuilder.highBound,
             iteratorParametersBuilder.keepBinary,
             iteratorParametersBuilder.bufferSize
         );
@@ -182,10 +221,7 @@
 
         List<T2<Long, Long>> gaps = new ArrayList<>();
 
-        List<FileDescriptor> descriptors = resolveWalFiles(
-            iteratorParametersBuilder.filesOrDirs,
-            iteratorParametersBuilder
-        );
+        List<FileDescriptor> descriptors = resolveWalFiles(iteratorParametersBuilder);
 
         Iterator<FileDescriptor> it = descriptors.iterator();
 
@@ -217,10 +253,11 @@
      * @param iteratorParametersBuilder IteratorParametersBuilder.
      * @return list of file descriptors with checked header records, having correct file index is set
      */
-    private List<FileDescriptor> resolveWalFiles(
-        File[] filesOrDirs,
+    public List<FileDescriptor> resolveWalFiles(
         IteratorParametersBuilder iteratorParametersBuilder
     ) {
+        File[] filesOrDirs = iteratorParametersBuilder.filesOrDirs;
+
         if (filesOrDirs == null || filesOrDirs.length == 0)
             return Collections.emptyList();
 
@@ -327,7 +364,7 @@
             kernalCtx, null, null, null,
             null, null, null, dbMgr, null,
             null, null, null, null,
-            null, null, null
+            null, null,null, null
         );
     }
 
@@ -336,6 +373,12 @@
      */
     public static class IteratorParametersBuilder {
         /** */
+        public static final FileWALPointer DFLT_LOW_BOUND = new FileWALPointer(Long.MIN_VALUE, 0, 0);
+
+        /** */
+        public static final FileWALPointer DFLT_HIGH_BOUND = new FileWALPointer(Long.MAX_VALUE, Integer.MAX_VALUE, 0);
+
+        /** */
         private File[] filesOrDirs;
 
         /** */
@@ -366,6 +409,12 @@
         /** */
         @Nullable private IgniteBiPredicate<RecordType, WALPointer> filter;
 
+        /** */
+        private FileWALPointer lowBound = DFLT_LOW_BOUND;
+
+        /** */
+        private FileWALPointer highBound = DFLT_HIGH_BOUND;
+
         /**
          * @param filesOrDirs Paths to files or directories.
          * @return IteratorParametersBuilder Self reference.
@@ -463,6 +512,26 @@
         }
 
         /**
+         * @param lowBound WAL pointer to start from.
+         * @return IteratorParametersBuilder Self reference.
+         */
+        public IteratorParametersBuilder from(FileWALPointer lowBound) {
+            this.lowBound = lowBound;
+
+            return this;
+        }
+
+        /**
+         * @param highBound WAL pointer to end of.
+         * @return IteratorParametersBuilder Self reference.
+         */
+        public IteratorParametersBuilder to(FileWALPointer highBound) {
+            this.highBound = highBound;
+
+            return this;
+        }
+
+        /**
          * Copy current state of builder to new instance.
          *
          * @return IteratorParametersBuilder Self reference.
@@ -476,6 +545,8 @@
                 .ioFactory(ioFactory)
                 .binaryMetadataFileStoreDir(binaryMetadataFileStoreDir)
                 .marshallerMappingFileStoreDir(marshallerMappingFileStoreDir)
+                .from(lowBound)
+                .to(highBound)
                 .filter(filter);
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java
index b9ab76a..f160549 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java
@@ -49,6 +49,7 @@
 import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor;
 import org.apache.ignite.internal.processors.cache.GridCacheProcessor;
 import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccProcessor;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver;
 import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor;
@@ -467,6 +468,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public MvccProcessor coordinators() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean invalid() {
         return false;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java
index 9df4468..7cfb66d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIterator.java
@@ -28,6 +28,7 @@
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
+import org.apache.ignite.internal.pagemem.wal.record.FilteredRecord;
 import org.apache.ignite.internal.pagemem.wal.record.LazyDataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.UnwrapDataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
@@ -40,22 +41,30 @@
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
 import org.apache.ignite.internal.processors.cache.persistence.file.UnzipFileIO;
 import org.apache.ignite.internal.processors.cache.persistence.wal.AbstractWalRecordsIterator;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileInput;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
-import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.FileDescriptor;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.ReadFileHandle;
+import org.apache.ignite.internal.processors.cache.persistence.wal.WalSegmentTailReachedException;
+import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException;
 import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer;
 import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactoryImpl;
+import org.apache.ignite.internal.processors.cache.persistence.wal.serializer.SegmentHeader;
 import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory.IteratorParametersBuilder.DFLT_HIGH_BOUND;
 import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.readSegmentHeader;
 
 /**
- * WAL reader iterator, for creation in standalone WAL reader tool
- * Operates over one directory, does not provide start and end boundaries
+ * WAL reader iterator, for creation in standalone WAL reader tool Operates over one directory, does not provide start
+ * and end boundaries
  */
 class StandaloneWalRecordsIterator extends AbstractWalRecordsIterator {
     /** Record buffer size */
@@ -76,14 +85,21 @@
     /** Keep binary. This flag disables converting of non primitive types (BinaryObjects) */
     private boolean keepBinary;
 
+    /** Replay from bound include. */
+    private final FileWALPointer lowBound;
+
+    /** Replay to bound include */
+    private final FileWALPointer highBound;
+
     /**
      * Creates iterator in file-by-file iteration mode. Directory
+     *
      * @param log Logger.
      * @param sharedCtx Shared context. Cache processor is to be configured if Cache Object Key & Data Entry is
      * required.
      * @param ioFactory File I/O factory.
-     * @param keepBinary Keep binary. This flag disables converting of non primitive types
-     * (BinaryObjects will be used instead)
+     * @param keepBinary Keep binary. This flag disables converting of non primitive types (BinaryObjects will be used
+     * instead)
      * @param walFiles Wal files.
      */
     StandaloneWalRecordsIterator(
@@ -92,6 +108,8 @@
         @NotNull FileIOFactory ioFactory,
         @NotNull List<FileDescriptor> walFiles,
         IgniteBiPredicate<RecordType, WALPointer> readTypeFilter,
+        FileWALPointer lowBound,
+        FileWALPointer highBound,
         boolean keepBinary,
         int initialReadBufferSize
     ) throws IgniteCheckedException {
@@ -103,6 +121,9 @@
             initialReadBufferSize
         );
 
+        this.lowBound = lowBound;
+        this.highBound = highBound;
+
         this.keepBinary = keepBinary;
 
         walFileDescriptors = walFiles;
@@ -113,8 +134,8 @@
     }
 
     /**
-     * For directory mode sets oldest file as initial segment,
-     * for file by file mode, converts all files to descriptors and gets oldest as initial.
+     * For directory mode sets oldest file as initial segment, for file by file mode, converts all files to descriptors
+     * and gets oldest as initial.
      *
      * @param walFiles files for file-by-file iteration mode
      */
@@ -129,6 +150,25 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected IgniteCheckedException validateTailReachedException(
+        WalSegmentTailReachedException tailReachedException,
+        AbstractReadFileHandle currWalSegment
+    ) {
+        FileDescriptor lastWALSegmentDesc = walFileDescriptors.get(walFileDescriptors.size() - 1);
+
+        // Iterator can not be empty.
+        assert lastWALSegmentDesc != null;
+
+        return lastWALSegmentDesc.idx() != currWalSegment.idx() ?
+            new IgniteCheckedException(
+                "WAL tail reached not in the last available segment, " +
+                    "potentially corrupted segment, last available segment idx=" + lastWALSegmentDesc.idx() +
+                    ", path=" + lastWALSegmentDesc.file().getPath() +
+                    ", last read segment idx=" + currWalSegment.idx(), tailReachedException
+            ) : null;
+    }
+
+    /** {@inheritDoc} */
     @Override protected AbstractReadFileHandle advanceSegment(
         @Nullable final AbstractReadFileHandle curWalSegment
     ) throws IgniteCheckedException {
@@ -136,14 +176,19 @@
         if (curWalSegment != null)
             curWalSegment.close();
 
-        curWalSegmIdx++;
+        FileDescriptor fd;
 
-        curIdx++;
+        do {
+            curWalSegmIdx++;
 
-        if (curIdx >= walFileDescriptors.size())
-            return null;
+            curIdx++;
 
-        FileDescriptor fd = walFileDescriptors.get(curIdx);
+            if (curIdx >= walFileDescriptors.size())
+                return null;
+
+            fd = walFileDescriptors.get(curIdx);
+        }
+        while (!checkBounds(fd.idx()));
 
         if (log.isDebugEnabled())
             log.debug("Reading next file [absIdx=" + curWalSegmIdx + ", file=" + fd.file().getAbsolutePath() + ']');
@@ -153,7 +198,12 @@
         curRec = null;
 
         try {
-            return initReadHandle(fd, null);
+            FileWALPointer initPtr = null;
+
+            if (lowBound.index() == fd.idx())
+                initPtr = lowBound;
+
+            return initReadHandle(fd, initPtr);
         }
         catch (FileNotFoundException e) {
             if (log.isInfoEnabled())
@@ -164,24 +214,69 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected IgniteBiTuple<WALPointer, WALRecord> advanceRecord(
+        @Nullable AbstractReadFileHandle hnd
+    ) throws IgniteCheckedException {
+        IgniteBiTuple<WALPointer, WALRecord> tup = super.advanceRecord(hnd);
+
+        if (tup == null)
+            return tup;
+
+        if (!checkBounds(tup.get1())) {
+            if (curRec != null) {
+                FileWALPointer prevRecPtr = (FileWALPointer)curRec.get1();
+
+                // Fast stop condition, after high bound reached.
+                if (prevRecPtr != null && prevRecPtr.compareTo(highBound) > 0)
+                    return null;
+            }
+
+            return new T2<>(tup.get1(), FilteredRecord.INSTANCE); // FilteredRecord for mark as filtered.
+        }
+
+        return tup;
+    }
+
+    /**
+     * @param ptr WAL pointer.
+     * @return {@code True} If pointer between low and high bounds. {@code False} if not.
+     */
+    private boolean checkBounds(WALPointer ptr) {
+        FileWALPointer ptr0 = (FileWALPointer)ptr;
+
+        return ptr0.compareTo(lowBound) >= 0 && ptr0.compareTo(highBound) <= 0;
+    }
+
+    /**
+     * @param idx WAL segment index.
+     * @return {@code True} If pointer between low and high bounds. {@code False} if not.
+     */
+    private boolean checkBounds(long idx) {
+        return idx >= lowBound.index() && idx <= highBound.index();
+    }
+
+    /** {@inheritDoc} */
     @Override protected AbstractReadFileHandle initReadHandle(
         @NotNull AbstractFileDescriptor desc,
         @Nullable FileWALPointer start
     ) throws IgniteCheckedException, FileNotFoundException {
 
         AbstractFileDescriptor fd = desc;
-
+        FileIO fileIO = null;
+        SegmentHeader segmentHeader;
         while (true) {
             try {
-                FileIO fileIO = fd.isCompressed() ? new UnzipFileIO(fd.file()) : ioFactory.create(fd.file());
+                fileIO = fd.isCompressed() ? new UnzipFileIO(fd.file()) : ioFactory.create(fd.file());
 
-                readSegmentHeader(fileIO, curWalSegmIdx);
+                segmentHeader = readSegmentHeader(fileIO, curWalSegmIdx);
 
                 break;
             }
             catch (IOException | IgniteCheckedException e) {
                 log.error("Failed to init segment curWalSegmIdx=" + curWalSegmIdx + ", curIdx=" + curIdx, e);
 
+                U.closeQuiet(fileIO);
+
                 curIdx++;
 
                 if (curIdx >= walFileDescriptors.size())
@@ -191,13 +286,13 @@
             }
         }
 
-        return super.initReadHandle(fd, start);
+        return initReadHandle(fd, start, fileIO, segmentHeader);
     }
 
     /** {@inheritDoc} */
     @NotNull @Override protected WALRecord postProcessRecord(@NotNull final WALRecord rec) {
-         GridKernalContext kernalCtx = sharedCtx.kernalContext();
-         IgniteCacheObjectProcessor processor = kernalCtx.cacheObjects();
+        GridKernalContext kernalCtx = sharedCtx.kernalContext();
+        IgniteCacheObjectProcessor processor = kernalCtx.cacheObjects();
 
         if (processor != null && rec.type() == RecordType.DATA_RECORD) {
             try {
@@ -211,6 +306,23 @@
         return super.postProcessRecord(rec);
     }
 
+    /** {@inheritDoc} */
+    @Override protected IgniteCheckedException handleRecordException(
+        @NotNull Exception e,
+        @Nullable FileWALPointer ptr
+    ) {
+        if (e instanceof IgniteCheckedException)
+            if (X.hasCause(e, IgniteDataIntegrityViolationException.class))
+                // "curIdx" is an index in walFileDescriptors list.
+                if (curIdx == walFileDescriptors.size() - 1)
+                    // This means that there is no explicit last sengment, so we stop as if we reached the end
+                    // of the WAL.
+                    if (highBound.equals(DFLT_HIGH_BOUND))
+                        return null;
+
+        return super.handleRecordException(e, ptr);
+    }
+
     /**
      * Performs post processing of lazy data record, converts it to unwrap record.
      *
@@ -247,6 +359,7 @@
 
     /**
      * Converts entry or lazy data entry into unwrapped entry
+     *
      * @param processor cache object processor for de-serializing objects.
      * @param fakeCacheObjCtx cache object context for de-serializing binary and unwrapping objects.
      * @param dataEntry entry to process
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java
index 1807d1d..65f0aae 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java
@@ -35,6 +35,9 @@
         DELTA_TYPE_SET.add(WALRecord.RecordType.DATA_PAGE_INSERT_FRAGMENT_RECORD);
         DELTA_TYPE_SET.add(WALRecord.RecordType.DATA_PAGE_REMOVE_RECORD);
         DELTA_TYPE_SET.add(WALRecord.RecordType.DATA_PAGE_SET_FREE_LIST_PAGE);
+        DELTA_TYPE_SET.add(WALRecord.RecordType.MVCC_DATA_PAGE_MARK_UPDATED_RECORD);
+        DELTA_TYPE_SET.add(WALRecord.RecordType.MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD);
+        DELTA_TYPE_SET.add(WALRecord.RecordType.MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD);
         DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_INIT_ROOT);
         DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_ADD_ROOT);
         DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_CUT_ROOT);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java
index ad06090..aa89c5a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java
@@ -41,6 +41,8 @@
 import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageInsertFragmentRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageInsertRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateNewTxStateHintRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccUpdateTxStateHintRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageRemoveRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageSetFreeListPageRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageUpdateRecord;
@@ -61,6 +63,7 @@
 import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastSuccessfulSnapshotId;
 import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateNextSnapshotId;
 import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdatePartitionDataRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.DataPageMvccMarkUpdatedRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.PageListMetaResetCountRecord;
 import org.apache.ignite.internal.pagemem.wal.record.delta.PagesListAddPageRecord;
@@ -194,6 +197,15 @@
             case DATA_PAGE_SET_FREE_LIST_PAGE:
                 return 4 + 8 + 8;
 
+            case MVCC_DATA_PAGE_MARK_UPDATED_RECORD:
+                return 4 + 8 + 4 + 8 + 8 + 4;
+
+            case MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD:
+                return 4 + 8 + 4 + 1;
+
+            case MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD:
+                return 4 + 8 + 4 + 1;
+
             case INIT_NEW_PAGE_RECORD:
                 return 4 + 8 + 2 + 2 + 8;
 
@@ -504,6 +516,41 @@
 
                 break;
 
+            case MVCC_DATA_PAGE_MARK_UPDATED_RECORD:
+                cacheId = in.readInt();
+                pageId = in.readLong();
+
+                itemId = in.readInt();
+                long newMvccCrd = in.readLong();
+                long newMvccCntr = in.readLong();
+                int newMvccOpCntr = in.readInt();
+
+                res = new DataPageMvccMarkUpdatedRecord(cacheId, pageId, itemId, newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+                break;
+
+            case MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD:
+                cacheId = in.readInt();
+                pageId = in.readLong();
+
+                itemId = in.readInt();
+                byte txState = in.readByte();
+
+                res = new DataPageMvccUpdateTxStateHintRecord(cacheId, pageId, itemId, txState);
+
+                break;
+
+            case MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD:
+                cacheId = in.readInt();
+                pageId = in.readLong();
+
+                itemId = in.readInt();
+                byte newTxState = in.readByte();
+
+                res = new DataPageMvccUpdateNewTxStateHintRecord(cacheId, pageId, itemId, newTxState);
+
+                break;
+
             case INIT_NEW_PAGE_RECORD:
                 cacheId = in.readInt();
                 pageId = in.readLong();
@@ -1035,6 +1082,41 @@
 
                 break;
 
+            case MVCC_DATA_PAGE_MARK_UPDATED_RECORD:
+                DataPageMvccMarkUpdatedRecord rmvRec = (DataPageMvccMarkUpdatedRecord)rec;
+
+                buf.putInt(rmvRec.groupId());
+                buf.putLong(rmvRec.pageId());
+
+                buf.putInt(rmvRec.itemId());
+                buf.putLong(rmvRec.newMvccCrd());
+                buf.putLong(rmvRec.newMvccCntr());
+                buf.putInt(rmvRec.newMvccOpCntr());
+
+                break;
+
+            case MVCC_DATA_PAGE_TX_STATE_HINT_UPDATED_RECORD:
+                DataPageMvccUpdateTxStateHintRecord txStRec = (DataPageMvccUpdateTxStateHintRecord)rec;
+
+                buf.putInt(txStRec.groupId());
+                buf.putLong(txStRec.pageId());
+
+                buf.putInt(txStRec.itemId());
+                buf.put(txStRec.txState());
+
+                break;
+
+            case MVCC_DATA_PAGE_NEW_TX_STATE_HINT_UPDATED_RECORD:
+                DataPageMvccUpdateNewTxStateHintRecord newTxStRec = (DataPageMvccUpdateNewTxStateHintRecord)rec;
+
+                buf.putInt(newTxStRec.groupId());
+                buf.putLong(newTxStRec.pageId());
+
+                buf.putInt(newTxStRec.itemId());
+                buf.put(newTxStRec.txState());
+
+                break;
+
             case INIT_NEW_PAGE_RECORD:
                 InitNewPageRecord inpRec = (InitNewPageRecord)rec;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java
index 2c65ebe..68e55e0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV2Serializer.java
@@ -113,10 +113,14 @@
             if (recType == SWITCH_SEGMENT_RECORD)
                 throw new SegmentEofException("Reached end of segment", null);
 
-            FileWALPointer ptr = readPositionAndCheckPoint(in, expPtr, skipPositionCheck);
+            FileWALPointer ptr = readPositionAndCheckPoint(in, expPtr, skipPositionCheck, recType);
 
-            if (recType == null)
-                throw new IOException("Unknown record type: " + recType);
+            if (recType == null) {
+                FileWALPointer exp = (FileWALPointer)expPtr;
+
+                throw new IOException("Unknown record type: " + recType +
+                    ", expected pointer [idx=" + exp.index() + ", offset=" + exp.fileOffset() + "]");
+            }
 
             if (recordFilter != null && !recordFilter.apply(recType, ptr)) {
                 int toSkip = ptr.length() - REC_TYPE_SIZE - FILE_WAL_POINTER_SIZE - CRC_SIZE;
@@ -241,7 +245,8 @@
     private static FileWALPointer readPositionAndCheckPoint(
         DataInput in,
         WALPointer expPtr,
-        boolean skipPositionCheck
+        boolean skipPositionCheck,
+        WALRecord.RecordType type
     ) throws IgniteCheckedException, IOException {
         long idx = in.readLong();
         int fileOff = in.readInt();
@@ -251,9 +256,9 @@
 
         if (!F.eq(idx, p.index()) || (!skipPositionCheck && !F.eq(fileOff, p.fileOffset())))
             throw new WalSegmentTailReachedException(
-                "WAL segment tail is reached. [ " +
-                        "Expected next state: {Index=" + p.index() + ",Offset=" + p.fileOffset() + "}, " +
-                        "Actual state : {Index=" + idx + ",Offset=" + fileOff + "} ]", null);
+                "WAL segment tail reached. [ " +
+                    "Expected next state: {Index=" + p.index() + ",Offset=" + p.fileOffset() + "}, " +
+                    "Actual state : {Index=" + idx + ",Offset=" + fileOff + "} ] recordType=" + type, null);
 
         return new FileWALPointer(idx, fileOff, len);
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheDistributedQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheDistributedQueryManager.java
index aac1659..c5f64c9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheDistributedQueryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheDistributedQueryManager.java
@@ -35,6 +35,7 @@
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
 import org.apache.ignite.internal.managers.communication.GridIoPolicy;
 import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
 import org.apache.ignite.internal.util.GridBoundedConcurrentOrderedSet;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
@@ -195,6 +196,9 @@
      */
     @SuppressWarnings("unchecked")
     @Override void processQueryRequest(UUID sndId, GridCacheQueryRequest req) {
+        assert req.mvccSnapshot() != null || !cctx.mvccEnabled() || req.cancel() ||
+            (req.type() == null && !req.fields()) : req; // Last assertion means next page request.
+
         if (req.cancel()) {
             cancelIds.add(new CancelMessageId(req.id(), sndId));
 
@@ -277,7 +281,8 @@
                 req.includeMetaData(),
                 req.keepBinary(),
                 req.subjectId(),
-                req.taskHash()
+                req.taskHash(),
+                req.mvccSnapshot()
             );
 
         return new GridCacheQueryInfo(
@@ -531,6 +536,8 @@
 
             String clsName = qry.query().queryClassName();
 
+            MvccSnapshot mvccSnapshot = qry.query().mvccSnapshot();
+
             final GridCacheQueryRequest req = new GridCacheQueryRequest(
                 cctx.cacheId(),
                 reqId,
@@ -551,6 +558,7 @@
                 qry.query().subjectId(),
                 qry.query().taskHash(),
                 queryTopologyVersion(),
+                mvccSnapshot,
                 // Force deployment anyway if scan query is used.
                 cctx.deploymentEnabled() || (qry.query().scanFilter() != null && cctx.gridDeploy().enabled()));
 
@@ -581,6 +589,7 @@
         Collection<ClusterNode> nodes) throws IgniteCheckedException {
         assert !cctx.isLocal() : cctx.name();
         assert qry.type() == GridCacheQueryType.SCAN: qry;
+        assert qry.mvccSnapshot() != null || !cctx.mvccEnabled();
 
         GridCloseableIterator locIter0 = null;
 
@@ -606,7 +615,7 @@
 
         final GridCacheQueryBean bean = new GridCacheQueryBean(qry, null, qry.<K, V>transform(), null);
 
-        final CacheQueryFuture fut = (CacheQueryFuture)queryDistributed(bean, nodes);
+        final CacheQueryFuture fut = queryDistributed(bean, nodes);
 
         return new GridCloseableIteratorAdapter() {
             /** */
@@ -749,6 +758,7 @@
                 qry.query().subjectId(),
                 qry.query().taskHash(),
                 queryTopologyVersion(),
+                null,
                 cctx.deploymentEnabled());
 
             addQueryFuture(req.id(), fut);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryAdapter.java
index 51fdd58..0e3ab43 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryAdapter.java
@@ -22,6 +22,7 @@
 import java.util.Collections;
 import java.util.Deque;
 import java.util.HashSet;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.Map;
 import java.util.NoSuchElementException;
@@ -29,6 +30,7 @@
 import java.util.Set;
 import java.util.UUID;
 import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.cache.query.Query;
@@ -43,6 +45,10 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtUnreservedPartitionException;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.query.QueryUtils;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
 import org.apache.ignite.internal.util.GridEmptyCloseableIterator;
@@ -60,6 +66,7 @@
 import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgniteReducer;
 import org.apache.ignite.plugin.security.SecurityPermission;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.cache.CacheMode.LOCAL;
@@ -112,6 +119,9 @@
     /** */
     private volatile boolean incBackups;
 
+    /** Local query. */
+    private boolean forceLocal;
+
     /** */
     private volatile boolean dedup;
 
@@ -127,19 +137,24 @@
     /** */
     private int taskHash;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * @param cctx Context.
      * @param type Query type.
      * @param filter Scan filter.
      * @param part Partition.
      * @param keepBinary Keep binary flag.
+     * @param forceLocal Flag to force local query.
      */
     public GridCacheQueryAdapter(GridCacheContext<?, ?> cctx,
         GridCacheQueryType type,
         @Nullable IgniteBiPredicate<Object, Object> filter,
         @Nullable IgniteClosure<Map.Entry, Object> transform,
         @Nullable Integer part,
-        boolean keepBinary) {
+        boolean keepBinary,
+        boolean forceLocal) {
         assert cctx != null;
         assert type != null;
         assert part == null || part >= 0;
@@ -150,6 +165,7 @@
         this.transform = transform;
         this.part = part;
         this.keepBinary = keepBinary;
+        this.forceLocal = forceLocal;
 
         log = cctx.logger(getClass());
 
@@ -213,6 +229,7 @@
      * @param keepBinary Keep binary flag.
      * @param subjId Security subject ID.
      * @param taskHash Task hash.
+     * @param mvccSnapshot Mvcc version.
      */
     public GridCacheQueryAdapter(GridCacheContext<?, ?> cctx,
         GridCacheQueryType type,
@@ -229,7 +246,8 @@
         boolean incMeta,
         boolean keepBinary,
         UUID subjId,
-        int taskHash) {
+        int taskHash,
+        MvccSnapshot mvccSnapshot) {
         this.cctx = cctx;
         this.type = type;
         this.log = log;
@@ -246,6 +264,14 @@
         this.keepBinary = keepBinary;
         this.subjId = subjId;
         this.taskHash = taskHash;
+        this.mvccSnapshot = mvccSnapshot;
+    }
+
+    /**
+     * @return MVCC snapshot.
+     */
+    @Nullable MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
     }
 
     /**
@@ -293,6 +319,13 @@
     }
 
     /**
+     * @return {@code True} if the query is forced local.
+     */
+    public boolean forceLocal() {
+        return forceLocal;
+    }
+
+    /**
      * @return Security subject ID.
      */
     public UUID subjectId() {
@@ -400,7 +433,7 @@
      */
     @SuppressWarnings("unchecked")
     @Nullable public <K, V> IgniteClosure<Map.Entry<K, V>, Object> transform() {
-        return (IgniteClosure<Map.Entry<K, V>, Object>) transform;
+        return (IgniteClosure<Map.Entry<K, V>, Object>)transform;
     }
 
     /**
@@ -496,15 +529,20 @@
     /** {@inheritDoc} */
     @SuppressWarnings({"IfMayBeConditional", "unchecked"})
     @Override public GridCloseableIterator executeScanQuery() throws IgniteCheckedException {
-        assert type == SCAN : "Wrong processing of qyery: " + type;
+        assert type == SCAN : "Wrong processing of query: " + type;
 
         // Affinity nodes snapshot.
         Collection<ClusterNode> nodes = new ArrayList<>(nodes());
 
         cctx.checkSecurity(SecurityPermission.CACHE_READ);
 
-        if (nodes.isEmpty() && part == null)
+        if (nodes.isEmpty()) {
+            if (part != null && forceLocal)
+                throw new IgniteCheckedException("No queryable nodes for partition " + part
+                    + " [forced local query=" + this + "]");
+
             return new GridEmptyCloseableIterator();
+        }
 
         if (log.isDebugEnabled())
             log.debug("Executing query [query=" + this + ", nodes=" + nodes + ']');
@@ -519,15 +557,34 @@
 
         final GridCacheQueryManager qryMgr = cctx.queries();
 
+        MvccQueryTracker mvccTracker = null;
+
+        if (cctx.mvccEnabled() && mvccSnapshot == null) {
+            GridNearTxLocal tx = cctx.tm().userTx();
+
+            if (tx != null)
+                mvccSnapshot = MvccUtils.requestSnapshot(cctx, tx);
+            else {
+                mvccTracker = MvccUtils.mvccTracker(cctx, null);
+
+                mvccSnapshot = mvccTracker.snapshot();
+            }
+
+            assert mvccSnapshot != null;
+        }
+
         boolean loc = nodes.size() == 1 && F.first(nodes).id().equals(cctx.localNodeId());
 
-        if (loc)
-            return qryMgr.scanQueryLocal(this, true);
+        GridCloseableIterator it;
 
-        if (part != null)
-            return new ScanQueryFallbackClosableIterator(part, this, qryMgr, cctx);
+        if (loc)
+            it = qryMgr.scanQueryLocal(this, true);
+        else if (part != null)
+            it = new ScanQueryFallbackClosableIterator(part, this, qryMgr, cctx);
         else
-            return qryMgr.scanQueryDistributed(this, nodes);
+            it = qryMgr.scanQueryDistributed(this, nodes);
+
+        return mvccTracker != null ? new MvccTrackingIterator(it, mvccTracker) : it;
     }
 
     /**
@@ -573,9 +630,10 @@
      * @param cctx Cache context.
      * @param prj Projection (optional).
      * @return Collection of data nodes in provided projection (if any).
+     * @throws IgniteCheckedException If partition number is invalid.
      */
     private static Collection<ClusterNode> nodes(final GridCacheContext<?, ?> cctx,
-        @Nullable final ClusterGroup prj, @Nullable final Integer part) {
+        @Nullable final ClusterGroup prj, @Nullable final Integer part) throws IgniteCheckedException {
         assert cctx != null;
 
         final AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();
@@ -585,6 +643,9 @@
         if (prj == null && part == null)
             return affNodes;
 
+        if (part != null && part >= cctx.affinity().partitions())
+            throw new IgniteCheckedException("Invalid partition number: " + part);
+
         final Set<ClusterNode> owners =
             part == null ? Collections.<ClusterNode>emptySet() : new HashSet<>(cctx.topology().owners(part, topVer));
 
@@ -781,7 +842,7 @@
          * @return Cache entry
          */
         private Object convert(Object obj) {
-            if(qry.transform() != null)
+            if (qry.transform() != null)
                 return obj;
 
             Map.Entry e = (Map.Entry)obj;
@@ -853,4 +914,93 @@
                 t.get2().cancel();
         }
     }
+
+    /**
+     * Wrapper for an MVCC-related iterators.
+     */
+    private static class MvccTrackingIterator implements GridCloseableIterator {
+        /** Serial version uid. */
+        private static final long serialVersionUID = -1905248502802333832L;
+        /** Underlying iterator. */
+        private final GridCloseableIterator it;
+
+        /** Query MVCC tracker. */
+        private final MvccQueryTracker mvccTracker;
+
+        /**
+         * Constructor.
+         *
+         * @param it Underlying iterator.
+         * @param mvccTracker Query MVCC tracker.
+         */
+        MvccTrackingIterator(GridCloseableIterator it, MvccQueryTracker mvccTracker) {
+            assert it != null && mvccTracker != null;
+
+            this.it = it;
+            this.mvccTracker = mvccTracker;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IgniteCheckedException {
+            if (isClosed())
+                return;
+
+            try {
+                it.close();
+            }
+            finally {
+                mvccTracker.onDone();
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isClosed() {
+            return it.isClosed();
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasNext() {
+            boolean hasNext = it.hasNext();
+
+            if (!hasNext)
+                try {
+                    close();
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IgniteException(e);
+                }
+
+            return hasNext;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasNextX() throws IgniteCheckedException {
+            boolean hasNext = it.hasNext();
+
+            if (!hasNext)
+                close();
+
+            return hasNext;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object nextX() throws IgniteCheckedException {
+            return it.nextX();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void removeX() throws IgniteCheckedException {
+            it.removeX();
+        }
+
+        /** {@inheritDoc} */
+        @NotNull @Override public Iterator iterator() {
+            return this;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object next() {
+            return it.next();
+        }
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
index 8f0edb7..5c7f383 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryManager.java
@@ -47,6 +47,7 @@
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.cache.CacheEntry;
 import org.apache.ignite.cache.QueryIndexType;
 import org.apache.ignite.cache.query.QueryMetrics;
 import org.apache.ignite.cluster.ClusterNode;
@@ -380,8 +381,8 @@
      * @param prevRowAvailable Whether previous row is available.
      * @throws IgniteCheckedException In case of error.
      */
-    public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow, boolean prevRowAvailable)
-        throws IgniteCheckedException {
+    public void store(CacheDataRow newRow, @Nullable CacheDataRow prevRow,
+        boolean prevRowAvailable) throws IgniteCheckedException {
         assert enabled();
         assert newRow != null && newRow.value() != null && newRow.link() != 0 : newRow;
 
@@ -414,7 +415,8 @@
      * @param prevRow Previous row.
      * @throws IgniteCheckedException Thrown in case of any errors.
      */
-    public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow) throws IgniteCheckedException {
+    public void remove(KeyCacheObject key, @Nullable CacheDataRow prevRow)
+        throws IgniteCheckedException {
         if (!QueryUtils.isEnabled(cctx.config()))
             return; // No-op.
 
@@ -615,7 +617,7 @@
                     break;
 
                 case SET:
-                    iter = setIterator(qry);
+                    iter = sharedCacheSetIterator(qry);
 
                     break;
 
@@ -747,49 +749,29 @@
      * @param qry Query.
      * @return Cache set items iterator.
      */
-    private GridCloseableIterator<IgniteBiTuple<K, V>> setIterator(GridCacheQueryAdapter<?> qry) {
+    private GridCloseableIterator<IgniteBiTuple<K, V>> sharedCacheSetIterator(
+        GridCacheQueryAdapter<?> qry) throws IgniteCheckedException {
         final GridSetQueryPredicate filter = (GridSetQueryPredicate)qry.scanFilter();
 
-        filter.init(cctx);
-
         IgniteUuid id = filter.setId();
 
-        Collection<SetItemKey> data = cctx.dataStructures().setData(id);
-
-        if (data == null)
-            data = Collections.emptyList();
-
-        final GridIterator<IgniteBiTuple<K, V>> it = F.iterator(
-            data,
-            new C1<SetItemKey, IgniteBiTuple<K, V>>() {
-                @Override public IgniteBiTuple<K, V> apply(SetItemKey e) {
-                    return new IgniteBiTuple<>((K)e.item(), (V)Boolean.TRUE);
+        GridCacheQueryAdapter<CacheEntry<K, ?>> qry0 = new GridCacheQueryAdapter<>(cctx,
+            SCAN,
+            new IgniteBiPredicate<Object, Object>() {
+                @Override public boolean apply(Object k, Object v) {
+                    return k instanceof SetItemKey && id.equals(((SetItemKey)k).setId());
                 }
             },
-            true,
-            new P1<SetItemKey>() {
-                @Override public boolean apply(SetItemKey e) {
-                    return filter.apply(e, null);
+            new IgniteClosure<Map.Entry, Object>() {
+                @Override public Object apply(Map.Entry entry) {
+                    return new IgniteBiTuple<K, V>((K)((SetItemKey)entry.getKey()).item(), (V)Boolean.TRUE);
                 }
-            });
+            },
+            qry.partition(),
+            false,
+            true);
 
-        return new GridCloseableIteratorAdapter<IgniteBiTuple<K, V>>() {
-            @Override protected boolean onHasNext() {
-                return it.hasNext();
-            }
-
-            @Override protected IgniteBiTuple<K, V> onNext() {
-                return it.next();
-            }
-
-            @Override protected void onRemove() {
-                it.remove();
-            }
-
-            @Override protected void onClose() {
-                // No-op.
-            }
-        };
+        return scanQueryLocal(qry0, false);
     }
 
     /**
@@ -803,6 +785,8 @@
     private GridCloseableIterator scanIterator(final GridCacheQueryAdapter<?> qry, IgniteClosure transformer,
         boolean locNode)
         throws IgniteCheckedException {
+        assert !cctx.mvccEnabled() || qry.mvccSnapshot() != null;
+
         final IgniteBiPredicate<K, V> keyValFilter = qry.scanFilter();
         final InternalScanFilter<K,V> intFilter = keyValFilter != null ? new InternalScanFilter<>(keyValFilter) : null;
 
@@ -843,13 +827,12 @@
 
                 locPart = locPart0;
 
-                it = cctx.offheap().cachePartitionIterator(cctx.cacheId(), part);
+                it = cctx.offheap().cachePartitionIterator(cctx.cacheId(), part, qry.mvccSnapshot());
             }
             else {
                 locPart = null;
 
-                // TODO shouldn't we reserve all involved partitions?
-                it = cctx.offheap().cacheIterator(cctx.cacheId(), true, backups, topVer);
+                it = cctx.offheap().cacheIterator(cctx.cacheId(), true, backups, topVer, qry.mvccSnapshot());
             }
 
             return new ScanQueryIterator(it, qry, topVer, locPart, keyValFilter, transformer, locNode, cctx, log);
@@ -1276,7 +1259,7 @@
                                 continue;
                         }
                         else
-                            data.add(!loc ? new GridCacheQueryResponseEntry<>(key, val) : F.t(key, val));
+                            data.add(new T2<>(key, val));
                     }
 
                     if (!loc) {
@@ -2721,7 +2704,7 @@
      */
     public <R> CacheQuery<R> createScanQuery(@Nullable IgniteBiPredicate<K, V> filter,
         @Nullable Integer part, boolean keepBinary) {
-        return createScanQuery(filter, null, part, keepBinary);
+        return createScanQuery(filter, null, part, keepBinary, false);
     }
 
     /**
@@ -2731,18 +2714,20 @@
      * @param trans Transformer.
      * @param part Partition.
      * @param keepBinary Keep binary flag.
+     * @param forceLocal Flag to force local scan.
      * @return Created query.
      */
     public <T, R> CacheQuery<R> createScanQuery(@Nullable IgniteBiPredicate<K, V> filter,
         @Nullable IgniteClosure<T, R> trans,
-        @Nullable Integer part, boolean keepBinary) {
+        @Nullable Integer part, boolean keepBinary, boolean forceLocal) {
 
         return new GridCacheQueryAdapter(cctx,
             SCAN,
             filter,
             trans,
             part,
-            keepBinary);
+            keepBinary,
+            forceLocal);
     }
 
     /**
@@ -2923,11 +2908,13 @@
             boolean locNode,
             GridCacheContext cctx,
             IgniteLogger log) {
+
             this.it = it;
             this.topVer = topVer;
             this.locPart = locPart;
             this.intScanFilter = scanFilter != null ? new InternalScanFilter<>(scanFilter) : null;
             this.cctx = cctx;
+
             this.log = log;
             this.locNode = locNode;
 
@@ -2938,7 +2925,7 @@
             readEvt = cctx.events().isRecordable(EVT_CACHE_QUERY_OBJECT_READ) &&
                 cctx.gridEvents().hasListener(EVT_CACHE_QUERY_OBJECT_READ);
 
-            if(readEvt){
+            if (readEvt){
                 taskName = cctx.kernalContext().task().resolveTaskName(qry.taskHash());
                 subjId = qry.subjectId();
             }
@@ -3024,7 +3011,7 @@
 
                                 val = entry.peek(true, true, topVer, expiryPlc);
 
-                                cctx.evicts().touch(entry, topVer);
+                                entry.touch(topVer);
 
                                 break;
                             }
@@ -3113,7 +3100,7 @@
                             }
                         }
                         else
-                            next0 = !locNode ? new GridCacheQueryResponseEntry<>(key0, val0):
+                            next0 = !locNode ? new T2<>(key0, val0):
                                 new CacheQueryEntry<>(key0, val0);
 
                         break;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryRequest.java
index 9dc7817..b7205b6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryRequest.java
@@ -26,8 +26,8 @@
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheDeployable;
 import org.apache.ignite.internal.processors.cache.GridCacheIdMessage;
-import org.apache.ignite.internal.processors.cache.GridCacheMessage;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -129,6 +129,9 @@
     /** */
     private AffinityTopologyVersion topVer;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
     /**
      * Required by {@link Externalizable}
      */
@@ -223,6 +226,7 @@
      * @param subjId Subject ID.
      * @param taskHash Task name hash code.
      * @param topVer Topology version.
+     * @param mvccSnapshot Mvcc snapshot.
      * @param addDepInfo Deployment info flag.
      */
     public GridCacheQueryRequest(
@@ -245,6 +249,7 @@
         UUID subjId,
         int taskHash,
         AffinityTopologyVersion topVer,
+        MvccSnapshot mvccSnapshot,
         boolean addDepInfo
     ) {
         assert type != null || fields;
@@ -270,9 +275,17 @@
         this.subjId = subjId;
         this.taskHash = taskHash;
         this.topVer = topVer;
+        this.mvccSnapshot = mvccSnapshot;
         this.addDepInfo = addDepInfo;
     }
 
+    /**
+     * @return Mvcc version.
+     */
+    @Nullable MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
     /** {@inheritDoc} */
     @Override public AffinityTopologyVersion topologyVersion() {
         return topVer != null ? topVer : AffinityTopologyVersion.NONE;
@@ -620,6 +633,12 @@
 
                 writer.incrementState();
 
+            case 23:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -800,6 +819,14 @@
 
                 reader.incrementState();
 
+            case 23:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridCacheQueryRequest.class);
@@ -812,7 +839,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 23;
+        return 24;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryResponseEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryResponseEntry.java
index 2c1a4f5..650f0c0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryResponseEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheQueryResponseEntry.java
@@ -27,7 +27,10 @@
 /**
  * Class to store query results returned by remote nodes. It's required to fully
  * control serialization process. Local entries can be returned to user as is.
+ * <p>
+ * @deprecated Should be removed in Apache Ignite 3.0.
  */
+@Deprecated
 public class GridCacheQueryResponseEntry<K, V> implements Map.Entry<K, V>, Externalizable {
     /** */
     private static final long serialVersionUID = 0L;
@@ -113,4 +116,4 @@
     @Override public String toString() {
         return "[" + key + "=" + val + "]";
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java
index e0ff9a4..5dab5fd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/IgniteQueryErrorCode.java
@@ -100,6 +100,26 @@
     /** Cache not found. */
     public final static int CACHE_NOT_FOUND = 4006;
 
+    /** Attempt to INSERT, UPDATE or MERGE key that exceed maximum column length. */
+    public final static int TOO_LONG_KEY = 4007;
+
+    /** Attempt to INSERT, UPDATE or MERGE value that exceed maximum column length. */
+    public final static int TOO_LONG_VALUE = 4008;
+
+    /* 5xxx - transactions related runtime errors. */
+
+    /** Transaction is already open. */
+    public final static int TRANSACTION_EXISTS = 5001;
+
+    /** MVCC disabled. */
+    public final static int MVCC_DISABLED = 5002;
+
+    /** Transaction type mismatch (SQL/non SQL). */
+    public final static int TRANSACTION_TYPE_MISMATCH = 5003;
+
+    /** Transaction is already completed. */
+    public final static int TRANSACTION_COMPLETED = 5004;
+
     /** */
     private IgniteQueryErrorCode() {
         // No-op.
@@ -126,6 +146,8 @@
     public static String codeToSqlState(int statusCode) {
         switch (statusCode) {
             case DUPLICATE_KEY:
+            case TOO_LONG_KEY:
+            case TOO_LONG_VALUE:
                 return SqlStateCode.CONSTRAINT_VIOLATION;
 
             case NULL_KEY:
@@ -151,6 +173,12 @@
             case KEY_UPDATE:
                 return SqlStateCode.PARSING_EXCEPTION;
 
+            case MVCC_DISABLED:
+            case TRANSACTION_EXISTS:
+            case TRANSACTION_TYPE_MISMATCH:
+            case TRANSACTION_COMPLETED:
+                return SqlStateCode.TRANSACTION_STATE_EXCEPTION;
+
             default:
                 return SqlStateCode.INTERNAL_ERROR;
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java
index 9f8abc1..d8c0c39 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/QueryEntityTypeDescriptor.java
@@ -30,7 +30,6 @@
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.lang.IgniteBiTuple;
 
 /**
  * Descriptor of type.
@@ -55,8 +54,11 @@
     /** */
     private Set<String> notNullFields = new HashSet<>();
 
-    /** Decimal fields information. */
-    private Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = new HashMap<>();
+    /** Precision information. */
+    private Map<String, Integer> fieldsPrecision = new HashMap<>();
+
+    /** Scale information. */
+    private Map<String, Integer> fieldsScale = new HashMap<>();
 
     /** */
     private QueryEntityIndexDescriptor fullTextIdx;
@@ -191,13 +193,23 @@
     }
 
     /**
-     * Adds decimal info.
+     * Adds fieldsPrecision info.
      *
      * @param field Field.
-     * @param info Decimal column info.
+     * @param precision Precision.
      */
-    public void addDecimalInfo(String field, IgniteBiTuple<Integer, Integer> info) {
-        decimalInfo.put(field, info);
+    public void addPrecision(String field, Integer precision) {
+        fieldsPrecision.put(field, precision);
+    }
+
+    /**
+     * Adds fieldsScale info.
+     *
+     * @param field Field.
+     * @param scale Scale.
+     */
+    public void addScale(String field, int scale) {
+        fieldsScale.put(field, scale);
     }
 
     /**
@@ -208,10 +220,17 @@
     }
 
     /**
-     * @return Decimal info for fields.
+     * @return Precision info for fields.
      */
-    public Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo() {
-        return decimalInfo;
+    public Map<String, Integer> fieldsPrecision() {
+        return fieldsPrecision;
+    }
+
+    /**
+     * @return Scale info for fields.
+     */
+    public Map<String, Integer> fieldsScale() {
+        return fieldsScale;
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
index ff10e3d..676e61c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/SqlFieldsQueryEx.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import java.util.concurrent.TimeUnit;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.util.typedef.F;
 
 /**
@@ -36,6 +37,12 @@
     /** Whether server side DML should be enabled. */
     private boolean skipReducerOnUpdate;
 
+    /** Auto commit flag. */
+    private boolean autoCommit = true;
+
+    /** Nested transactions handling mode. */
+    private NestedTxMode nestedTxMode = NestedTxMode.DEFAULT;
+
     /** Batched arguments list. */
     private List<Object[]> batchedArgs;
 
@@ -57,6 +64,8 @@
 
         this.isQry = qry.isQry;
         this.skipReducerOnUpdate = qry.skipReducerOnUpdate;
+        this.autoCommit = qry.autoCommit;
+        this.nestedTxMode = qry.nestedTxMode;
         this.batchedArgs = qry.batchedArgs;
     }
 
@@ -159,6 +168,36 @@
         return skipReducerOnUpdate;
     }
 
+    /**
+     * @return Nested transactions handling mode - behavior when the user attempts to open a transaction in scope of
+     * another transaction.
+     */
+    public NestedTxMode getNestedTxMode() {
+        return nestedTxMode;
+    }
+
+    /**
+     * @param nestedTxMode Nested transactions handling mode - behavior when the user attempts to open a transaction
+     * in scope of another transaction.
+     */
+    public void setNestedTxMode(NestedTxMode nestedTxMode) {
+        this.nestedTxMode = nestedTxMode;
+    }
+
+    /**
+     * @return Auto commit flag.
+     */
+    public boolean isAutoCommit() {
+        return autoCommit;
+    }
+
+    /**
+     * @param autoCommit Auto commit flag.
+     */
+    public void setAutoCommit(boolean autoCommit) {
+        this.autoCommit = autoCommit;
+    }
+
     /** {@inheritDoc} */
     @Override public SqlFieldsQuery copy() {
         return new SqlFieldsQueryEx(this);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryHandler.java
index 9ff4623..829118b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryHandler.java
@@ -1310,7 +1310,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(ContinuousQueryAsyncClosure.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryManager.java
index 4005dd8..ab60f47 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryManager.java
@@ -29,9 +29,11 @@
 import java.util.Map;
 import java.util.NoSuchElementException;
 import java.util.UUID;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
 import javax.cache.Cache;
 import javax.cache.configuration.CacheEntryListenerConfiguration;
 import javax.cache.configuration.Factory;
@@ -63,6 +65,7 @@
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.continuous.GridContinuousHandler;
+import org.apache.ignite.internal.util.StripedCompositeReadWriteLock;
 import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.CI2;
@@ -76,7 +79,6 @@
 import org.apache.ignite.plugin.security.SecurityPermission;
 import org.apache.ignite.resources.LoggerResource;
 import org.jetbrains.annotations.Nullable;
-import java.util.concurrent.ConcurrentHashMap;
 
 import static javax.cache.event.EventType.CREATED;
 import static javax.cache.event.EventType.EXPIRED;
@@ -126,6 +128,9 @@
     /** Ordered topic prefix. */
     private String topicPrefix;
 
+    /** ReadWriteLock to control the continuous query setup - this is to prevent the race between cache update and listener setup */
+    private final StripedCompositeReadWriteLock listenerLock = new StripedCompositeReadWriteLock(Runtime.getRuntime().availableProcessors()) ;
+
     /** Cancelable future task for backup cleaner */
     private GridTimeoutProcessor.CancelableTask cancelableTask;
 
@@ -195,6 +200,16 @@
     }
 
     /**
+     * Obtain the listener read lock, which must be held if any component need to
+     * read the list listener (generally caller to updateListener).
+     *
+     * @return Read lock for the listener update
+     */
+    public Lock getListenerReadLock() {
+        return listenerLock.readLock();
+    }
+
+    /**
      * @param lsnrs Listeners to notify.
      * @param key Entry key.
      * @param partId Partition id.
@@ -481,6 +496,11 @@
         final boolean keepBinary,
         final boolean includeExpired) throws IgniteCheckedException
     {
+        //TODO IGNITE-7953
+        if (cctx.transactionalSnapshot())
+            throw new UnsupportedOperationException("Continuous queries are not supported for transactional caches " +
+                "when MVCC is enabled.");
+
         IgniteOutClosure<CacheContinuousQueryHandler> clsr;
 
         if (rmtTransFactory != null) {
@@ -741,7 +761,8 @@
             final Iterator<CacheDataRow> it = cctx.offheap().cacheIterator(cctx.cacheId(),
                 true,
                 true,
-                AffinityTopologyVersion.NONE);
+                AffinityTopologyVersion.NONE,
+                null);
 
             locLsnr.onUpdated(new Iterable<CacheEntryEvent>() {
                 @Override public Iterator<CacheEntryEvent> iterator() {
@@ -878,9 +899,11 @@
      * @param internal Internal flag.
      * @return Whether listener was actually registered.
      */
-    GridContinuousHandler.RegisterStatus registerListener(UUID lsnrId,
+    GridContinuousHandler.RegisterStatus registerListener(
+        UUID lsnrId,
         CacheContinuousQueryListener lsnr,
-        boolean internal) {
+        boolean internal
+    ) {
         boolean added;
 
         if (internal) {
@@ -890,7 +913,9 @@
                 intLsnrCnt.incrementAndGet();
         }
         else {
-            synchronized (this) {
+            listenerLock.writeLock().lock();
+
+            try {
                 if (lsnrCnt.get() == 0) {
                     if (cctx.group().sharedGroup() && !cctx.isLocal())
                         cctx.group().addCacheWithContinuousQuery(cctx);
@@ -901,13 +926,16 @@
                 if (added)
                     lsnrCnt.incrementAndGet();
             }
+            finally {
+                listenerLock.writeLock().unlock();
+            }
 
             if (added)
                 lsnr.onExecution();
         }
 
-        return added ? GridContinuousHandler.RegisterStatus.REGISTERED :
-            GridContinuousHandler.RegisterStatus.NOT_REGISTERED;
+        return added ? GridContinuousHandler.RegisterStatus.REGISTERED
+            : GridContinuousHandler.RegisterStatus.NOT_REGISTERED;
     }
 
     /**
@@ -925,7 +953,9 @@
             }
         }
         else {
-            synchronized (this) {
+            listenerLock.writeLock().lock();
+
+            try {
                 if ((lsnr = lsnrs.remove(id)) != null) {
                     int cnt = lsnrCnt.decrementAndGet();
 
@@ -933,6 +963,9 @@
                         cctx.group().removeCacheWithContinuousQuery(cctx);
                 }
             }
+            finally {
+                listenerLock.writeLock().unlock();
+            }
 
             if (lsnr != null)
                 lsnr.onUnregister();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetricsSandbox.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetricsSandbox.java
index 3a542e8..92ef411 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetricsSandbox.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ratemetrics/HitRateMetricsSandbox.java
@@ -56,7 +56,7 @@
         });
 
         new Timer(100, new ActionListener() {
-            public void actionPerformed(ActionEvent evt) {
+            @Override public void actionPerformed(ActionEvent evt) {
                 rateLb.setText(Double.toString(metrics.getRate()));
             }
         }).start();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
index 64a052d..ab28aa3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/store/GridCacheStoreManagerAdapter.java
@@ -1340,7 +1340,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             if (!S.INCLUDE_SENSITIVE)
                 return "[size=" + size() + "]";
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteInternalTx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteInternalTx.java
index 9e06d9d..05ebe5d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteInternalTx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteInternalTx.java
@@ -30,6 +30,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheFilterFailedException;
 import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.lang.GridTuple;
@@ -262,6 +263,11 @@
     public boolean activeCachesDeploymentEnabled();
 
     /**
+     * @param depEnabled Flag indicating whether deployment is enabled for caches from this transaction or not.
+     */
+    public void activeCachesDeploymentEnabled(boolean depEnabled);
+
+    /**
      * Attempts to set topology version and returns the current value.
      * If topology version was previously set, then it's value will
      * be returned (but not updated).
@@ -634,4 +640,20 @@
      * @param e Commit error.
      */
     public void commitError(Throwable e);
-}
\ No newline at end of file
+
+    /**
+     * @param mvccSnapshot Mvcc snapshot.
+     */
+    public void mvccSnapshot(MvccSnapshot mvccSnapshot);
+
+    /**
+     * @return Mvcc snapshot.
+     */
+    public MvccSnapshot mvccSnapshot();
+
+    /**
+     * @return Transaction counters.
+     * @param createIfAbsent {@code True} if non-null instance is needed.
+     */
+    @Nullable public TxCounters txCounters(boolean createIfAbsent);
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTransactionsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTransactionsImpl.java
index 25ba849..11bf219 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTransactionsImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTransactionsImpl.java
@@ -31,9 +31,9 @@
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionException;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.apache.ignite.transactions.TransactionMetrics;
-import org.apache.ignite.transactions.TransactionException;
 import org.jetbrains.annotations.Nullable;
 
 /**
@@ -48,6 +48,7 @@
 
     /**
      * @param cctx Cache shared context.
+     * @param lb Label.
      */
     public IgniteTransactionsImpl(GridCacheSharedContext<K, V> cctx, @Nullable String lb) {
         this.cctx = cctx;
@@ -175,6 +176,7 @@
                 isolation,
                 timeout,
                 true,
+                null,
                 txSize,
                 lb
             );
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
index 6fdb046..c6d1991 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxAdapter.java
@@ -34,6 +34,7 @@
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
 import javax.cache.expiry.ExpiryPolicy;
@@ -53,6 +54,7 @@
 import org.apache.ignite.internal.processors.cache.CacheInvokeEntry;
 import org.apache.ignite.internal.processors.cache.CacheLazyEntry;
 import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheOperationContext;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
@@ -61,8 +63,12 @@
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.store.CacheStoreManager;
 import org.apache.ignite.internal.processors.cache.version.GridCacheLazyPlainVersionedEntry;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
@@ -71,6 +77,7 @@
 import org.apache.ignite.internal.processors.cluster.BaselineTopology;
 import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.GridIntIterator;
 import org.apache.ignite.internal.util.GridSetWrapper;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.lang.GridMetadataAwareAdapter;
@@ -84,7 +91,9 @@
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.apache.ignite.transactions.TransactionState;
@@ -130,6 +139,10 @@
     private static final AtomicReferenceFieldUpdater<IgniteTxAdapter, FinalizationStatus> FINALIZING_UPD =
         AtomicReferenceFieldUpdater.newUpdater(IgniteTxAdapter.class, FinalizationStatus.class, "finalizing");
 
+    /** */
+    private static final AtomicReferenceFieldUpdater<IgniteTxAdapter, TxCounters> TX_COUNTERS_UPD =
+        AtomicReferenceFieldUpdater.newUpdater(IgniteTxAdapter.class, TxCounters.class, "txCounters");
+
     /** Logger. */
     protected static IgniteLogger log;
 
@@ -262,6 +275,16 @@
     /** UUID to consistent id mapper. */
     protected ConsistentIdMapper consistentIdMapper;
 
+    /** Mvcc tx update snapshot. */
+    protected volatile MvccSnapshot mvccSnapshot;
+
+    /** Rollback finish future. */
+    @GridToStringExclude
+    private volatile IgniteInternalFuture rollbackFut;
+
+    /** */
+    private volatile TxCounters txCounters = new TxCounters();
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -381,6 +404,18 @@
     }
 
     /**
+     * @return Mvcc info.
+     */
+    @Override @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mvccSnapshot(MvccSnapshot mvccSnapshot) {
+        this.mvccSnapshot = mvccSnapshot;
+    }
+
+    /**
      * @return Shared cache context.
      */
     public GridCacheSharedContext<?, ?> context() {
@@ -444,36 +479,27 @@
 
     /**
      * Uncommits transaction by invalidating all of its entries. Courtesy to minimize inconsistency.
-     *
-     * @param nodeStopping {@code True} if tx was cancelled during node stop.
      */
     @SuppressWarnings({"CatchGenericClass"})
-    protected void uncommit(boolean nodeStopping) {
-        try {
-            if (!nodeStopping) {
-                for (IgniteTxEntry e : writeMap().values()) {
-                    try {
-                        GridCacheEntryEx entry = e.cached();
+    protected void uncommit() {
+        for (IgniteTxEntry e : writeMap().values()) {
+            try {
+                GridCacheEntryEx entry = e.cached();
 
-                        if (e.op() != NOOP)
-                            entry.invalidate(xidVer);
-                    }
-                    catch (Throwable t) {
-                        U.error(log, "Failed to invalidate transaction entries while reverting a commit.", t);
+                if (e.op() != NOOP)
+                    entry.invalidate(xidVer);
+            }
+            catch (Throwable t) {
+                U.error(log, "Failed to invalidate transaction entries while reverting a commit.", t);
 
-                        if (t instanceof Error)
-                            throw (Error)t;
+                if (t instanceof Error)
+                    throw (Error)t;
 
-                        break;
-                    }
-                }
-
-                cctx.tm().uncommitTx(this);
+                break;
             }
         }
-        catch (Exception ex) {
-            U.error(log, "Failed to do uncommit.", ex);
-        }
+
+        cctx.tm().uncommitTx(this);
     }
 
     /** {@inheritDoc} */
@@ -695,6 +721,20 @@
     }
 
     /**
+     * @return Rollback future.
+     */
+    public IgniteInternalFuture rollbackFuture() {
+        return rollbackFut;
+    }
+
+    /**
+     * @param fut Rollback future.
+     */
+    public void rollbackFuture(IgniteInternalFuture fut) {
+        rollbackFut = fut;
+    }
+
+    /**
      * Gets remaining allowed transaction time.
      *
      * @return Remaining transaction time. {@code 0} if timeout isn't specified. {@code -1} if time is out.
@@ -1117,9 +1157,62 @@
                 if (state != ACTIVE && state != SUSPENDED)
                     seal();
 
-                if (cctx.wal() != null && cctx.tm().logTxRecords() && txNodes != null) {
+                if (state == PREPARED || state == COMMITTED || state == ROLLED_BACK) {
+                    if (mvccSnapshot != null) {
+                        byte txState;
+
+                        switch (state) {
+                            case PREPARED:
+                                txState = TxState.PREPARED;
+                                break;
+                            case ROLLED_BACK:
+                                txState = TxState.ABORTED;
+                                break;
+                            case COMMITTED:
+                                txState = TxState.COMMITTED;
+                                break;
+                            default:
+                                throw new IllegalStateException("Illegal state: " + state);
+                        }
+
+                        try {
+                            if (!cctx.localNode().isClient()) {
+                                if (dht() && remote())
+                                    cctx.coordinators().updateState(mvccSnapshot, txState, false);
+                                else if (local()) {
+                                    IgniteInternalFuture<?> rollbackFut = rollbackFuture();
+
+                                    boolean syncUpdate = txState == TxState.PREPARED || txState == TxState.COMMITTED ||
+                                        rollbackFut == null || rollbackFut.isDone();
+
+                                    if (syncUpdate)
+                                        cctx.coordinators().updateState(mvccSnapshot, txState);
+                                    else {
+                                        // If tx was aborted, we need to wait tx log is updated on all backups.
+                                        rollbackFut.listen(new IgniteInClosure<IgniteInternalFuture>() {
+                                            @Override public void apply(IgniteInternalFuture fut) {
+                                                try {
+                                                    cctx.coordinators().updateState(mvccSnapshot, txState);
+                                                }
+                                                catch (IgniteCheckedException e) {
+                                                    U.error(log, "Failed to log TxState: " + txState, e);
+                                                }
+                                            }
+                                        });
+                                    }
+                                }
+                            }
+                        }
+                        catch (IgniteCheckedException e) {
+                            U.error(log, "Failed to log TxState: " + txState, e);
+
+                            throw new IgniteException("Failed to log TxState: " + txState, e);
+                        }
+                    }
+
                     // Log tx state change to WAL.
-                    if (state == PREPARED || state == COMMITTED || state == ROLLED_BACK) {
+                    if (cctx.wal() != null && cctx.tm().logTxRecords() && txNodes != null) {
+
                         BaselineTopology baselineTop = cctx.kernalContext().state().clusterState().baselineTopology();
 
                         Map<Short, Collection<Short>> participatingNodes = consistentIdMapper
@@ -1577,7 +1670,8 @@
                     /*closure name */recordEvt ? F.first(txEntry.entryProcessors()).get1() : null,
                     resolveTaskName(),
                     null,
-                    keepBinary);
+                    keepBinary,
+                    null); // TODO IGNITE-7371
             }
 
             boolean modified = false;
@@ -1607,6 +1701,8 @@
                 Object procRes = null;
                 Exception err = null;
 
+                IgniteThread.onEntryProcessorEntered(true);
+
                 try {
                     EntryProcessor<Object, Object, Object> processor = t.get1();
 
@@ -1619,6 +1715,9 @@
                 catch (Exception e) {
                     err = e;
                 }
+                finally {
+                    IgniteThread.onEntryProcessorLeft();
+                }
 
                 if (ret != null) {
                     if (err != null || procRes != null)
@@ -1760,6 +1859,32 @@
     }
 
     /**
+     * Notify Dr on tx finished.
+     *
+     * @param commit {@code True} if commited, {@code False} otherwise.
+     */
+    protected void notifyDrManager(boolean commit) {
+        if (system() || internal())
+            return;
+
+        IgniteTxState txState = txState();
+
+        if (mvccSnapshot == null || txState.cacheIds().isEmpty())
+            return;
+
+        GridIntIterator iter = txState.cacheIds().iterator();
+
+        while (iter.hasNext()) {
+            int cacheId = iter.next();
+
+            GridCacheContext ctx0 = cctx.cacheContext(cacheId);
+
+            if (ctx0.isDrEnabled())
+                ctx0.dr().onTxFinished(mvccSnapshot, commit, topologyVersionSnapshot());
+        }
+    }
+
+    /**
      * @param e Transaction entry.
      * @param primaryOnly Flag to include backups into check or not.
      * @return {@code True} if entry is locally mapped as a primary or back up node.
@@ -1889,6 +2014,56 @@
         return xidVer.hashCode();
     }
 
+    /**
+     * Adds cache to the list of active caches in transaction.
+     *
+     * @param cacheCtx Cache context to add.
+     * @param recovery Recovery flag. See {@link CacheOperationContext#setRecovery(boolean)}.
+     * @throws IgniteCheckedException If caches already enlisted in this transaction are not compatible with given
+     *      cache (e.g. they have different stores).
+     */
+    public abstract void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException;
+
+    /** {@inheritDoc} */
+    @Nullable @Override public TxCounters txCounters(boolean createIfAbsent) {
+        if (createIfAbsent && txCounters == null)
+            TX_COUNTERS_UPD.compareAndSet(this, null, new TxCounters());
+
+        return txCounters;
+    }
+
+    /**
+     * Make counters accumulated during transaction visible outside of transaciton.
+     */
+    protected void applyTxCounters() {
+        TxCounters txCntrs = txCounters(false);
+
+        if (txCntrs == null)
+            return;
+
+        Map<Integer, ? extends Map<Integer, AtomicLong>> sizeDeltas = txCntrs.sizeDeltas();
+
+        for (Map.Entry<Integer, ? extends Map<Integer, AtomicLong>> entry : sizeDeltas.entrySet()) {
+            Integer cacheId = entry.getKey();
+            Map<Integer, AtomicLong> partDeltas = entry.getValue();
+
+            assert !F.isEmpty(partDeltas);
+
+            GridDhtPartitionTopology top = cctx.cacheContext(cacheId).topology();
+
+            for (Map.Entry<Integer, AtomicLong> e : partDeltas.entrySet()) {
+                Integer p = e.getKey();
+                long delta = e.getValue().get();
+
+                GridDhtLocalPartition dhtPart = top.localPartition(p);
+
+                assert dhtPart != null;
+
+                dhtPart.dataStore().updateSize(cacheId, delta);
+            }
+        }
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return GridToStringBuilder.toString(IgniteTxAdapter.class, this,
@@ -1963,6 +2138,16 @@
         }
 
         /** {@inheritDoc} */
+        @Override public void mvccSnapshot(MvccSnapshot mvccSnapshot) {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public MvccSnapshot mvccSnapshot() {
+            return null;
+        }
+
+        /** {@inheritDoc} */
         @Override public boolean localResult() {
             return false;
         }
@@ -2043,6 +2228,11 @@
         }
 
         /** {@inheritDoc} */
+        @Override public void activeCachesDeploymentEnabled(boolean depEnabled) {
+            throw new IllegalStateException("Deserialized transaction can only be used as read-only.");
+        }
+
+        /** {@inheritDoc} */
         @Nullable @Override public Object addMeta(int key, Object val) {
             throw new IllegalStateException("Deserialized transaction can only be used as read-only.");
         }
@@ -2147,6 +2337,11 @@
         }
 
         /** {@inheritDoc} */
+        @Nullable @Override public TxCounters txCounters(boolean createIfAbsent) {
+            return null;
+        }
+
+        /** {@inheritDoc} */
         @Override public IgniteTxState txState() {
             return null;
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxEntry.java
index 71c6b65..8e65605 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxEntry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxEntry.java
@@ -52,6 +52,7 @@
 import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType;
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+import org.apache.ignite.thread.IgniteThread;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ;
@@ -762,6 +763,8 @@
         Object keyVal = null;
 
         for (T2<EntryProcessor<Object, Object, Object>, Object[]> t : entryProcessors()) {
+            IgniteThread.onEntryProcessorEntered(true);
+
             try {
                 CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry(key, keyVal, cacheVal, val,
                     ver, keepBinary(), cached());
@@ -777,6 +780,9 @@
             catch (Exception ignore) {
                 // No-op.
             }
+            finally {
+                IgniteThread.onEntryProcessorLeft();
+            }
         }
 
         return ctx.toCacheObject(val);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
index 5b82333..32f4dd4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxHandler.java
@@ -25,6 +25,7 @@
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.failure.FailureType;
+import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
@@ -67,6 +68,7 @@
 import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.typedef.C1;
@@ -340,8 +342,6 @@
             return new GridFinishedFuture<>(e);
         }
 
-        assert firstEntry != null : req;
-
         GridDhtTxLocal tx = null;
 
         GridCacheVersion mappedVer = ctx.tm().mappedVersion(req.version());
@@ -361,8 +361,10 @@
             GridDhtPartitionTopology top = null;
 
             if (req.firstClientRequest()) {
+                assert firstEntry != null : req;
+
                 assert req.concurrency() == OPTIMISTIC : req;
-                assert CU.clientNode(nearNode) : nearNode;
+                assert nearNode.isClient() : nearNode;
 
                 top = firstEntry.context().topology();
 
@@ -548,7 +550,7 @@
     /**
      * @param node Sender node.
      * @param req Request.
-     * @return {@code True} if update will be retried from future listener.
+     * @return {@code True} if update will be retried from future listener or topology version future is timed out.
      */
     private boolean waitForExchangeFuture(final ClusterNode node, final GridNearTxPrepareRequest req) {
         assert req.firstClientRequest() : req;
@@ -562,27 +564,37 @@
                 final IgniteThread thread = (IgniteThread)curThread;
 
                 if (thread.cachePoolThread()) {
-                    topFut.listen(new CI1<IgniteInternalFuture<AffinityTopologyVersion>>() {
-                        @Override public void apply(IgniteInternalFuture<AffinityTopologyVersion> fut) {
-                            ctx.kernalContext().closure().runLocalWithThreadPolicy(thread, new Runnable() {
-                                @Override public void run() {
-                                    try {
-                                        processNearTxPrepareRequest0(node, req);
-                                    }
-                                    finally {
-                                        ctx.io().onMessageProcessed(req);
-                                    }
+                    ctx.time().waitAsync(topFut, req.timeout(), (e, timedOut) -> {
+                            if (e != null || timedOut) {
+                                sendResponseOnTimeoutOrError(e, topFut, node, req);
+
+                                return;
+                            }
+                            ctx.kernalContext().closure().runLocalWithThreadPolicy(thread, () -> {
+                                try {
+                                    processNearTxPrepareRequest0(node, req);
+                                }
+                                finally {
+                                    ctx.io().onMessageProcessed(req);
                                 }
                             });
                         }
-                    });
+                    );
 
                     return true;
                 }
             }
 
             try {
-                topFut.get();
+                if (req.timeout() > 0)
+                    topFut.get(req.timeout());
+                else
+                    topFut.get();
+            }
+            catch (IgniteFutureTimeoutCheckedException e) {
+                sendResponseOnTimeoutOrError(null, topFut, node, req);
+
+                return true;
             }
             catch (IgniteCheckedException e) {
                 U.error(log, "Topology future failed: " + e, e);
@@ -593,6 +605,48 @@
     }
 
     /**
+     * @param e Exception or null if timed out.
+     * @param topFut Topology future.
+     * @param node Node.
+     * @param req Prepare request.
+     */
+    private void sendResponseOnTimeoutOrError(@Nullable IgniteCheckedException e,
+        GridDhtTopologyFuture topFut,
+        ClusterNode node,
+        GridNearTxPrepareRequest req) {
+        if (e == null)
+            e = new IgniteTxTimeoutCheckedException("Failed to wait topology version for near prepare " +
+                "[txId=" + req.version() +
+                ", topVer=" + topFut.initialVersion() +
+                ", node=" + node.id() +
+                ", req=" + req + ']');
+
+        GridNearTxPrepareResponse res = new GridNearTxPrepareResponse(
+            req.partition(),
+            req.version(),
+            req.futureId(),
+            req.miniId(),
+            req.version(),
+            req.version(),
+            null,
+            e,
+            null,
+            req.onePhaseCommit(),
+            req.deployInfo() != null);
+
+        try {
+            ctx.io().send(node.id(), res, req.policy());
+        }
+        catch (IgniteCheckedException e0) {
+            U.error(txPrepareMsgLog, "Failed to send wait topology version response for near prepare " +
+                "[txId=" + req.version() +
+                ", topVer=" + topFut.initialVersion() +
+                ", node=" + node.id() +
+                ", req=" + req + ']', e0);
+        }
+    }
+
+    /**
      * @param expVer Expected topology version.
      * @param curVer Current topology version.
      * @param req Request.
@@ -604,6 +658,8 @@
         if (expVer.equals(curVer))
             return false;
 
+        // TODO IGNITE-6754 check mvcc crd for mvcc enabled txs.
+
         for (IgniteTxEntry e : F.concat(false, req.reads(), req.writes())) {
             GridCacheContext ctx = e.context();
 
@@ -858,8 +914,11 @@
         else
             tx = ctx.tm().tx(dhtVer);
 
-        if (tx != null)
+        if (tx != null) {
+            tx.mvccSnapshot(req.mvccSnapshot());
+
             req.txState(tx.txState());
+        }
 
         if (tx == null && locTx != null && !req.commit()) {
             U.warn(log, "DHT local tx not found for near local tx rollback " +
@@ -1324,6 +1383,7 @@
                 tx.commitVersion(req.commitVersion());
                 tx.invalidate(req.isInvalidate());
                 tx.systemInvalidate(req.isSystemInvalidate());
+                tx.mvccSnapshot(req.mvccSnapshot());
 
                 // Complete remote candidates.
                 tx.doneRemote(req.baseVersion(), null, null, null);
@@ -1331,11 +1391,13 @@
                 tx.setPartitionUpdateCounters(
                     req.partUpdateCounters() != null ? req.partUpdateCounters().array() : null);
 
+                tx.txCounters(true).updateCounters(req.updateCounters());
+
                 tx.commitRemoteTx();
             }
             else {
                 tx.doneRemote(req.baseVersion(), null, null, null);
-
+                tx.mvccSnapshot(req.mvccSnapshot());
                 tx.rollbackRemoteTx();
             }
         }
@@ -1370,6 +1432,7 @@
         try {
             tx.commitVersion(req.writeVersion());
             tx.invalidate(req.isInvalidate());
+            tx.mvccSnapshot(req.mvccSnapshot());
 
             // Complete remote candidates.
             tx.doneRemote(req.version(), null, null, null);
@@ -1554,10 +1617,12 @@
         GridDhtTxPrepareRequest req,
         GridDhtTxPrepareResponse res
     ) throws IgniteCheckedException {
-        if (!F.isEmpty(req.writes())) {
+        if (req.queryUpdate() || !F.isEmpty(req.writes())) {
             GridDhtTxRemote tx = ctx.tm().tx(req.version());
 
             if (tx == null) {
+                assert !req.queryUpdate();
+
                 boolean single = req.last() && req.writes().size() == 1;
 
                 tx = new GridDhtTxRemote(
@@ -1664,7 +1729,8 @@
                                                 /*transformClo*/null,
                                                 tx.resolveTaskName(),
                                                 /*expiryPlc*/null,
-                                                /*keepBinary*/true);
+                                                /*keepBinary*/true,
+                                                null); // TODO IGNITE-7371
 
                                             if (val == null)
                                                 val = cacheCtx.toCacheObject(cacheCtx.store().load(null, entry.key()));
@@ -1712,7 +1778,7 @@
 
             res.invalidPartitionsByCacheId(tx.invalidPartitions());
 
-            if (tx.empty() && req.last()) {
+            if (!req.queryUpdate() && tx.empty() && req.last()) {
                 tx.skipCompletedVersions(req.skipCompletedVersion());
 
                 tx.rollbackRemoteTx();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxImplicitSingleStateImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxImplicitSingleStateImpl.java
index 10b06d8..10acb22 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxImplicitSingleStateImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxImplicitSingleStateImpl.java
@@ -31,6 +31,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
 import org.apache.ignite.internal.processors.cache.store.CacheStoreManager;
+import org.apache.ignite.internal.util.GridIntList;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -57,9 +58,10 @@
     private boolean recovery;
 
     /** {@inheritDoc} */
-    @Override public void addActiveCache(GridCacheContext ctx, boolean recovery, IgniteTxLocalAdapter tx)
+    @Override public void addActiveCache(GridCacheContext ctx, boolean recovery, IgniteTxAdapter tx)
         throws IgniteCheckedException {
         assert cacheCtx == null : "Cache already set [cur=" + cacheCtx.name() + ", new=" + ctx.name() + ']';
+        assert tx.local();
 
         cacheCtx = ctx;
         this.recovery = recovery;
@@ -68,6 +70,11 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public GridIntList cacheIds() {
+        return  GridIntList.asList(cacheCtx.cacheId());
+    }
+
+    /** {@inheritDoc} */
     @Nullable @Override public GridCacheContext singleCacheContext(GridCacheSharedContext cctx) {
         return cacheCtx;
     }
@@ -289,7 +296,14 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public boolean mvccEnabled(GridCacheSharedContext cctx) {
+        GridCacheContext ctx0 = cacheCtx;
+
+        return ctx0 != null && ctx0.mvccEnabled();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
         return S.toString(IgniteTxImplicitSingleStateImpl.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
index 6f11a57..bfe67ee 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalAdapter.java
@@ -20,6 +20,7 @@
 import java.io.Externalizable;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;
 import java.util.UUID;
@@ -30,9 +31,11 @@
 import javax.cache.processor.EntryProcessor;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.apache.ignite.failure.FailureContext;
+import org.apache.ignite.failure.FailureType;
 import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.InvalidEnvironmentException;
+import org.apache.ignite.internal.NodeStoppingException;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
 import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
@@ -46,7 +49,6 @@
 import org.apache.ignite.internal.processors.cache.GridCacheEntryEx;
 import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException;
 import org.apache.ignite.internal.processors.cache.GridCacheFilterFailedException;
-import org.apache.ignite.internal.processors.cache.GridCacheIndexUpdateException;
 import org.apache.ignite.internal.processors.cache.GridCacheMvccCandidate;
 import org.apache.ignite.internal.processors.cache.GridCacheOperation;
 import org.apache.ignite.internal.processors.cache.GridCacheReturn;
@@ -54,7 +56,11 @@
 import org.apache.ignite.internal.processors.cache.GridCacheUpdateTxResult;
 import org.apache.ignite.internal.processors.cache.IgniteCacheExpiryPolicy;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCounters;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.processors.cache.store.CacheStoreManager;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext;
@@ -62,6 +68,7 @@
 import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException;
 import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.lang.GridClosureException;
 import org.apache.ignite.internal.util.lang.GridTuple;
@@ -76,6 +83,7 @@
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiClosure;
 import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.thread.IgniteThread;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionDeadlockException;
 import org.apache.ignite.transactions.TransactionIsolation;
@@ -149,6 +157,12 @@
     /** */
     protected CacheWriteSynchronizationMode syncMode;
 
+    /** */
+    private GridLongList mvccWaitTxs;
+
+    /** */
+    private volatile boolean qryEnlisted;
+
     /**
      * Empty constructor required for {@link Externalizable}.
      */
@@ -209,6 +223,10 @@
         txState = implicitSingle ? new IgniteTxImplicitSingleStateImpl() : new IgniteTxStateImpl();
     }
 
+    public GridLongList mvccWaitTransactions() {
+        return mvccWaitTxs;
+    }
+
     /**
      * @return Transaction write synchronization mode.
      */
@@ -280,10 +298,8 @@
         return depEnabled;
     }
 
-    /**
-     * @param depEnabled Flag indicating whether deployment is enabled for caches from this transaction or not.
-     */
-    public void activeCachesDeploymentEnabled(boolean depEnabled) {
+    /** {@inheritDoc} */
+    @Override public void activeCachesDeploymentEnabled(boolean depEnabled) {
         this.depEnabled = depEnabled;
     }
 
@@ -350,6 +366,8 @@
      * @param ret Result.
      */
     public void implicitSingleResult(GridCacheReturn ret) {
+        assert ret != null;
+
         if (ret.invokeResult())
             implicitRes.mergeEntryProcessResults(ret);
         else
@@ -473,7 +491,7 @@
 
     /** {@inheritDoc} */
     @SuppressWarnings({"CatchGenericClass"})
-    @Override public void userCommit() throws IgniteCheckedException {
+    @Override public final void userCommit() throws IgniteCheckedException {
         TransactionState state = state();
 
         if (state != COMMITTING) {
@@ -489,7 +507,7 @@
 
         Collection<IgniteTxEntry> commitEntries = (near() || cctx.snapshot().needTxReadLogging()) ? allEntries() : writeEntries();
 
-        boolean empty = F.isEmpty(commitEntries);
+        boolean empty = F.isEmpty(commitEntries) && !queryEnlisted();
 
         // Register this transaction as completed prior to write-phase to
         // ensure proper lock ordering for removed entries.
@@ -499,10 +517,14 @@
             cctx.tm().addCommittedTx(this);
 
         if (!empty) {
+            assert mvccWaitTxs == null;
+
             batchStoreCommit(writeEntries());
 
             WALPointer ptr = null;
 
+            Exception err = null;
+
             cctx.database().checkpointReadLock();
 
             try {
@@ -670,10 +692,16 @@
                                             CU.subjectId(this, cctx),
                                             resolveTaskName(),
                                             dhtVer,
-                                            null);
+                                            null,
+                                            mvccSnapshot());
 
-                                        if (updRes.success())
-                                            txEntry.updateCounter(updRes.updatePartitionCounter());
+                                        if (updRes.success()) {
+                                            txEntry.updateCounter(updRes.updateCounter());
+
+                                            GridLongList waitTxs = updRes.mvccWaitTransactions();
+
+                                            updateWaitTxs(waitTxs);
+                                        }
 
                                         if (updRes.loggedPointer() != null)
                                             ptr = updRes.loggedPointer();
@@ -684,27 +712,28 @@
                                             final GridCacheVersion dhtVer0 = dhtVer;
 
                                             updateNearEntrySafely(cacheCtx, txEntry.key(), entry -> entry.innerSet(
-                                                    null,
-                                                    eventNodeId(),
-                                                    nodeId,
-                                                    val0,
-                                                    false,
-                                                    false,
-                                                    txEntry.ttl(),
-                                                    false,
-                                                    metrics0,
-                                                    txEntry.keepBinary(),
-                                                    txEntry.hasOldValue(),
-                                                    txEntry.oldValue(),
-                                                    topVer,
-                                                    CU.empty0(),
-                                                    DR_NONE,
-                                                    txEntry.conflictExpireTime(),
-                                                    null,
-                                                    CU.subjectId(this, cctx),
-                                                    resolveTaskName(),
-                                                    dhtVer0,
-                                                    null)
+                                                null,
+                                                eventNodeId(),
+                                                nodeId,
+                                                val0,
+                                                false,
+                                                false,
+                                                txEntry.ttl(),
+                                                false,
+                                                metrics0,
+                                                txEntry.keepBinary(),
+                                                txEntry.hasOldValue(),
+                                                txEntry.oldValue(),
+                                                topVer,
+                                                CU.empty0(),
+                                                DR_NONE,
+                                                txEntry.conflictExpireTime(),
+                                                null,
+                                                CU.subjectId(this, cctx),
+                                                resolveTaskName(),
+                                                dhtVer0,
+                                                null,
+                                                mvccSnapshot())
                                             );
                                         }
                                     }
@@ -721,15 +750,21 @@
                                             txEntry.oldValue(),
                                             topVer,
                                             null,
-                                            cached.detached()  ? DR_NONE : drType,
+                                            cached.detached() ? DR_NONE : drType,
                                             cached.isNear() ? null : explicitVer,
                                             CU.subjectId(this, cctx),
                                             resolveTaskName(),
                                             dhtVer,
-                                            null);
+                                            null,
+                                            mvccSnapshot());
 
-                                        if (updRes.success())
-                                            txEntry.updateCounter(updRes.updatePartitionCounter());
+                                        if (updRes.success()) {
+                                            txEntry.updateCounter(updRes.updateCounter());
+
+                                            GridLongList waitTxs = updRes.mvccWaitTransactions();
+
+                                            updateWaitTxs(waitTxs);
+                                        }
 
                                         if (updRes.loggedPointer() != null)
                                             ptr = updRes.loggedPointer();
@@ -739,23 +774,24 @@
                                             final GridCacheVersion dhtVer0 = dhtVer;
 
                                             updateNearEntrySafely(cacheCtx, txEntry.key(), entry -> entry.innerRemove(
-                                                    null,
-                                                    eventNodeId(),
-                                                    nodeId,
-                                                    false,
-                                                    false,
-                                                    metrics0,
-                                                    txEntry.keepBinary(),
-                                                    txEntry.hasOldValue(),
-                                                    txEntry.oldValue(),
-                                                    topVer,
-                                                    CU.empty0(),
-                                                    DR_NONE,
-                                                    null,
-                                                    CU.subjectId(this, cctx),
-                                                    resolveTaskName(),
-                                                    dhtVer0,
-                                                    null)
+                                                null,
+                                                eventNodeId(),
+                                                nodeId,
+                                                false,
+                                                false,
+                                                metrics0,
+                                                txEntry.keepBinary(),
+                                                txEntry.hasOldValue(),
+                                                txEntry.oldValue(),
+                                                topVer,
+                                                CU.empty0(),
+                                                DR_NONE,
+                                                null,
+                                                CU.subjectId(this, cctx),
+                                                resolveTaskName(),
+                                                dhtVer0,
+                                                null,
+                                                mvccSnapshot())
                                             );
                                         }
                                     }
@@ -795,7 +831,7 @@
                                             log.debug("Ignoring READ entry when committing: " + txEntry);
                                     }
                                     else {
-                                        assert ownsLock(txEntry.cached()):
+                                        assert ownsLock(txEntry.cached()) :
                                             "Transaction does not own lock for group lock entry during  commit [tx=" +
                                                 this + ", txEntry=" + txEntry + ']';
 
@@ -833,39 +869,34 @@
                         // Need to remove version from committed list.
                         cctx.tm().removeCommittedTx(this);
 
-                        if (X.hasCause(ex, GridCacheIndexUpdateException.class) && cacheCtx.cache().isMongoDataCache()) {
-                            if (log.isDebugEnabled())
-                                log.debug("Failed to update mongo document index (transaction entry will " +
-                                    "be ignored): " + txEntry);
+                        boolean isNodeStopping = X.hasCause(ex, NodeStoppingException.class);
+                        boolean hasInvalidEnvironmentIssue = X.hasCause(ex, InvalidEnvironmentException.class);
 
-                            // Set operation to NOOP.
-                            txEntry.op(NOOP);
+                        IgniteCheckedException err0 = new IgniteTxHeuristicCheckedException("Failed to locally write to cache " +
+                            "(all transaction entries will be invalidated, however there was a window when " +
+                            "entries for this transaction were visible to others): " + this, ex);
 
-                            errorWhenCommitting();
-
-                            throw ex;
+                        if (isNodeStopping) {
+                            U.warn(log, "Failed to commit transaction, node is stopping [tx=" + this +
+                                ", err=" + ex + ']');
                         }
-                        else {
-                            boolean hasInvalidEnvironmentIssue = X.hasCause(ex, InvalidEnvironmentException.class);
+                        else if (hasInvalidEnvironmentIssue) {
+                            U.warn(log, "Failed to commit transaction, node is in invalid state and will be stopped [tx=" + this +
+                                ", err=" + ex + ']');
+                        }
+                        else
+                            U.error(log, "Commit failed.", err0);
 
-                            IgniteCheckedException err = new IgniteTxHeuristicCheckedException("Failed to locally write to cache " +
-                                "(all transaction entries will be invalidated, however there was a window when " +
-                                "entries for this transaction were visible to others): " + this, ex);
+                        COMMIT_ERR_UPD.compareAndSet(this, null, err0);
 
-                            if (hasInvalidEnvironmentIssue) {
-                                U.warn(log, "Failed to commit transaction, node is stopping " +
-                                    "[tx=" + this + ", err=" + ex + ']');
-                            }
-                            else
-                                U.error(log, "Heuristic transaction failure.", err);
+                        state(UNKNOWN);
 
-                            COMMIT_ERR_UPD.compareAndSet(this, null, err);
-
-                            state(UNKNOWN);
-
+                        if (hasInvalidEnvironmentIssue)
+                            cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, ex));
+                        else if (!isNodeStopping) { // Skip fair uncommit in case of node stopping or invalidation.
                             try {
                                 // Courtesy to minimize damage.
-                                uncommit(hasInvalidEnvironmentIssue);
+                                uncommit();
                             }
                             catch (Throwable ex1) {
                                 U.error(log, "Failed to uncommit transaction: " + this, ex1);
@@ -873,25 +904,31 @@
                                 if (ex1 instanceof Error)
                                     throw ex1;
                             }
-
-                            if (ex instanceof Error)
-                                throw ex;
-
-                            throw err;
                         }
+
+                        if (ex instanceof Error)
+                            throw ex;
+
+                        throw err0;
                     }
                 }
 
+                applyTxCounters();
+
                 if (ptr != null && !cctx.tm().logTxRecords())
                     cctx.wal().flush(ptr, false);
             }
             catch (StorageException e) {
+                err = e;
+
                 throw new IgniteCheckedException("Failed to log transaction record " +
                     "(transaction will be rolled back): " + this, e);
             }
             finally {
                 cctx.database().checkpointReadUnlock();
 
+                notifyDrManager(state() == COMMITTING && err == null);
+
                 cctx.tm().resetContext();
             }
         }
@@ -912,6 +949,18 @@
     }
 
     /**
+     * @param waitTxs Tx ids to wait for.
+     */
+    private void updateWaitTxs(@Nullable GridLongList waitTxs) {
+        if (waitTxs != null) {
+            if (this.mvccWaitTxs == null)
+                this.mvccWaitTxs = waitTxs;
+            else
+                this.mvccWaitTxs.addAll(waitTxs);
+        }
+    }
+
+    /**
      * Safely performs {@code updateClojure} operation on near cache entry with given {@code entryKey}.
      * In case of {@link GridCacheEntryRemovedException} operation will be retried.
      *
@@ -945,6 +994,7 @@
         }
     }
 
+
     /**
      * Commits transaction to transaction manager. Used for one-phase commit transactions only.
      *
@@ -1012,6 +1062,8 @@
     @Override public void userRollback(boolean clearThreadMap) throws IgniteCheckedException {
         TransactionState state = state();
 
+        notifyDrManager(false);
+
         if (state != ROLLING_BACK && state != ROLLED_BACK) {
             setRollbackOnly();
 
@@ -1079,8 +1131,8 @@
      * @param read {@code True} if read.
      * @param accessTtl TTL for read operation.
      * @param filter Filter to check entries.
-     * @throws IgniteCheckedException If error.
      * @param computeInvoke If {@code true} computes return value for invoke operation.
+     * @throws IgniteCheckedException If error.
      */
     @SuppressWarnings("unchecked")
     protected final void postLockWrite(
@@ -1142,7 +1194,8 @@
                                     null,
                                     resolveTaskName(),
                                     null,
-                                    txEntry.keepBinary());
+                                    txEntry.keepBinary(),
+                                    null);  // TODO IGNITE-7371
                             }
                         }
                         else {
@@ -1241,6 +1294,8 @@
         Object key0 = null;
         Object val0 = null;
 
+        IgniteThread.onEntryProcessorEntered(true);
+
         try {
             Object res = null;
 
@@ -1252,15 +1307,13 @@
 
                 res = entryProcessor.process(invokeEntry, t.get2());
 
-                val0 = invokeEntry.value();
+                val0 = invokeEntry.getValue(txEntry.keepBinary());
 
                 key0 = invokeEntry.key();
             }
 
-            val0 = ctx.toCacheObject(val0);
-
-            if (val0 != null)
-                ctx.validateKeyAndValue(txEntry.key(), (CacheObject)val0);
+            if (val0 != null) // no validation for remove case
+                ctx.validateKeyAndValue(txEntry.key(), ctx.toCacheObject(val0));
 
             if (res != null)
                 ret.addEntryProcessResult(ctx, txEntry.key(), key0, res, null, txEntry.keepBinary());
@@ -1268,6 +1321,9 @@
         catch (Exception e) {
             ret.addEntryProcessResult(ctx, txEntry.key(), key0, null, e, txEntry.keepBinary());
         }
+        finally {
+            IgniteThread.onEntryProcessorLeft();
+        }
     }
 
     /**
@@ -1279,14 +1335,8 @@
         return !txState.init(txSize) || cctx.tm().onStarted(this);
     }
 
-    /**
-     * Adds cache to the list of active caches in transaction.
-     *
-     * @param cacheCtx Cache context to add.
-     * @throws IgniteCheckedException If caches already enlisted in this transaction are not compatible with given
-     *      cache (e.g. they have different stores).
-     */
-    protected final void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException {
+    /** {@inheritDoc} */
+    @Override public final void addActiveCache(GridCacheContext cacheCtx, boolean recovery) throws IgniteCheckedException {
         txState.addActiveCache(cacheCtx, recovery, this);
     }
 
@@ -1347,7 +1397,7 @@
      * @param skipStore Skip store flag.
      * @return Transaction entry.
      */
-    protected final IgniteTxEntry addEntry(GridCacheOperation op,
+    public final IgniteTxEntry addEntry(GridCacheOperation op,
         @Nullable CacheObject val,
         @Nullable EntryProcessor entryProcessor,
         Object[] invokeArgs,
@@ -1500,6 +1550,18 @@
         }
     }
 
+    /**
+     * @return Map of affected partitions: cacheId -> partId.
+     */
+    public Map<Integer, Set<Integer>> partsMap() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void touchPartition(int cacheId, int partId) {
+        txState.touchPartition(cacheId, partId);
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return GridToStringBuilder.toString(IgniteTxLocalAdapter.class, this, "super", super.toString(),
@@ -1582,6 +1644,76 @@
     }
 
     /**
+     * Merges mvcc update counters to the partition update counters. For mvcc transactions we update partitions
+     * counters only on commit phase.
+     */
+    private Map<Integer, PartitionUpdateCounters> applyAndCollectLocalUpdateCounters() {
+        if (F.isEmpty(txState.touchedPartitions()))
+            return null;
+
+        HashMap<Integer, PartitionUpdateCounters> updCntrs = new HashMap<>();
+
+        for (Map.Entry<Integer, Set<Integer>> entry : txState.touchedPartitions().entrySet()) {
+            Integer cacheId = entry.getKey();
+
+            Set<Integer> parts = entry.getValue();
+
+            assert !F.isEmpty(parts);
+
+            GridCacheContext ctx0 = cctx.cacheContext(cacheId);
+
+            Map<Integer, Long> partCntrs = new HashMap<>(parts.size());
+
+            for (Integer p : parts) {
+                GridDhtLocalPartition dhtPart = ctx0.topology().localPartition(p);
+
+                assert dhtPart != null;
+
+                long cntr = dhtPart.mvccUpdateCounter();
+
+                dhtPart.updateCounter(cntr);
+
+                partCntrs.put(p, cntr);
+            }
+
+            updCntrs.put(cacheId, new PartitionUpdateCounters(partCntrs));
+        }
+
+        return updCntrs;
+    }
+
+    /**
+     * @return {@code True} if there are entries, enlisted by query.
+     */
+    public boolean queryEnlisted() {
+        return qryEnlisted;
+    }
+
+    /**
+     * @param ver Mvcc version.
+     */
+    public void markQueryEnlisted(MvccSnapshot ver) {
+        if (!qryEnlisted) {
+            if (mvccSnapshot == null)
+                mvccSnapshot = ver;
+
+            cctx.coordinators().registerLocalTransaction(ver.coordinatorVersion(), ver.counter());
+
+            qryEnlisted = true;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void applyTxCounters() {
+        super.applyTxCounters();
+
+        Map<Integer, PartitionUpdateCounters> updCntrs = applyAndCollectLocalUpdateCounters();
+
+        // remember counters for subsequent sending to backups
+        txCounters(true).updateCounters(updCntrs);
+    }
+
+    /**
      * Post-lock closure alias.
      *
      * @param <T> Return type.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalEx.java
index b61b1a9..651be60 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalEx.java
@@ -56,4 +56,12 @@
      * @throws IgniteCheckedException If finish failed.
      */
     public boolean localFinish(boolean commit, boolean clearThreadMap) throws IgniteCheckedException;
+
+    /**
+     * Remembers that particular cache partition was touched by current tx.
+     *
+     * @param cacheId Cache id.
+     * @param partId Partition id.
+     */
+    public void touchPartition(int cacheId, int partId);
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalState.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalState.java
index 123d396..01eb4f4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalState.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalState.java
@@ -17,6 +17,9 @@
 
 package org.apache.ignite.internal.processors.cache.transactions;
 
+import java.util.Map;
+import java.util.Set;
+
 /**
  *
  */
@@ -41,4 +44,17 @@
      *
      */
     public void seal();
+
+    /**
+     * @return Cache partitions touched by current tx.
+     */
+    public Map<Integer, Set<Integer>> touchedPartitions();
+
+    /**
+     * Remembers that particular cache partition was touched by current tx.
+     *
+     * @param cacheId Cache id.
+     * @param partId Partition id.
+     */
+    public void touchPartition(int cacheId, int partId);
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalStateAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalStateAdapter.java
index 4943aac..9c6ef8f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalStateAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxLocalStateAdapter.java
@@ -17,7 +17,13 @@
 
 package org.apache.ignite.internal.processors.cache.transactions;
 
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.util.typedef.internal.U;
 
@@ -25,6 +31,11 @@
  *
  */
 public abstract class IgniteTxLocalStateAdapter implements IgniteTxLocalState {
+    /** */
+    private static final Function<Integer, Set<Integer>> CREATE_INT_SET = k -> new HashSet<>();
+    /** */
+    private Map<Integer, Set<Integer>> touchedParts;
+
     /**
      * @param cacheCtx Cache context.
      * @param tx Transaction.
@@ -40,4 +51,19 @@
                 cacheCtx.cache().metrics0().onTxRollback(durationNanos);
         }
     }
+
+    /** {@inheritDoc} */
+    @Override public Map<Integer, Set<Integer>> touchedPartitions() {
+        Map<Integer, Set<Integer>> parts = touchedParts;
+
+        return parts != null ? Collections.unmodifiableMap(parts) : null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void touchPartition(int cacheId, int partId) {
+        if (touchedParts == null)
+            touchedParts = new HashMap<>();
+
+        touchedParts.computeIfAbsent(cacheId, CREATE_INT_SET).add(partId);
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java
index ffaaf43..d6e9d7a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxManager.java
@@ -313,6 +313,16 @@
     }
 
     /**
+     * Rollback all active transactions with acquired Mvcc snapshot.
+     */
+    public void rollbackMvccTxOnCoordinatorChange() {
+        for (IgniteInternalTx tx : activeTransactions()) {
+            if (tx.mvccSnapshot() != null)
+                ((GridNearTxLocal)tx).rollbackNearTxLocalAsync(false, false);
+        }
+    }
+
+    /**
      * @param cacheId Cache ID.
      * @param txMap Transactions map.
      */
@@ -456,6 +466,7 @@
      * @param concurrency Concurrency.
      * @param isolation Isolation.
      * @param timeout transaction timeout.
+     * @param sql Whether this transaction is being started via SQL API or not, or {@code null} if unknown.
      * @param txSize Expected transaction size.
      * @param lb Label.
      * @return New transaction.
@@ -468,6 +479,7 @@
         TransactionIsolation isolation,
         long timeout,
         boolean storeEnabled,
+        Boolean sql,
         int txSize,
         @Nullable String lb
     ) {
@@ -487,6 +499,7 @@
             isolation,
             timeout,
             storeEnabled,
+            sql,
             txSize,
             subjId,
             taskNameHash,
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteSingleStateImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteSingleStateImpl.java
index b61a99c..3a2ef37 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteSingleStateImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteSingleStateImpl.java
@@ -124,7 +124,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(IgniteTxRemoteSingleStateImpl.class, this);
     }
 
@@ -142,4 +142,9 @@
 
         return null;
     }
+
+    /** {@inheritDoc} */
+    @Override public boolean mvccEnabled(GridCacheSharedContext cctx) {
+        return entry != null && entry.context().mvccEnabled();
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateAdapter.java
index bcb900c..c1d973e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateAdapter.java
@@ -22,6 +22,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
+import org.apache.ignite.internal.util.GridIntList;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.jetbrains.annotations.Nullable;
 
@@ -31,6 +32,9 @@
  *
  */
 public abstract class IgniteTxRemoteStateAdapter implements IgniteTxRemoteState {
+    /** Active cache IDs. */
+    private GridIntList activeCacheIds = new GridIntList();
+
     /** {@inheritDoc} */
     @Override public boolean implicitSingle() {
         return false;
@@ -38,9 +42,12 @@
 
     /** {@inheritDoc} */
     @Nullable @Override public Integer firstCacheId() {
-        assert false;
+        return activeCacheIds.isEmpty() ? null : activeCacheIds.get(0);
+    }
 
-        return null;
+    /** {@inheritDoc} */
+    @Nullable @Override public GridIntList cacheIds() {
+        return activeCacheIds;
     }
 
     /** {@inheritDoc} */
@@ -67,9 +74,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery, IgniteTxLocalAdapter tx)
+    @Override public void addActiveCache(GridCacheContext cctx, boolean recovery, IgniteTxAdapter tx)
         throws IgniteCheckedException {
-        assert false;
+        assert !tx.local();
+
+        int cacheId = cctx.cacheId();
+
+        // Check if we can enlist new cache to transaction.
+        if (!activeCacheIds.contains(cacheId))
+            activeCacheIds.add(cacheId);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateImpl.java
index 1326491..35c3fb3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxRemoteStateImpl.java
@@ -176,7 +176,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(IgniteTxRemoteStateImpl.class, this);
     }
 
@@ -209,4 +209,14 @@
 
         return null;
     }
+
+    /** {@inheritDoc} */
+    @Override public boolean mvccEnabled(GridCacheSharedContext cctx) {
+        for (IgniteTxEntry e : writeMap.values()) {
+            if (e.context().mvccEnabled())
+                return true;
+        }
+
+        return false;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxState.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxState.java
index 1fe0d2a..e42fe7f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxState.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxState.java
@@ -26,6 +26,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
 import org.apache.ignite.internal.processors.cache.store.CacheStoreManager;
+import org.apache.ignite.internal.util.GridIntList;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.jetbrains.annotations.Nullable;
 
@@ -45,6 +46,13 @@
     @Nullable public Integer firstCacheId();
 
     /**
+     * Gets caches ids affected with current tx.
+     *
+     * @return tx cache ids.
+     */
+    @Nullable public GridIntList cacheIds();
+
+    /**
      * Unwind evicts for caches involved in this transaction.
      * @param cctx Grid cache shared context.
      */
@@ -83,7 +91,7 @@
      * @param tx Transaction.
      * @throws IgniteCheckedException If cache check failed.
      */
-    public void addActiveCache(GridCacheContext cacheCtx, boolean recovery, IgniteTxLocalAdapter tx)
+    public void addActiveCache(GridCacheContext cacheCtx, boolean recovery, IgniteTxAdapter tx)
         throws IgniteCheckedException;
 
     /**
@@ -180,4 +188,10 @@
      * @return {@code True} if transaction is empty.
      */
     public boolean empty();
+
+    /**
+     * @param cctx Context.
+     * @return {@code True} if MVCC mode is enabled for transaction.
+     */
+    public boolean mvccEnabled(GridCacheSharedContext cctx);
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxStateImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxStateImpl.java
index 4f14b5c..371c6d0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxStateImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/IgniteTxStateImpl.java
@@ -29,8 +29,6 @@
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException;
 import org.apache.ignite.internal.processors.cache.CacheStoppedException;
-import org.apache.ignite.internal.managers.discovery.DiscoCache;
-import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
@@ -73,6 +71,10 @@
     @GridToStringInclude
     protected Boolean recovery;
 
+    /** */
+    @GridToStringInclude
+    protected Boolean mvccEnabled;
+
     /** {@inheritDoc} */
     @Override public boolean implicitSingle() {
         return false;
@@ -84,6 +86,11 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public GridIntList cacheIds() {
+        return activeCacheIds;
+    }
+
+    /** {@inheritDoc} */
     @Override public void unwindEvicts(GridCacheSharedContext cctx) {
         for (int i = 0; i < activeCacheIds.size(); i++) {
             int cacheId = activeCacheIds.get(i);
@@ -203,8 +210,10 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery, IgniteTxLocalAdapter tx)
+    @Override public void addActiveCache(GridCacheContext cacheCtx, boolean recovery, IgniteTxAdapter tx)
         throws IgniteCheckedException {
+        assert tx.local();
+
         GridCacheSharedContext cctx = cacheCtx.shared();
 
         int cacheId = cacheCtx.cacheId();
@@ -215,6 +224,12 @@
 
         this.recovery = recovery;
 
+        if (this.mvccEnabled != null && this.mvccEnabled != cacheCtx.mvccEnabled())
+            throw new IgniteCheckedException("Failed to enlist new cache to existing transaction " +
+                "(caches with different mvcc settings can't be enlisted in one transaction).");
+
+        this.mvccEnabled = cacheCtx.mvccEnabled();
+
         // Check if we can enlist new cache to transaction.
         if (!activeCacheIds.contains(cacheId)) {
             String err = cctx.verifyTxCompatibility(tx, activeCacheIds, cacheCtx);
@@ -462,7 +477,12 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public boolean mvccEnabled(GridCacheSharedContext cctx) {
+        return Boolean.TRUE == mvccEnabled;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
         return S.toString(IgniteTxStateImpl.class, this, "txMap", allEntriesCopy());
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/TxCounters.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/TxCounters.java
new file mode 100644
index 0000000..2ad4f94
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/transactions/TxCounters.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.transactions;
+
+import java.util.Collections;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCounters;
+
+/**
+ * Values which should be tracked during transaction execution and applied on commit.
+ */
+public class TxCounters {
+    /** Size changes for cache partitions made by transaction */
+    private final ConcurrentMap<Integer, ConcurrentMap<Integer, AtomicLong>> sizeDeltas = new ConcurrentHashMap<>();
+    /** Update counters for cache partitions in the end of transaction */
+    private Map<Integer, PartitionUpdateCounters> updCntrs;
+
+    /**
+     * Accumulates size change for cache partition.
+     *
+     * @param cacheId Cache id.
+     * @param part Partition id.
+     * @param delta Size delta.
+     */
+    public void accumulateSizeDelta(int cacheId, int part, long delta) {
+        ConcurrentMap<Integer, AtomicLong> partDeltas = sizeDeltas.get(cacheId);
+
+        if (partDeltas == null) {
+            ConcurrentMap<Integer, AtomicLong> partDeltas0 =
+                sizeDeltas.putIfAbsent(cacheId, partDeltas = new ConcurrentHashMap<>());
+
+            if (partDeltas0 != null)
+                partDeltas = partDeltas0;
+        }
+
+        AtomicLong accDelta = partDeltas.get(part);
+
+        if (accDelta == null) {
+            AtomicLong accDelta0 = partDeltas.putIfAbsent(part, accDelta = new AtomicLong());
+
+            if (accDelta0 != null)
+                accDelta = accDelta0;
+        }
+
+        // here AtomicLong is used more as a container,
+        // every instance is assumed to be accessed in thread-confined manner
+        accDelta.set(accDelta.get() + delta);
+    }
+
+    /** */
+    public void updateCounters(Map<Integer, PartitionUpdateCounters> updCntrs) {
+        this.updCntrs = updCntrs;
+    }
+
+    /** */
+    public Map<Integer, PartitionUpdateCounters> updateCounters() {
+        return updCntrs != null ? Collections.unmodifiableMap(updCntrs) : Collections.emptyMap();
+    }
+
+    /** */
+    public Map<Integer, ? extends Map<Integer, AtomicLong>> sizeDeltas() {
+        return Collections.unmodifiableMap(sizeDeltas);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataInnerIO.java
index d5b28ef..6594451 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataInnerIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataInnerIO.java
@@ -17,7 +17,9 @@
 
 package org.apache.ignite.internal.processors.cache.tree;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
 import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
@@ -26,6 +28,8 @@
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.lang.IgniteInClosure;
 
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+
 /**
  *
  */
@@ -36,61 +40,126 @@
      * @param canGetRow If we can get full row from this page.
      * @param itemSize Single item size on page.
      */
-    AbstractDataInnerIO(int type, int ver, boolean canGetRow, int itemSize) {
+    protected AbstractDataInnerIO(int type, int ver, boolean canGetRow, int itemSize) {
         super(type, ver, canGetRow, itemSize);
     }
 
     /** {@inheritDoc} */
-    @Override public void storeByOffset(long pageAddr, int off, CacheSearchRow row) {
+    @Override public final void storeByOffset(long pageAddr, int off, CacheSearchRow row) {
         assert row.link() != 0;
 
         PageUtils.putLong(pageAddr, off, row.link());
-        PageUtils.putInt(pageAddr, off + 8, row.hash());
+        off += 8;
+
+        PageUtils.putInt(pageAddr, off, row.hash());
+        off += 4;
 
         if (storeCacheId()) {
             assert row.cacheId() != CU.UNDEFINED_CACHE_ID : row;
 
-            PageUtils.putInt(pageAddr, off + 12, row.cacheId());
+            PageUtils.putInt(pageAddr, off, row.cacheId());
+            off += 4;
+        }
+
+        if (storeMvccVersion()) {
+            long mvccCrd = row.mvccCoordinatorVersion();
+            long mvccCntr = row.mvccCounter();
+            int opCntr = row.mvccOperationCounter();
+
+            assert mvccVersionIsValid(mvccCrd, mvccCntr, opCntr);
+
+            PageUtils.putLong(pageAddr, off, mvccCrd);
+            off += 8;
+
+            PageUtils.putLong(pageAddr, off, mvccCntr);
+            off += 8;
+
+            PageUtils.putInt(pageAddr, off, opCntr);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public CacheSearchRow getLookupRow(BPlusTree<CacheSearchRow, ?> tree, long pageAddr, int idx) {
-        int cacheId = getCacheId(pageAddr, idx);
-        int hash = getHash(pageAddr, idx);
+    @Override public final CacheSearchRow getLookupRow(BPlusTree<CacheSearchRow, ?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
         long link = getLink(pageAddr, idx);
+        int hash = getHash(pageAddr, idx);
+
+        int cacheId = storeCacheId() ? getCacheId(pageAddr, idx) : CU.UNDEFINED_CACHE_ID;
+
+        if (storeMvccVersion()) {
+            long mvccCrd = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int opCntr = getMvccOperationCounter(pageAddr, idx);
+
+            assert mvccVersionIsValid(mvccCrd, mvccCntr, opCntr);
+
+            return ((CacheDataTree)tree).rowStore().mvccRow(cacheId,
+                hash,
+                link,
+                CacheDataRowAdapter.RowData.KEY_ONLY,
+                mvccCrd,
+                mvccCntr,
+                opCntr);
+        }
 
         return ((CacheDataTree)tree).rowStore().keySearchRow(cacheId, hash, link);
     }
 
     /** {@inheritDoc} */
-    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<CacheSearchRow> srcIo, long srcPageAddr,
-                                int srcIdx) {
-        int hash = ((RowLinkIO)srcIo).getHash(srcPageAddr, srcIdx);
-        long link = ((RowLinkIO)srcIo).getLink(srcPageAddr, srcIdx);
+    @Override public final void store(long dstPageAddr,
+        int dstIdx,
+        BPlusIO<CacheSearchRow> srcIo,
+        long srcPageAddr,
+        int srcIdx)
+    {
+        RowLinkIO rowIo = ((RowLinkIO)srcIo);
+
+        long link =rowIo.getLink(srcPageAddr, srcIdx);
+        int hash = rowIo.getHash(srcPageAddr, srcIdx);
+
         int off = offset(dstIdx);
 
         PageUtils.putLong(dstPageAddr, off, link);
-        PageUtils.putInt(dstPageAddr, off + 8, hash);
+        off += 8;
+
+        PageUtils.putInt(dstPageAddr, off, hash);
+        off += 4;
 
         if (storeCacheId()) {
-            int cacheId = ((RowLinkIO)srcIo).getCacheId(srcPageAddr, srcIdx);
+            int cacheId = rowIo.getCacheId(srcPageAddr, srcIdx);
 
             assert cacheId != CU.UNDEFINED_CACHE_ID;
 
-            PageUtils.putInt(dstPageAddr, off + 12, cacheId);
+            PageUtils.putInt(dstPageAddr, off, cacheId);
+            off += 4;
+        }
+
+        if (storeMvccVersion()) {
+            long mvccCrd = rowIo.getMvccCoordinatorVersion(srcPageAddr, srcIdx);
+            long mvccCntr = rowIo.getMvccCounter(srcPageAddr, srcIdx);
+            int opCntr = rowIo.getMvccOperationCounter(srcPageAddr, srcIdx);
+
+            assert MvccUtils.mvccVersionIsValid(mvccCrd, mvccCntr, opCntr);
+
+            PageUtils.putLong(dstPageAddr, off, mvccCrd);
+            off += 8;
+
+            PageUtils.putLong(dstPageAddr, off, mvccCntr);
+            off += 8;
+
+            PageUtils.putInt(dstPageAddr, off, opCntr);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
+    @Override public final long getLink(long pageAddr, int idx) {
         assert idx < getCount(pageAddr) : idx;
 
         return PageUtils.getLong(pageAddr, offset(idx));
     }
 
     /** {@inheritDoc} */
-    @Override public int getHash(long pageAddr, int idx) {
+    @Override public final int getHash(long pageAddr, int idx) {
         return PageUtils.getInt(pageAddr, offset(idx) + 8);
     }
 
@@ -105,5 +174,14 @@
     /**
      * @return {@code True} if cache ID has to be stored.
      */
-    protected abstract boolean storeCacheId();
+    protected boolean storeCacheId() {
+        return false;
+    }
+
+    /**
+     * @return {@code True} if mvcc version has to be stored.
+     */
+    protected boolean storeMvccVersion() {
+        return false;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataLeafIO.java
index e9a3a9b..f0e5c90e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataLeafIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/AbstractDataLeafIO.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache.tree;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
@@ -26,6 +27,8 @@
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.lang.IgniteInClosure;
 
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+
 /**
  *
  */
@@ -35,7 +38,7 @@
      * @param ver Page format version.
      * @param itemSize Single item size on page.
      */
-    AbstractDataLeafIO(int type, int ver, int itemSize) {
+    public AbstractDataLeafIO(int type, int ver, int itemSize) {
         super(type, ver, itemSize);
     }
 
@@ -44,52 +47,133 @@
         assert row.link() != 0;
 
         PageUtils.putLong(pageAddr, off, row.link());
-        PageUtils.putInt(pageAddr, off + 8, row.hash());
+        off += 8;
+
+        PageUtils.putInt(pageAddr, off, row.hash());
+        off += 4;
 
         if (storeCacheId()) {
             assert row.cacheId() != CU.UNDEFINED_CACHE_ID;
 
-            PageUtils.putInt(pageAddr, off + 12, row.cacheId());
+            PageUtils.putInt(pageAddr, off, row.cacheId());
+            off += 4;
+        }
+
+        if (storeMvccVersion()) {
+            long mvccCrdVer = row.mvccCoordinatorVersion();
+            long mvccCntr = row.mvccCounter();
+            int mvccOpCntr = row.mvccOperationCounter();
+
+            assert mvccVersionIsValid(mvccCrdVer, mvccCntr, mvccOpCntr);
+
+            PageUtils.putLong(pageAddr, off, mvccCrdVer);
+            off += 8;
+
+            PageUtils.putLong(pageAddr, off, mvccCntr);
+            off += 8;
+
+            PageUtils.putInt(pageAddr, off, mvccOpCntr);
+            off += 4;
+
+            // Lock version the same as mvcc version, a new row is
+            // always locked by Tx, in scope of which it was created.
+            PageUtils.putLong(pageAddr, off, mvccCrdVer);
+            off += 8;
+
+            PageUtils.putLong(pageAddr, off, mvccCntr);
         }
     }
 
     /** {@inheritDoc} */
     @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<CacheSearchRow> srcIo, long srcPageAddr,
-                                int srcIdx) {
-        int hash = ((RowLinkIO)srcIo).getHash(srcPageAddr, srcIdx);
-        long link = ((RowLinkIO)srcIo).getLink(srcPageAddr, srcIdx);
+        int srcIdx) {
+        RowLinkIO rowIo = (RowLinkIO) srcIo;
+
+        long link = rowIo.getLink(srcPageAddr, srcIdx);
+        int hash = rowIo.getHash(srcPageAddr, srcIdx);
+
         int off = offset(dstIdx);
 
         PageUtils.putLong(dstPageAddr, off, link);
-        PageUtils.putInt(dstPageAddr, off + 8, hash);
+        off += 8;
+
+        PageUtils.putInt(dstPageAddr, off, hash);
+        off += 4;
 
         if (storeCacheId()) {
-            int cacheId = ((RowLinkIO)srcIo).getCacheId(srcPageAddr, srcIdx);
+            int cacheId = rowIo.getCacheId(srcPageAddr, srcIdx);
 
             assert cacheId != CU.UNDEFINED_CACHE_ID;
 
-            PageUtils.putInt(dstPageAddr, off + 12, cacheId);
+            PageUtils.putInt(dstPageAddr, off, cacheId);
+            off += 4;
+        }
+
+        if (storeMvccVersion()) {
+            long mvccCrd = rowIo.getMvccCoordinatorVersion(srcPageAddr, srcIdx);
+            long mvccCntr = rowIo.getMvccCounter(srcPageAddr, srcIdx);
+            int mvccOpCntr = rowIo.getMvccOperationCounter(srcPageAddr, srcIdx);
+
+            assert mvccVersionIsValid(mvccCrd, mvccCntr, mvccOpCntr);
+
+            long lockCrdVer = rowIo.getMvccLockCoordinatorVersion(srcPageAddr, srcIdx);
+            long lockCntr = rowIo.getMvccLockCounter(srcPageAddr, srcIdx);
+
+            // Lock version cannot be blank
+            assert mvccVersionIsValid(lockCrdVer, lockCntr);
+
+            PageUtils.putLong(dstPageAddr, off, mvccCrd);
+            off += 8;
+
+            PageUtils.putLong(dstPageAddr, off, mvccCntr);
+            off += 8;
+
+            PageUtils.putInt(dstPageAddr, off, mvccOpCntr);
+            off += 4;
+
+            PageUtils.putLong(dstPageAddr, off, lockCrdVer);
+            off += 8;
+
+            PageUtils.putLong(dstPageAddr, off, lockCntr);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public CacheSearchRow getLookupRow(BPlusTree<CacheSearchRow, ?> tree, long buf, int idx) {
-        int cacheId = getCacheId(buf, idx);
-        int hash = getHash(buf, idx);
-        long link = getLink(buf, idx);
+    @Override public final CacheSearchRow getLookupRow(BPlusTree<CacheSearchRow, ?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
+        long link = getLink(pageAddr, idx);
+        int hash = getHash(pageAddr, idx);
+
+        int cacheId = storeCacheId() ? getCacheId(pageAddr, idx) : CU.UNDEFINED_CACHE_ID;
+
+        if (storeMvccVersion()) {
+            long mvccCrd = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = getMvccOperationCounter(pageAddr, idx);
+
+            assert mvccVersionIsValid(mvccCrd, mvccCntr, mvccOpCntr);
+
+            return ((CacheDataTree)tree).rowStore().mvccRow(cacheId,
+                hash,
+                link,
+                CacheDataRowAdapter.RowData.KEY_ONLY,
+                mvccCrd,
+                mvccCntr,
+                mvccOpCntr);
+        }
 
         return ((CacheDataTree)tree).rowStore().keySearchRow(cacheId, hash, link);
     }
 
     /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
+    @Override public final long getLink(long pageAddr, int idx) {
         assert idx < getCount(pageAddr) : idx;
 
         return PageUtils.getLong(pageAddr, offset(idx));
     }
 
     /** {@inheritDoc} */
-    @Override public int getHash(long pageAddr, int idx) {
+    @Override public final int getHash(long pageAddr, int idx) {
         return PageUtils.getInt(pageAddr, offset(idx) + 8);
     }
 
@@ -104,5 +188,14 @@
     /**
      * @return {@code True} if cache ID has to be stored.
      */
-    protected abstract boolean storeCacheId();
+    protected boolean storeCacheId() {
+        return false;
+    }
+
+    /**
+     * @return {@code True} if mvcc version has to be stored.
+     */
+    protected boolean storeMvccVersion() {
+        return false;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java
index 28d8919..7514798 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataRowStore.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
 import org.apache.ignite.internal.processors.cache.persistence.RowStore;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 
 /**
@@ -54,12 +55,30 @@
      * @return Search row.
      */
     CacheSearchRow keySearchRow(int cacheId, int hash, long link) {
-        DataRow dataRow = new DataRow(grp, hash, link, partId, CacheDataRowAdapter.RowData.KEY_ONLY);
+        return initDataRow(new DataRow(grp, hash, link, partId, CacheDataRowAdapter.RowData.KEY_ONLY), cacheId);
+    }
 
-        if (dataRow.cacheId() == CU.UNDEFINED_CACHE_ID && grp.sharedGroup())
-            dataRow.cacheId(cacheId);
+    /**
+     * @param cacheId Cache ID.
+     * @param hash Hash code.
+     * @param link Link.
+     * @param rowData Required row data.
+     * @param crdVer Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param opCntr Mvcc operation counter.
+     * @return Search row.
+     */
+    MvccDataRow mvccRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData, long crdVer, long mvccCntr, int opCntr) {
+        MvccDataRow dataRow = new MvccDataRow(grp,
+            hash,
+            link,
+            partId,
+            rowData,
+            crdVer,
+            mvccCntr,
+            opCntr);
 
-        return dataRow;
+        return initDataRow(dataRow, cacheId);
     }
 
     /**
@@ -70,8 +89,14 @@
      * @return Data row.
      */
     CacheDataRow dataRow(int cacheId, int hash, long link, CacheDataRowAdapter.RowData rowData) {
-        DataRow dataRow = new DataRow(grp, hash, link, partId, rowData);
+        return initDataRow(new DataRow(grp, hash, link, partId, rowData), cacheId);
+    }
 
+    /**
+     * @param dataRow Data row.
+     * @param cacheId Cache ID.
+     */
+    private <T extends DataRow> T initDataRow(T dataRow, int cacheId) {
         if (dataRow.cacheId() == CU.UNDEFINED_CACHE_ID && grp.sharedGroup())
             dataRow.cacheId(cacheId);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java
index f2bfa41..4226ec4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheDataTree.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.pagemem.PageUtils;
 import org.apache.ignite.internal.processors.cache.CacheGroupContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
@@ -28,12 +29,19 @@
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPagePayload;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccCacheIdAwareDataInnerIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccCacheIdAwareDataLeafIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataInnerIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataLeafIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
 import org.apache.ignite.internal.util.GridUnsafe;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 
 import static org.apache.ignite.internal.pagemem.PageIdUtils.itemId;
 import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE;
 
 /**
  *
@@ -46,7 +54,7 @@
     private final CacheGroupContext grp;
 
     /**
-     * @param grp Ccahe group.
+     * @param grp Cache group.
      * @param name Tree name.
      * @param reuseList Reuse list.
      * @param rowStore Row store.
@@ -69,8 +77,9 @@
             grp.offheap().globalRemoveId(),
             metaPageId,
             reuseList,
-            grp.sharedGroup() ? CacheIdAwareDataInnerIO.VERSIONS : DataInnerIO.VERSIONS,
-            grp.sharedGroup() ? CacheIdAwareDataLeafIO.VERSIONS : DataLeafIO.VERSIONS);
+            innerIO(grp),
+            leafIO(grp),
+            grp.shared().kernalContext().failure());
 
         assert rowStore != null;
 
@@ -83,15 +92,40 @@
     }
 
     /**
+     * @param grp Cache group.
+     * @return Tree inner IO.
+     */
+    private static IOVersions<? extends AbstractDataInnerIO> innerIO(CacheGroupContext grp) {
+        if (grp.mvccEnabled())
+            return grp.sharedGroup() ? MvccCacheIdAwareDataInnerIO.VERSIONS : MvccDataInnerIO.VERSIONS;
+
+        return grp.sharedGroup() ? CacheIdAwareDataInnerIO.VERSIONS : DataInnerIO.VERSIONS;
+    }
+
+    /**
+     * @param grp Cache group.
+     * @return Tree leaf IO.
+     */
+    private static IOVersions<? extends AbstractDataLeafIO> leafIO(CacheGroupContext grp) {
+        if (grp.mvccEnabled())
+            return grp.sharedGroup() ? MvccCacheIdAwareDataLeafIO.VERSIONS : MvccDataLeafIO.VERSIONS;
+
+        return grp.sharedGroup() ? CacheIdAwareDataLeafIO.VERSIONS : DataLeafIO.VERSIONS;
+    }
+
+    /**
      * @return Row store.
      */
-    CacheDataRowStore rowStore() {
+    public CacheDataRowStore rowStore() {
         return rowStore;
     }
 
     /** {@inheritDoc} */
     @Override protected int compare(BPlusIO<CacheSearchRow> iox, long pageAddr, int idx, CacheSearchRow row)
         throws IgniteCheckedException {
+        assert !grp.mvccEnabled() || row.mvccCoordinatorVersion() != 0
+            || (row.getClass() == SearchRow.class && row.key() == null) : row;
+
         RowLinkIO io = (RowLinkIO)iox;
 
         int cmp;
@@ -101,8 +135,6 @@
 
             int cacheId = io.getCacheId(pageAddr, idx);
 
-            assert cacheId != CU.UNDEFINED_CACHE_ID : "Cache ID is not stored";
-
             cmp = Integer.compare(cacheId, row.cacheId());
 
             if (cmp != 0)
@@ -129,21 +161,43 @@
 
         assert row.key() != null : row;
 
-        return compareKeys(row.key(), link);
+        cmp = compareKeys(row.key(), link);
+
+        if (cmp != 0 || !grp.mvccEnabled())
+            return cmp;
+
+        long crd = io.getMvccCoordinatorVersion(pageAddr, idx);
+        long cntr = io.getMvccCounter(pageAddr, idx);
+        int opCntr = io.getMvccOperationCounter(pageAddr, idx);
+
+        assert MvccUtils.mvccVersionIsValid(crd, cntr, opCntr);
+
+        return -MvccUtils.compare(crd, cntr, opCntr, row); // descending order
     }
 
     /** {@inheritDoc} */
-    @Override protected CacheDataRow getRow(BPlusIO<CacheSearchRow> io, long pageAddr, int idx, Object flags)
+    @Override public CacheDataRow getRow(BPlusIO<CacheSearchRow> io, long pageAddr, int idx, Object flags)
         throws IgniteCheckedException {
-        long link = ((RowLinkIO)io).getLink(pageAddr, idx);
-        int hash = ((RowLinkIO)io).getHash(pageAddr, idx);
-        int cacheId = ((RowLinkIO)io).getCacheId(pageAddr, idx);
+        RowLinkIO rowIo = (RowLinkIO)io;
+
+        long link = rowIo.getLink(pageAddr, idx);
+        int hash = rowIo.getHash(pageAddr, idx);
+
+        int cacheId = grp.sharedGroup() ? rowIo.getCacheId(pageAddr, idx) : CU.UNDEFINED_CACHE_ID;
 
         CacheDataRowAdapter.RowData x = flags != null ?
             (CacheDataRowAdapter.RowData)flags :
             CacheDataRowAdapter.RowData.FULL;
 
-        return rowStore.dataRow(cacheId, hash, link, x);
+        if (grp.mvccEnabled()) {
+            long mvccCrdVer = rowIo.getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = rowIo.getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = rowIo.getMvccOperationCounter(pageAddr, idx);
+
+            return rowStore.mvccRow(cacheId, hash, link, x, mvccCrdVer, mvccCntr, mvccOpCntr);
+        }
+        else
+            return rowStore.dataRow(cacheId, hash, link, x);
     }
 
     /**
@@ -173,6 +227,9 @@
                 if (data.nextLink() == 0) {
                     long addr = pageAddr + data.offset();
 
+                    if (grp.mvccEnabled())
+                        addr += MVCC_INFO_SIZE; // Skip MVCC info.
+
                     if (grp.storeCacheIdInDataPage())
                         addr += 4; // Skip cache id.
 
@@ -219,7 +276,7 @@
         }
 
         // TODO GG-11768.
-        CacheDataRowAdapter other = new CacheDataRowAdapter(link);
+        CacheDataRowAdapter other = grp.mvccEnabled() ? new MvccDataRow(link) : new CacheDataRowAdapter(link);
         other.initFromLink(grp, CacheDataRowAdapter.RowData.KEY_ONLY);
 
         byte[] bytes1 = other.key().valueBytes(grp.cacheObjectContext());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataInnerIO.java
index acb42a0..65d90e9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataInnerIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataInnerIO.java
@@ -37,12 +37,12 @@
     }
 
     /** {@inheritDoc} */
-    @Override public int getCacheId(long pageAddr, int idx) {
-        return PageUtils.getInt(pageAddr, offset(idx) + 12);
+    @Override protected boolean storeCacheId() {
+        return true;
     }
 
     /** {@inheritDoc} */
-    @Override protected boolean storeCacheId() {
-        return true;
+    @Override public int getCacheId(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 12);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataLeafIO.java
index 7263168..70f0f84 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataLeafIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/CacheIdAwareDataLeafIO.java
@@ -37,12 +37,12 @@
     }
 
     /** {@inheritDoc} */
-    @Override public int getCacheId(long pageAddr, int idx) {
-        return PageUtils.getInt(pageAddr, offset(idx) + 12);
+    @Override protected boolean storeCacheId() {
+        return true;
     }
 
     /** {@inheritDoc} */
-    @Override protected boolean storeCacheId() {
-        return true;
+    @Override public int getCacheId(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 12);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataInnerIO.java
index 8625338..cd8debb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataInnerIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataInnerIO.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.internal.processors.cache.tree;
 
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.util.typedef.internal.CU;
 
 /**
  *
@@ -35,14 +34,4 @@
     private DataInnerIO(int ver) {
         super(T_DATA_REF_INNER, ver, true, 12);
     }
-
-    /** {@inheritDoc} */
-    @Override public int getCacheId(long pageAddr, int idx) {
-        return CU.UNDEFINED_CACHE_ID;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean storeCacheId() {
-        return false;
-    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataLeafIO.java
index d53964f..3ba9619 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataLeafIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataLeafIO.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.internal.processors.cache.tree;
 
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.util.typedef.internal.CU;
 
 /**
  *
@@ -35,14 +34,4 @@
     private DataLeafIO(int ver) {
         super(T_DATA_REF_LEAF, ver, 12);
     }
-
-    /** {@inheritDoc} */
-    @Override public int getCacheId(long pageAddr, int idx) {
-        return CU.UNDEFINED_CACHE_ID;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected boolean storeCacheId() {
-        return false;
-    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataRow.java
index 29bbaaf..806f030 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/DataRow.java
@@ -42,7 +42,7 @@
      * @param part Partition.
      * @param rowData Required row data.
      */
-    DataRow(CacheGroupContext grp, int hash, long link, int part, RowData rowData) {
+    protected DataRow(CacheGroupContext grp, int hash, long link, int part, RowData rowData) {
         super(link);
 
         this.hash = hash;
@@ -50,8 +50,9 @@
         this.part = part;
 
         try {
-            // We can not init data row lazily because underlying buffer can be concurrently cleared.
-            initFromLink(grp, rowData);
+            // We can not init data row lazily outside of entry lock because underlying buffer can be concurrently cleared.
+            if (rowData != RowData.LINK_ONLY)
+                initFromLink(grp, rowData);
         }
         catch (IgniteCheckedException e) {
             throw new IgniteException(e);
@@ -81,6 +82,27 @@
         this.cacheId = cacheId;
     }
 
+    /**
+     * @param link Link.
+     */
+    protected DataRow(long link) {
+        super(link);
+    }
+
+    /**
+     *
+     */
+    DataRow() {
+        super(0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void key(KeyCacheObject key) {
+        super.key(key);
+
+        hash = key.hashCode();
+    }
+
     /** {@inheritDoc} */
     @Override public int partition() {
         return part;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java
index 0b1c931..554b86a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingEntriesTree.java
@@ -30,7 +30,7 @@
  */
 public class PendingEntriesTree extends BPlusTree<PendingRow, PendingRow> {
     /** */
-    public final static Object WITHOUT_KEY = new Object();
+    public static final Object WITHOUT_KEY = new Object();
 
     /** */
     private final CacheGroupContext grp;
@@ -60,7 +60,8 @@
             metaPageId,
             reuseList,
             grp.sharedGroup() ? CacheIdAwarePendingEntryInnerIO.VERSIONS : PendingEntryInnerIO.VERSIONS,
-            grp.sharedGroup() ? CacheIdAwarePendingEntryLeafIO.VERSIONS : PendingEntryLeafIO.VERSIONS);
+            grp.sharedGroup() ? CacheIdAwarePendingEntryLeafIO.VERSIONS : PendingEntryLeafIO.VERSIONS,
+            grp.shared().kernalContext().failure());
 
         this.grp = grp;
 
@@ -70,8 +71,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected int compare(BPlusIO<PendingRow> iox, long pageAddr, int idx, PendingRow row)
-        throws IgniteCheckedException {
+    @Override protected int compare(BPlusIO<PendingRow> iox, long pageAddr, int idx, PendingRow row) {
         PendingRowIO io = (PendingRowIO)iox;
 
         int cmp;
@@ -111,7 +111,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected PendingRow getRow(BPlusIO<PendingRow> io, long pageAddr, int idx, Object flag)
+    @Override public PendingRow getRow(BPlusIO<PendingRow> io, long pageAddr, int idx, Object flag)
         throws IgniteCheckedException {
         PendingRow row = io.getLookupRow(this, pageAddr, idx);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingRow.java
index 1a6a183..4116ae0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/PendingRow.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.processors.cache.CacheGroupContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -68,7 +69,7 @@
      * @throws IgniteCheckedException If failed.
      */
     PendingRow initKey(CacheGroupContext grp) throws IgniteCheckedException {
-        CacheDataRowAdapter rowData = new CacheDataRowAdapter(link);
+        CacheDataRowAdapter rowData = grp.mvccEnabled() ? new MvccDataRow(link) : new CacheDataRowAdapter(link);
         rowData.initFromLink(grp, CacheDataRowAdapter.RowData.KEY_ONLY);
 
         key = rowData.key();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/RowLinkIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/RowLinkIO.java
index 55f880c..8487351 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/RowLinkIO.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/RowLinkIO.java
@@ -40,5 +40,70 @@
      * @param idx Index.
      * @return Cache ID or {@code 0} if cache ID is not defined.
      */
-    public int getCacheId(long pageAddr, int idx);
+    default int getCacheId(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc coordinator version.
+     */
+    default long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc operation counter.
+     */
+    default int getMvccOperationCounter(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc counter.
+     */
+    default long getMvccCounter(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc coordinator version.
+     */
+    default long getMvccLockCoordinatorVersion(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc counter.
+     */
+    default long getMvccLockCounter(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @param lockCrd Mvcc lock coordinator version.
+     */
+    default void setMvccLockCoordinatorVersion(long pageAddr, int idx, long lockCrd) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @param lockCntr Mvcc lock counter.
+     */
+    default void setMvccLockCounter(long pageAddr, int idx, long lockCntr) {
+        throw new UnsupportedOperationException();
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/SearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/SearchRow.java
index 6257013..4b166d4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/SearchRow.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/SearchRow.java
@@ -18,7 +18,13 @@
 package org.apache.ignite.internal.processors.cache.tree;
 
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
 
 /**
  *
@@ -73,4 +79,29 @@
     @Override public int cacheId() {
         return cacheId;
     }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return MVCC_CRD_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return MVCC_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return MVCC_OP_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte mvccTxState() {
+        return TxState.NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(SearchRow.class, this);
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataInnerIO.java
new file mode 100644
index 0000000..32cd8ba
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataInnerIO.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.tree.AbstractDataInnerIO;
+
+/**
+ *
+ */
+public final class MvccCacheIdAwareDataInnerIO extends AbstractDataInnerIO {
+    /** */
+    public static final IOVersions<MvccCacheIdAwareDataInnerIO> VERSIONS = new IOVersions<>(
+        new MvccCacheIdAwareDataInnerIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private MvccCacheIdAwareDataInnerIO(int ver) {
+        super(T_CACHE_ID_DATA_REF_MVCC_INNER, ver, true, 36);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeCacheId() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeMvccVersion() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getCacheId(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 12);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 24);
+    }
+
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 32);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataLeafIO.java
new file mode 100644
index 0000000..3da7729
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccCacheIdAwareDataLeafIO.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.tree.AbstractDataLeafIO;
+
+/**
+ *
+ */
+public final class MvccCacheIdAwareDataLeafIO extends AbstractDataLeafIO {
+    /** */
+    public static final IOVersions<MvccCacheIdAwareDataLeafIO> VERSIONS = new IOVersions<>(
+        new MvccCacheIdAwareDataLeafIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private MvccCacheIdAwareDataLeafIO(int ver) {
+        super(T_CACHE_ID_DATA_REF_MVCC_LEAF, ver, 52);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeCacheId() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeMvccVersion() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getCacheId(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 12);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 24);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 32);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccLockCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 36);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccLockCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 44);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMvccLockCoordinatorVersion(long pageAddr, int idx, long lockCrd) {
+        PageUtils.putLong(pageAddr, offset(idx) + 36, lockCrd);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMvccLockCounter(long pageAddr, int idx, long lockCntr) {
+        PageUtils.putLong(pageAddr, offset(idx) + 44, lockCntr);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataInnerIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataInnerIO.java
new file mode 100644
index 0000000..0b2f91c
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataInnerIO.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.tree.AbstractDataInnerIO;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.lang.IgniteInClosure;
+
+/**
+ *
+ */
+public final class MvccDataInnerIO extends AbstractDataInnerIO {
+    /** */
+    public static final IOVersions<MvccDataInnerIO> VERSIONS = new IOVersions<>(
+        new MvccDataInnerIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private MvccDataInnerIO(int ver) {
+        super(T_DATA_REF_MVCC_INNER, ver, true, 32);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void visit(long pageAddr, IgniteInClosure<CacheSearchRow> c) {
+        int cnt = getCount(pageAddr);
+
+        for (int i = 0; i < cnt; i++)
+            c.apply(new MvccDataRow(getLink(pageAddr, i)));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeMvccVersion() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 12);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 20);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 28);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataLeafIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataLeafIO.java
new file mode 100644
index 0000000..ab498d9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataLeafIO.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.tree.AbstractDataLeafIO;
+import org.apache.ignite.lang.IgniteInClosure;
+
+/**
+ *
+ */
+public final class MvccDataLeafIO extends AbstractDataLeafIO {
+    /** */
+    public static final IOVersions<MvccDataLeafIO> VERSIONS = new IOVersions<>(
+        new MvccDataLeafIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private MvccDataLeafIO(int ver) {
+        super(T_DATA_REF_MVCC_LEAF, ver, 48);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void visit(long pageAddr, IgniteInClosure<CacheSearchRow> c) {
+        int cnt = getCount(pageAddr);
+
+        for (int i = 0; i < cnt; i++)
+            c.apply(new MvccDataRow(getLink(pageAddr, i)));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean storeMvccVersion() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 12);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 20);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 28);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccLockCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 32);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccLockCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 40);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMvccLockCoordinatorVersion(long pageAddr, int idx, long lockCrd) {
+        PageUtils.putLong(pageAddr, offset(idx) + 32, lockCrd);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMvccLockCounter(long pageAddr, int idx, long lockCntr) {
+        PageUtils.putLong(pageAddr, offset(idx) + 40, lockCntr);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataRow.java
new file mode 100644
index 0000000..fc98452
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccDataRow.java
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.tree.DataRow;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO.MVCC_INFO_SIZE;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_BIT_OFF;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_MASK;
+
+/**
+ *
+ */
+public class MvccDataRow extends DataRow {
+    /** Mvcc coordinator version. */
+    @GridToStringInclude
+    protected long mvccCrd;
+
+    /** Mvcc counter. */
+    @GridToStringInclude
+    protected long mvccCntr;
+
+    /** Mvcc operation counter. */
+    @GridToStringInclude
+    protected int mvccOpCntr;
+
+    /** Mvcc tx state. */
+    @GridToStringInclude
+    protected byte mvccTxState;
+
+    /** New mvcc coordinator version. */
+    @GridToStringInclude
+    protected long newMvccCrd;
+
+    /** New mvcc counter. */
+    @GridToStringInclude
+    protected long newMvccCntr;
+
+    /** New mvcc operation counter. */
+    @GridToStringInclude
+    protected int newMvccOpCntr;
+
+    /** New mvcc tx state. */
+    @GridToStringInclude
+    protected byte newMvccTxState;
+
+    /**
+     * @param link Link.
+     */
+    public MvccDataRow(long link) {
+        super(link);
+    }
+
+    /**
+     * @param grp Context.
+     * @param hash Key hash.
+     * @param link Link.
+     * @param part Partition number.
+     * @param rowData Data.
+     * @param crdVer Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Mvcc operation counter.
+     */
+    public MvccDataRow(CacheGroupContext grp,
+        int hash,
+        long link,
+        int part,
+        RowData rowData,
+        long crdVer,
+        long mvccCntr,
+        int mvccOpCntr) {
+        super(grp, hash, link, part, rowData);
+
+        assert MvccUtils.mvccVersionIsValid(crdVer, mvccCntr, mvccOpCntr);
+
+        assert rowData == RowData.LINK_ONLY
+            || this.mvccCrd == crdVer && this.mvccCntr == mvccCntr && this.mvccOpCntr == mvccOpCntr :
+        "mvccVer=" + new MvccVersionImpl(crdVer, mvccCntr, mvccOpCntr) +
+            ", dataMvccVer=" + new MvccVersionImpl(this.mvccCrd, this.mvccCntr, this.mvccOpCntr) ;
+
+        if (rowData == RowData.LINK_ONLY) {
+            this.mvccCrd = crdVer;
+            this.mvccCntr = mvccCntr;
+            this.mvccOpCntr = mvccOpCntr;
+        }
+    }
+
+    /**
+     * @param key Key.
+     * @param val Value.
+     * @param ver Version.
+     * @param part Partition.
+     * @param expireTime Expire time.
+     * @param cacheId Cache ID.
+     * @param mvccVer Mvcc version.
+     * @param newMvccVer New mvcc version.
+     */
+    public MvccDataRow(KeyCacheObject key, CacheObject val, GridCacheVersion ver, int part, long expireTime, int cacheId,
+        MvccVersion mvccVer, MvccVersion newMvccVer) {
+        super(key, val, ver, part, expireTime, cacheId);
+
+        this.mvccCrd = mvccVer.coordinatorVersion();
+        this.mvccCntr = mvccVer.counter();
+        this.mvccOpCntr = mvccVer.operationCounter();
+
+        if (newMvccVer == null) {
+            newMvccCrd = MVCC_CRD_COUNTER_NA;
+            newMvccCntr = MVCC_COUNTER_NA;
+            newMvccOpCntr = MVCC_OP_COUNTER_NA;
+        }
+        else {
+            newMvccCrd = newMvccVer.coordinatorVersion();
+            newMvccCntr = newMvccVer.counter();
+            newMvccOpCntr = newMvccVer.operationCounter();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected int readHeader(long addr, int off) {
+        // xid_min.
+        mvccCrd = PageUtils.getLong(addr, off);
+        mvccCntr = PageUtils.getLong(addr, off + 8);
+
+        int withHint = PageUtils.getInt(addr, off + 16);
+
+        mvccOpCntr = withHint & ~MVCC_HINTS_MASK;
+        mvccTxState = (byte)(withHint >>> MVCC_HINTS_BIT_OFF);
+
+        assert MvccUtils.mvccVersionIsValid(mvccCrd, mvccCntr, mvccOpCntr);
+
+        // xid_max.
+        newMvccCrd = PageUtils.getLong(addr, off + 20);
+        newMvccCntr = PageUtils.getLong(addr, off + 28);
+
+        withHint = PageUtils.getInt(addr, off + 36);
+
+        newMvccOpCntr = withHint & ~MVCC_HINTS_MASK;
+        newMvccTxState = (byte)(withHint >>> MVCC_HINTS_BIT_OFF);
+
+        assert newMvccCrd == MVCC_CRD_COUNTER_NA || MvccUtils.mvccVersionIsValid(newMvccCrd, newMvccCntr, newMvccOpCntr);
+
+        return MVCC_INFO_SIZE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return mvccCrd;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return mvccCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return mvccOpCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte mvccTxState() {
+        return mvccTxState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCoordinatorVersion() {
+        return newMvccCrd;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCounter() {
+        return newMvccCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int newMvccOperationCounter() {
+        return newMvccOpCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte newMvccTxState() {
+        return newMvccTxState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void newMvccVersion(long crd, long cntr, int opCntr) {
+        newMvccCrd = crd;
+        newMvccCntr = cntr;
+        newMvccOpCntr = opCntr;
+
+        // reset tx state
+        newMvccTxState = TxState.NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void mvccVersion(long crd, long cntr, int opCntr) {
+        mvccCrd = crd;
+        mvccCntr = cntr;
+        mvccOpCntr = opCntr;
+
+        // reset tx state
+        mvccTxState = TxState.NA;
+    }
+
+    /**
+     * @param mvccTxState Mvcc version Tx state hint.
+     */
+    public void mvccTxState(byte mvccTxState) {
+        this.mvccTxState = mvccTxState;
+    }
+
+    /**
+     * @param newMvccTxState New Mvcc version Tx state hint.
+     */
+    public void newMvccTxState(byte newMvccTxState) {
+        this.newMvccTxState = newMvccTxState;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int size() throws IgniteCheckedException {
+        return super.size() + MVCC_INFO_SIZE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int headerSize() {
+        return MVCC_INFO_SIZE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccDataRow.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRow.java
new file mode 100644
index 0000000..81b1b58
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRow.java
@@ -0,0 +1,485 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.tree.RowLinkIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.tostring.GridToStringExclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.compare;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.isActive;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.isVisible;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.unexpectedStateException;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_BIT_OFF;
+import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.MVCC_HINTS_MASK;
+
+/**
+ *
+ */
+public class MvccUpdateDataRow extends MvccDataRow implements MvccUpdateResult, BPlusTree.TreeVisitorClosure<CacheSearchRow, CacheDataRow> {
+    /** */
+    private static final int FIRST = DIRTY << 1;
+    /** */
+    private static final int CHECK_VERSION = FIRST << 1;
+    /** */
+    private static final int LAST_COMMITTED_FOUND = CHECK_VERSION << 1;
+    /** */
+    private static final int CAN_CLEANUP = LAST_COMMITTED_FOUND << 1;
+    /** */
+    private static final int PRIMARY = CAN_CLEANUP << 1;
+    /** */
+    private static final int REMOVE_OR_LOCK = PRIMARY << 1;
+    /** */
+    private static final int NEED_HISTORY = REMOVE_OR_LOCK << 1;
+    /**
+     * During mvcc transaction processing conflicting row version could be met in storage.
+     * Not all such cases should lead to transaction abort.
+     * E.g. if UPDATE for a row meets concurrent INSERT for the same row
+     * (and row did not exist before both operations) then it means that UPDATE does not see the row at all
+     * and can proceed.
+     * This flag enables such mode when conflicting version should not lead to abort immediately
+     * but more versions should be checked.
+     */
+    private static final int FAST_UPDATE = NEED_HISTORY << 1;
+    /** */
+    private static final int FAST_MISMATCH = FAST_UPDATE << 1;
+    /** */
+    private static final int DELETED = FAST_MISMATCH << 1;
+
+    /** */
+    @GridToStringExclude
+    private final GridCacheContext cctx;
+
+    /** */
+    private ResultType res;
+
+    /** */
+    @GridToStringExclude
+    private int state;
+
+    /** */
+    private List<MvccLinkAwareSearchRow> cleanupRows;
+
+    /** */
+    private final MvccSnapshot mvccSnapshot;
+
+    /** */
+    private CacheDataRow oldRow;
+
+    /** */
+    @GridToStringExclude
+    private long resCrd;
+
+    /** */
+    @GridToStringExclude
+    private long resCntr;
+
+    /** */
+    private List<MvccLinkAwareSearchRow> historyRows;
+
+    /**
+     * @param cctx Cache context.
+     * @param key Key.
+     * @param val Value.
+     * @param ver Version.
+     * @param part Partition.
+     * @param expireTime Expire time.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param newVer Update version.
+     * @param primary Primary node flag.
+     * @param lockOnly Whether no actual update should be done and the only thing to do is to acquire lock.
+     * @param needHistory Whether to collect rows created or affected by the current tx.
+     * @param fastUpdate Fast update visit mode.
+     */
+    public MvccUpdateDataRow(
+        GridCacheContext cctx,
+        KeyCacheObject key,
+        CacheObject val,
+        GridCacheVersion ver,
+        int part,
+        long expireTime,
+        MvccSnapshot mvccSnapshot,
+        MvccVersion newVer,
+        boolean primary,
+        boolean lockOnly,
+        boolean needHistory,
+        boolean fastUpdate) {
+        super(key,
+            val,
+            ver,
+            part,
+            expireTime,
+            cctx.cacheId(),
+            mvccSnapshot,
+            newVer);
+
+        this.mvccSnapshot = mvccSnapshot;
+        this.cctx = cctx;
+
+        assert !lockOnly || val == null;
+
+        int flags = FIRST;
+
+        if (primary)
+            flags |= PRIMARY | CHECK_VERSION;
+
+        if (primary && (lockOnly || val == null))
+            flags |= CAN_WRITE | REMOVE_OR_LOCK;
+
+        if (needHistory)
+            flags |= NEED_HISTORY;
+
+        if (fastUpdate)
+            flags |= FAST_UPDATE;
+
+        setFlags(flags);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int visit(BPlusTree<CacheSearchRow, CacheDataRow> tree,
+        BPlusIO<CacheSearchRow> io,
+        long pageAddr,
+        int idx, IgniteWriteAheadLogManager wal)
+        throws IgniteCheckedException {
+        unsetFlags(DIRTY);
+
+        RowLinkIO rowIo = (RowLinkIO)io;
+
+        // Check if entry is locked on primary node.
+        if (isFlagsSet(PRIMARY | FIRST)) {
+            long lockCrd = rowIo.getMvccLockCoordinatorVersion(pageAddr, idx);
+            long lockCntr = rowIo.getMvccLockCounter(pageAddr, idx);
+
+            // We cannot continue while entry is locked by another transaction.
+            if ((lockCrd != mvccCrd || lockCntr != mvccCntr)
+                && isActive(cctx, lockCrd, lockCntr, mvccSnapshot)) {
+                resCrd = lockCrd;
+                resCntr = lockCntr;
+
+                res = ResultType.LOCKED;
+
+                return setFlags(STOP);
+            }
+        }
+
+        MvccDataRow row = (MvccDataRow)tree.getRow(io, pageAddr, idx, RowData.LINK_WITH_HEADER);
+
+        // Check whether the row was updated by current transaction.
+        // In this case the row is already locked by current transaction and visible to it.
+        if (isFlagsSet(FIRST)) {
+            boolean removed = row.newMvccCoordinatorVersion() != MVCC_CRD_COUNTER_NA;
+
+            long rowCrd, rowCntr; int rowOpCntr;
+
+            if (removed) {
+                rowCrd = row.newMvccCoordinatorVersion();
+                rowCntr = row.newMvccCounter();
+                rowOpCntr = row.newMvccOperationCounter();
+            }
+            else {
+                rowCrd = row.mvccCoordinatorVersion();
+                rowCntr = row.mvccCounter();
+                rowOpCntr = row.mvccOperationCounter();
+            }
+
+            if (compare(mvccSnapshot, rowCrd, rowCntr) == 0) {
+                res = mvccOpCntr == rowOpCntr ? ResultType.VERSION_FOUND :
+                    removed ? ResultType.PREV_NULL : ResultType.PREV_NOT_NULL;
+
+                if (removed)
+                    setFlags(DELETED);
+                else
+                    oldRow = row;
+
+                setFlags(LAST_COMMITTED_FOUND);
+            }
+        }
+
+        long rowLink = row.link();
+
+        long rowCrd = row.mvccCoordinatorVersion();
+        long rowCntr = row.mvccCounter();
+
+        // with hint bits
+        int rowOpCntr = (row.mvccTxState() << MVCC_HINTS_BIT_OFF) | (row.mvccOperationCounter() & ~MVCC_HINTS_MASK);
+
+        long rowNewCrd = row.newMvccCoordinatorVersion();
+        long rowNewCntr = row.newMvccCounter();
+
+        // with hint bits
+        int rowNewOpCntr = (row.newMvccTxState() << MVCC_HINTS_BIT_OFF) | (row.newMvccOperationCounter() & ~MVCC_HINTS_MASK);
+
+        // Search for youngest committed by another transaction row.
+        if (!isFlagsSet(LAST_COMMITTED_FOUND)) {
+            if (!(resCrd == rowCrd && resCntr == rowCntr)) { // It's possible it is a chain of aborted changes
+                byte txState = MvccUtils.state(cctx, rowCrd, rowCntr, rowOpCntr);
+
+                if (txState == TxState.COMMITTED) {
+                    setFlags(LAST_COMMITTED_FOUND);
+
+                    if (rowNewCrd != MVCC_CRD_COUNTER_NA) {
+                        if (rowNewCrd == rowCrd && rowNewCntr == rowCntr)
+                            // Row was deleted by the same Tx it was created
+                            txState = TxState.COMMITTED;
+                        else if (rowNewCrd == resCrd && rowNewCntr == resCntr)
+                            // The row is linked to the previously checked aborted version;
+                            txState = TxState.ABORTED;
+                        else
+                            // Check with TxLog if removed version is committed;
+                            txState = MvccUtils.state(cctx, rowNewCrd, rowNewCntr, rowNewOpCntr);
+
+                        if (!(txState == TxState.COMMITTED || txState == TxState.ABORTED))
+                            throw unexpectedStateException(cctx, txState, rowNewCrd, rowNewCntr, rowNewOpCntr, mvccSnapshot);
+
+                        if (txState == TxState.COMMITTED)
+                            setFlags(DELETED);
+                    }
+
+                    if (isFlagsSet(DELETED))
+                        res = ResultType.PREV_NULL;
+                    else {
+                        res = ResultType.PREV_NOT_NULL;
+
+                        oldRow = row;
+                    }
+
+                    if (isFlagsSet(CHECK_VERSION)) {
+                        long crdVer, cntr; int opCntr;
+
+                        if (isFlagsSet(DELETED)) {
+                            crdVer = rowNewCrd;
+                            cntr = rowNewCntr;
+                            opCntr = rowNewOpCntr;
+                        }
+                        else {
+                            crdVer = rowCrd;
+                            cntr = rowCntr;
+                            opCntr = rowOpCntr;
+                        }
+
+                        // If last committed row is not visible it is possible write conflict.
+                        if (!isVisible(cctx, mvccSnapshot, crdVer, cntr, opCntr, false)) {
+                            // In case when row is accessed without previous version check (FAST_UPDATE)
+                            // it is possible that we should consider this row non existent for current transaction
+                            // without signalling write conflict.
+                            // To do this we need to find youngest visible version and if it is removed version
+                            // or there is no visible version then there is no conflict.
+                            if (isFlagsSet(FAST_UPDATE)
+                                && !(isFlagsSet(DELETED)
+                                    && isVisible(cctx, mvccSnapshot, rowCrd, rowCntr, rowOpCntr, false))) {
+                                res = ResultType.PREV_NULL;
+
+                                setFlags(FAST_MISMATCH);
+                            }
+                            else {
+                                resCrd = crdVer;
+                                resCntr = cntr;
+
+                                res = ResultType.VERSION_MISMATCH; // Write conflict.
+
+                                return setFlags(STOP);
+                            }
+                        }
+                    }
+
+                    // Lock entry for primary partition if needed.
+                    // If invisible row is found for FAST_UPDATE case we should not lock row.
+                    if (isFlagsSet(PRIMARY | REMOVE_OR_LOCK) && !isFlagsSet(FAST_MISMATCH)) {
+                        rowIo.setMvccLockCoordinatorVersion(pageAddr, idx, mvccCrd);
+                        rowIo.setMvccLockCounter(pageAddr, idx, mvccCntr);
+
+                        // TODO Delta record IGNITE-7991
+
+                        setFlags(DIRTY);
+                    }
+
+                    unsetFlags(CAN_WRITE); // No need to acquire write locks anymore
+                }
+                else if (txState == TxState.ABORTED) { // save aborted version to fast check new version of next row
+                    resCrd = rowCrd;
+                    resCntr = rowCntr;
+                }
+                else
+                    throw unexpectedStateException(cctx, txState, rowCrd, rowCntr, rowOpCntr, mvccSnapshot);
+            }
+        }
+        // Search for youngest visible row.
+        // If we have not found any visible version then we does not see this row.
+        else if (isFlagsSet(FAST_MISMATCH)) {
+            assert !isFlagsSet(CAN_CLEANUP);
+            assert mvccVersionIsValid(rowNewCrd, rowNewCntr, rowNewOpCntr);
+
+            // Update version could be visible only if it is removal version,
+            // previous create versions were already checked in previous step and are definitely invisible.
+            // If we found visible removal version then we does not see this row.
+            if (isVisible(cctx, mvccSnapshot, rowNewCrd, rowNewCntr, rowNewOpCntr, false))
+                unsetFlags(FAST_MISMATCH);
+            // If the youngest visible for current transaction version is not removal version then it is write conflict.
+            else if (isVisible(cctx, mvccSnapshot, rowCrd, rowCntr, rowOpCntr, false)) {
+                resCrd = rowCrd;
+                resCntr = rowCntr;
+
+                res = ResultType.VERSION_MISMATCH;
+
+                return setFlags(STOP);
+            }
+        }
+
+        long cleanupVer = mvccSnapshot.cleanupVersion();
+
+        if (cleanupVer > MVCC_OP_COUNTER_NA // Do not clean if cleanup version is not assigned.
+            && !isFlagsSet(CAN_CLEANUP)
+            && isFlagsSet(LAST_COMMITTED_FOUND | DELETED)) {
+            assert mvccVersionIsValid(rowNewCrd, rowNewCntr, rowNewOpCntr);
+
+            // We can cleanup previous row only if it was deleted by another
+            // transaction and delete version is less or equal to cleanup one
+            if (rowNewCrd < mvccCrd || Long.compare(cleanupVer, rowNewCntr) >= 0)
+                setFlags(CAN_CLEANUP);
+        }
+
+        if (isFlagsSet(CAN_CLEANUP)
+            || !isFlagsSet(LAST_COMMITTED_FOUND)) { // can cleanup aborted versions
+            if (cleanupRows == null)
+                cleanupRows = new ArrayList<>();
+
+            cleanupRows.add(new MvccLinkAwareSearchRow(cacheId, key, rowCrd, rowCntr, rowOpCntr & ~MVCC_HINTS_MASK, rowLink));
+        }
+        else {
+            // Row obsoleted by current operation, all rows created or updated with current tx.
+            if (isFlagsSet(NEED_HISTORY)
+                && (row == oldRow
+                    || (rowCrd == mvccCrd && rowCntr == mvccCntr)
+                    || (rowNewCrd == mvccCrd && rowNewCntr == mvccCntr))) {
+                if (historyRows == null)
+                    historyRows = new ArrayList<>();
+
+                historyRows.add(new MvccLinkAwareSearchRow(cacheId, key, rowCrd, rowCntr, rowOpCntr & ~MVCC_HINTS_MASK, rowLink));
+            }
+
+            if (cleanupVer > MVCC_OP_COUNTER_NA // Do not clean if cleanup version is not assigned.
+                && !isFlagsSet(CAN_CLEANUP)
+                && isFlagsSet(LAST_COMMITTED_FOUND)
+                && (rowCrd < mvccCrd || Long.compare(cleanupVer, rowCntr) >= 0))
+                // all further versions are guaranteed to be less than cleanup version
+                setFlags(CAN_CLEANUP);
+        }
+
+        return unsetFlags(FIRST);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int state() {
+        return state;
+    }
+
+    /**
+     * @return Old row.
+     */
+    public CacheDataRow oldRow() {
+        return oldRow;
+    }
+
+    /**
+     * @return {@code True} if previous value was non-null.
+     */
+    @Override public ResultType resultType() {
+        return res == null ? ResultType.PREV_NULL : res;
+    }
+
+    /**
+     * @return Rows which are safe to cleanup.
+     */
+    public List<MvccLinkAwareSearchRow> cleanupRows() {
+        return cleanupRows;
+    }
+
+    /**
+     * @return Result version.
+     */
+    @Override public MvccVersion resultVersion() {
+        switch (resultType()) {
+            case VERSION_FOUND:
+            case PREV_NULL:
+
+                return new MvccVersionImpl(mvccCrd, mvccCntr, mvccOpCntr);
+            case PREV_NOT_NULL:
+
+                return new MvccVersionImpl(oldRow.mvccCoordinatorVersion(), oldRow.mvccCounter(), oldRow.mvccOperationCounter());
+            case LOCKED:
+            case VERSION_MISMATCH:
+
+                assert resCrd != MVCC_CRD_COUNTER_NA && resCntr != MVCC_COUNTER_NA;
+
+                return new MvccVersionImpl(resCrd, resCntr, MVCC_OP_COUNTER_NA);
+            default:
+
+                throw new IllegalStateException("Unexpected result type: " + resultType());
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public List<MvccLinkAwareSearchRow> history() {
+        if (isFlagsSet(NEED_HISTORY) && historyRows == null)
+            historyRows = new ArrayList<>();
+
+        return historyRows;
+    }
+
+    /** */
+    private boolean isFlagsSet(int flags) {
+        return (state & flags) == flags;
+    }
+
+    /** */
+    private int setFlags(int flags) {
+        return state |= flags;
+    }
+
+    /** */
+    private int unsetFlags(int flags) {
+        return state &= (~flags);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccUpdateDataRow.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRowNative.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRowNative.java
new file mode 100644
index 0000000..38611a9
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateDataRowNative.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.tree.RowLinkIO;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridLongList;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public class MvccUpdateDataRowNative extends MvccDataRow implements BPlusTree.TreeRowClosure<CacheSearchRow, CacheDataRow> {
+    /** */
+    private final MvccSnapshot mvccSnapshot;
+    /** */
+    private ResultType res;
+    /** */
+    private boolean canCleanup;
+    /** */
+    private GridLongList activeTxs;
+    /** */
+    private List<MvccLinkAwareSearchRow> cleanupRows;
+    /** */
+    private CacheDataRow oldRow;
+
+    /**
+     * @param key Key.
+     * @param val Value.
+     * @param ver Version.
+     * @param expireTime Expire time.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param newVer Update version.
+     * @param part Partition.
+     * @param cctx Cache context.
+     */
+    public MvccUpdateDataRowNative(
+        KeyCacheObject key,
+        CacheObject val,
+        GridCacheVersion ver,
+        long expireTime,
+        MvccSnapshot mvccSnapshot,
+        MvccVersion newVer,
+        int part,
+        GridCacheContext cctx) {
+        super(key,
+            val,
+            ver,
+            part,
+            expireTime,
+            cctx.cacheId(),
+            mvccSnapshot,
+            newVer);
+
+        this.mvccSnapshot = mvccSnapshot;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean apply(BPlusTree<CacheSearchRow, CacheDataRow> tree,
+        BPlusIO<CacheSearchRow> io,
+        long pageAddr,
+        int idx)
+        throws IgniteCheckedException {
+        RowLinkIO rowIo = (RowLinkIO)io;
+
+        // Assert version grows.
+        assert assertVersion(rowIo, pageAddr, idx);
+
+        boolean checkActive = mvccSnapshot.activeTransactions().size() > 0;
+
+        boolean txActive = false;
+
+        long rowCrdVer = rowIo.getMvccCoordinatorVersion(pageAddr, idx);
+
+        long crdVer = mvccCoordinatorVersion();
+
+        boolean isFirstRmvd = false;
+
+        if (res == null) {
+            int cmp = Long.compare(crdVer, rowCrdVer);
+
+            if (cmp == 0)
+                cmp = Long.compare(mvccSnapshot.counter(), rowIo.getMvccCounter(pageAddr, idx));
+
+            if (cmp == 0)
+                res = ResultType.VERSION_FOUND;
+            else {
+                oldRow = tree.getRow(io, pageAddr, idx, RowData.LINK_WITH_HEADER);
+
+                isFirstRmvd = oldRow.newMvccCoordinatorVersion() != 0;
+
+                if (isFirstRmvd)
+                    res = ResultType.PREV_NULL;
+                else
+                    res = ResultType.PREV_NOT_NULL;
+            }
+        }
+
+        // Suppose transactions on previous coordinator versions are done.
+        if (checkActive && crdVer == rowCrdVer) {
+            long rowMvccCntr = rowIo.getMvccCounter(pageAddr, idx);
+
+            long activeTx = isFirstRmvd ? oldRow.newMvccCounter() : rowMvccCntr;
+
+            if (mvccSnapshot.activeTransactions().contains(activeTx)) {
+                txActive = true;
+
+                if (activeTxs == null)
+                    activeTxs = new GridLongList();
+
+                activeTxs.add(activeTx);
+            }
+        }
+
+        if (!txActive) {
+            assert Long.compare(crdVer, rowCrdVer) >= 0;
+
+            int cmp;
+
+            long rowCntr = rowIo.getMvccCounter(pageAddr, idx);
+
+            if (crdVer == rowCrdVer)
+                cmp = Long.compare(mvccSnapshot.cleanupVersion(), rowCntr);
+            else
+                cmp = 1;
+
+            if (cmp >= 0) {
+                // Do not cleanup oldest version.
+                if (canCleanup) {
+                    assert MvccUtils.mvccVersionIsValid(rowCrdVer, rowCntr);
+
+                    // Should not be possible to cleanup active tx.
+                    assert rowCrdVer != crdVer || !mvccSnapshot.activeTransactions().contains(rowCntr);
+
+                    if (cleanupRows == null)
+                        cleanupRows = new ArrayList<>();
+
+                    cleanupRows.add(new MvccLinkAwareSearchRow(cacheId, key, rowCrdVer, rowCntr,
+                        rowIo.getMvccOperationCounter(pageAddr, idx), rowIo.getLink(pageAddr, idx)));
+                }
+                else
+                    canCleanup = true;
+            }
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return MvccUtils.MVCC_START_OP_CNTR;
+    }
+
+    /**
+     * @return Old row.
+     */
+    public CacheDataRow oldRow() {
+        return oldRow;
+    }
+
+    /**
+     * @return {@code True} if previous value was non-null.
+     */
+    public ResultType resultType() {
+        return res == null ? ResultType.PREV_NULL : res;
+    }
+
+    /**
+     * @return Active transactions to wait for.
+     */
+    @Nullable public GridLongList activeTransactions() {
+        return activeTxs;
+    }
+
+    /**
+     * @return Rows which are safe to cleanup.
+     */
+    public List<MvccLinkAwareSearchRow> cleanupRows() {
+        return cleanupRows;
+    }
+
+    /**
+     * @param io IO.
+     * @param pageAddr Page address.
+     * @param idx Item index.
+     * @return Always {@code true}.
+     */
+    private boolean assertVersion(RowLinkIO io, long pageAddr, int idx) {
+        long rowCrdVer = io.getMvccCoordinatorVersion(pageAddr, idx);
+        long rowCntr = io.getMvccCounter(pageAddr, idx);
+
+        int cmp = Long.compare(mvccCoordinatorVersion(), rowCrdVer);
+
+        if (cmp == 0)
+            cmp = Long.compare(mvccSnapshot.counter(), rowCntr);
+
+        // Can be equals if execute update on backup and backup already rebalanced value updated on primary.
+        assert cmp >= 0 : "[updCrd=" + mvccCoordinatorVersion() +
+            ", updCntr=" + mvccSnapshot.counter() +
+            ", rowCrd=" + rowCrdVer +
+            ", rowCntr=" + rowCntr + ']';
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccUpdateDataRowNative.class, this, "super", super.toString());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateResult.java
new file mode 100644
index 0000000..086a30d
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/MvccUpdateResult.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+import java.util.List;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.search.MvccLinkAwareSearchRow;
+
+/**
+ *
+ */
+public interface MvccUpdateResult {
+    /**
+     * @return Type of result.
+     */
+    public ResultType resultType();
+
+    /**
+     * @return Result version.
+     */
+    public MvccVersion resultVersion();
+
+    /**
+     *
+     * @return Collection of row created or affected by the current tx.
+     */
+    public List<MvccLinkAwareSearchRow> history();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/ResultType.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/ResultType.java
new file mode 100644
index 0000000..eecb4a5
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/data/ResultType.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.data;
+
+/**
+ *
+ */
+public enum ResultType {
+    /** */
+    VERSION_FOUND,
+    /** */
+    PREV_NULL,
+    /** */
+    PREV_NOT_NULL,
+    /** */
+    LOCKED,
+    /** */
+    VERSION_MISMATCH
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccFirstRowTreeClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccFirstRowTreeClosure.java
new file mode 100644
index 0000000..8546a66
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccFirstRowTreeClosure.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.tree.RowLinkIO;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.getNewVersion;
+
+/**
+ * Closure which returns the very first encountered row.
+ */
+public class MvccFirstRowTreeClosure implements MvccTreeClosure {
+    /** */
+    private final GridCacheContext cctx;
+
+    /** */
+    private CacheDataRow res;
+
+    /**
+     * @param cctx Cache context.
+     */
+    public MvccFirstRowTreeClosure(GridCacheContext cctx) {
+        this.cctx = cctx;
+    }
+
+    /**
+     * @return Found row.
+     */
+    @Nullable public CacheDataRow row() {
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean apply(BPlusTree<CacheSearchRow, CacheDataRow> tree, BPlusIO<CacheSearchRow> io,
+        long pageAddr, int idx) throws IgniteCheckedException {
+        RowLinkIO rowIo = (RowLinkIO)io;
+
+        MvccVersion newVersion = getNewVersion(cctx, rowIo.getLink(pageAddr, idx));
+
+        if (newVersion == null)
+            res = tree.getRow(io, pageAddr, idx, CacheDataRowAdapter.RowData.NO_KEY);
+
+        return false;  // Stop search.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccFirstRowTreeClosure.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccLinkAwareSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccLinkAwareSearchRow.java
new file mode 100644
index 0000000..1e763a6
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccLinkAwareSearchRow.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+
+/**
+ * MVCC search row which contains a link. Now used only for cleanup purposes.
+ */
+public class MvccLinkAwareSearchRow extends MvccSearchRow {
+    /** */
+    private final long link;
+
+    /**
+     * @param cacheId Cache ID.
+     * @param key Key.
+     * @param crdVer Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param link Link.
+     */
+    public MvccLinkAwareSearchRow(int cacheId, KeyCacheObject key, long crdVer, long mvccCntr, int mvccOpCntr, long link) {
+        super(cacheId, key, crdVer, mvccCntr, mvccOpCntr);
+
+        assert link != 0L;
+
+        this.link = link;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long link() {
+        return link;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMaxSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMaxSearchRow.java
new file mode 100644
index 0000000..f5f3b67
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMaxSearchRow.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.tree.SearchRow;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Search row for maximum key version.
+ */
+public class MvccMaxSearchRow extends SearchRow {
+    /**
+     * @param cacheId Cache ID.
+     * @param key Key.
+     */
+    public MvccMaxSearchRow(int cacheId, KeyCacheObject key) {
+        super(cacheId, key);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return Long.MAX_VALUE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return Long.MAX_VALUE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return Integer.MAX_VALUE;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccMaxSearchRow.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMinSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMinSearchRow.java
new file mode 100644
index 0000000..a1cb8fc
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccMinSearchRow.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.tree.SearchRow;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * Search row for minimum key version.
+ */
+public class MvccMinSearchRow extends SearchRow {
+    /**
+     * @param cacheId Cache ID.
+     * @param key Key.
+     */
+    public MvccMinSearchRow(int cacheId, KeyCacheObject key) {
+        super(cacheId, key);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return 1L;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return 1L;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return 1;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccMinSearchRow.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSearchRow.java
new file mode 100644
index 0000000..d0510f7
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSearchRow.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.tree.SearchRow;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * MVCC search row which uses specific MVCC version (coordinator + counter) to filter rows.
+ */
+public class MvccSearchRow extends SearchRow {
+    /** Coordinator version. */
+    protected long crdVer;
+
+    /** Counter. */
+    private long cntr;
+
+    /** Operation counter. */
+    protected int opCntr;
+
+    /**
+     * @param cacheId Cache ID.
+     * @param key Key.
+     * @param crdVer Coordinator version.
+     * @param cntr Counter.
+     */
+    public MvccSearchRow(int cacheId, KeyCacheObject key, long crdVer, long cntr, int opCntr) {
+        super(cacheId, key);
+
+        this.crdVer = crdVer;
+        this.cntr = cntr;
+        this.opCntr = opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return crdVer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return cntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return opCntr;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccSearchRow.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSnapshotSearchRow.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSnapshotSearchRow.java
new file mode 100644
index 0000000..432593d
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccSnapshotSearchRow.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.tree.RowLinkIO;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_INVISIBLE;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_VISIBLE;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_VISIBLE_REMOVED;
+
+/**
+ * Search row which returns the first row visible for the given snapshot. Usage:
+ * - set this row as the upper bound
+ * - pass the same row as search closure.
+ */
+public class MvccSnapshotSearchRow extends MvccSearchRow implements MvccTreeClosure {
+    /** */
+    private final GridCacheContext cctx;
+
+    /** Resulting row. */
+    private CacheDataRow res;
+
+    /** */
+    private MvccSnapshot snapshot;
+
+    /**
+     * Constructor.
+     *
+     * @param cctx
+     * @param key Key.
+     * @param snapshot Snapshot.
+     */
+    public MvccSnapshotSearchRow(GridCacheContext cctx, KeyCacheObject key,
+        MvccSnapshot snapshot) {
+        super(cctx.cacheId(), key, snapshot.coordinatorVersion(), snapshot.counter(), Integer.MAX_VALUE);
+
+        this.cctx = cctx;
+
+        this.snapshot = snapshot;
+    }
+
+    /**
+     * @return Found row.
+     */
+    @Nullable public CacheDataRow row() {
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean apply(BPlusTree<CacheSearchRow, CacheDataRow> tree, BPlusIO<CacheSearchRow> io,
+        long pageAddr, int idx) throws IgniteCheckedException {
+        RowLinkIO rowIo = (RowLinkIO)io;
+
+        long rowCrdVer = rowIo.getMvccCoordinatorVersion(pageAddr, idx);
+        long rowCntr = rowIo.getMvccCounter(pageAddr, idx);
+        int rowOpCntr = rowIo.getMvccOperationCounter(pageAddr, idx);
+
+        assert MvccUtils.mvccVersionIsValid(rowCrdVer, rowCntr, rowOpCntr);
+
+        if (MvccUtils.isVisible(cctx, snapshot, rowCrdVer, rowCntr, rowOpCntr, false)) {
+            int state = MvccUtils.getVisibleState(cctx, rowIo.getLink(pageAddr, idx), snapshot);
+
+            if (state == MVCC_INVISIBLE)
+                return true;
+
+            if (state == MVCC_VISIBLE_REMOVED)
+                res = null;
+            else {
+                assert state == MVCC_VISIBLE;
+
+                res = tree.getRow(io, pageAddr, idx, CacheDataRowAdapter.RowData.NO_KEY);
+
+                res.key(key());
+            }
+
+            return false; // Stop search.
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(MvccSnapshotSearchRow.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccTreeClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccTreeClosure.java
new file mode 100644
index 0000000..031903f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/tree/mvcc/search/MvccTreeClosure.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.tree.mvcc.search;
+
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheSearchRow;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+
+/**
+ * Common interface for MVCC search row closures (no-op, only to have clear inheritance hierarchy).
+ */
+public interface MvccTreeClosure extends BPlusTree.TreeRowClosure<CacheSearchRow, CacheDataRow> {
+    // No-op.
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java
index 8d2620f..727a372 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cluster/GridClusterStateProcessor.java
@@ -56,6 +56,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadOnlyMetastorage;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.ReadWriteMetastorage;
+import org.apache.ignite.internal.processors.task.GridInternal;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
@@ -510,13 +511,16 @@
 
                 transitionFuts.put(msg.requestId(), new GridFutureAdapter<Void>());
 
+                DiscoveryDataClusterState prevState = globalState;
+
                 globalState = DiscoveryDataClusterState.createTransitionState(
-                    globalState,
+                    prevState,
                     msg.activate(),
-                    msg.activate() ? msg.baselineTopology() : globalState.baselineTopology(),
+                    msg.activate() ? msg.baselineTopology() : prevState.baselineTopology(),
                     msg.requestId(),
                     topVer,
-                    nodeIds);
+                    nodeIds
+                );
 
                 if (msg.forceChangeBaselineTopology())
                     globalState.setTransitionResult(msg.requestId(), msg.activate());
@@ -1460,6 +1464,7 @@
     /**
      *
      */
+    @GridInternal
     private static class ClientChangeGlobalStateComputeRequest implements IgniteRunnable {
         /** */
         private static final long serialVersionUID = 0L;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java
index 6723ea4..4cbff6d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/continuous/GridContinuousProcessor.java
@@ -31,6 +31,7 @@
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -47,6 +48,7 @@
 import org.apache.ignite.internal.GridMessageListenHandler;
 import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException;
 import org.apache.ignite.internal.IgniteDeploymentCheckedException;
+import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.NodeStoppingException;
@@ -74,6 +76,7 @@
 import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.LT;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.util.worker.GridWorker;
@@ -1165,7 +1168,35 @@
                     throw e;
                 }
 
-                fut.get();
+                while (true) {
+                    try {
+                        fut.get(100, TimeUnit.MILLISECONDS);
+
+                        break;
+                    }
+                    catch (IgniteFutureTimeoutCheckedException ignored) {
+                        // Additional failover to break waiting on node left/fail
+                        // in case left/fail event processing failed, hanged or delayed.
+                        if (!ctx.discovery().alive(nodeId)) {
+                            SyncMessageAckFuture fut0 = syncMsgFuts.remove(futId);
+
+                            if (fut0 != null) {
+                                ClusterTopologyCheckedException err = new ClusterTopologyCheckedException(
+                                    "Node left grid after receiving, but before processing the message [node=" +
+                                        nodeId + "]");
+
+                                fut0.onDone(err);
+                            }
+
+                            break;
+                        }
+
+                        LT.warn(log, "Failed to wait for ack message. [node=" + nodeId +
+                            ", routine=" + routineId + "]");
+                    }
+                }
+
+                assert fut.isDone() : "Future in not finished [fut= " + fut + "]";
             }
             else {
                 final GridContinuousBatch batch = info.add(obj);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java
index 7980155..4af1e47 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastreamer/DataStreamerImpl.java
@@ -114,6 +114,7 @@
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.security.SecurityPermission;
 import org.apache.ignite.stream.StreamReceiver;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.events.EventType.EVT_NODE_FAILED;
@@ -381,12 +382,12 @@
     }
 
     /** {@inheritDoc} */
-    public void perThreadBufferSize(int size) {
+    @Override public void perThreadBufferSize(int size) {
         bufLdrSzPerThread = size;
     }
 
     /** {@inheritDoc} */
-    public int perThreadBufferSize() {
+    @Override public int perThreadBufferSize() {
         return bufLdrSzPerThread;
     }
 
@@ -624,48 +625,59 @@
      * @return Future.
      */
     public IgniteFuture<?> addDataInternal(Collection<? extends DataStreamerEntry> entries) {
+        return addDataInternal(entries, true);
+    }
+
+    /**
+     * @param entries Entries.
+     * @param useThreadBuffer
+     * @return Future.
+     */
+    public IgniteFuture<?> addDataInternal(Collection<? extends DataStreamerEntry> entries, boolean useThreadBuffer) {
         IgniteCacheFutureImpl fut = null;
 
         GridFutureAdapter internalFut = null;
 
-        List entriesList;
+        Collection entriesList;
 
         lock(false);
 
         try {
             long threadId = Thread.currentThread().getId();
 
-            ThreadBuffer threadBuf = threadBufMap.get(threadId);
+            if (useThreadBuffer) {
+                ThreadBuffer threadBuf = threadBufMap.get(threadId);
 
-            if (threadBuf == null) {
-                internalFut = new GridFutureAdapter();
+                if (threadBuf == null) {
+                    fut = createDataLoadFuture();
 
-                fut = new IgniteCacheFutureImpl(internalFut);
+                    // Initial capacity should be more than batch by 12.5% in order to avoid resizing.
+                    threadBuf = new ThreadBuffer(fut,
+                        new ArrayList<>(bufLdrSzPerThread + (bufLdrSzPerThread >> 3)));
 
-                internalFut.listen(rmvActiveFut);
+                    threadBufMap.put(threadId, threadBuf);
+                }
+                else
+                    // Use existed thread-buffer future.
+                    fut = threadBuf.getFuture();
 
-                activeFuts.add(internalFut);
+                entriesList = threadBuf.getEntries();
 
-                // Initial capacity should be more than batch by 12.5% in order to avoid resizing.
-                threadBuf = new ThreadBuffer(fut,
-                    new ArrayList<>(bufLdrSzPerThread + (bufLdrSzPerThread >> 3)));
-
-                threadBufMap.put(threadId, threadBuf);
+                entriesList.addAll(entries);
             }
             else {
-                fut = threadBuf.getFuture();
+                entriesList = entries;
 
-                internalFut = (GridFutureAdapter)fut.internalFuture();
+                fut = createDataLoadFuture();
             }
 
-            entriesList = threadBuf.getEntries();
+            internalFut = (GridFutureAdapter)fut.internalFuture();
 
-            entriesList.addAll(entries);
-
-            if (entriesList.size() >= bufLdrSzPerThread) {
+            if (!useThreadBuffer || entriesList.size() >= bufLdrSzPerThread) {
                 loadData(entriesList, internalFut);
 
-                threadBufMap.remove(threadId);
+                if (useThreadBuffer)
+                    threadBufMap.remove(threadId);
             }
 
             return fut;
@@ -685,6 +697,22 @@
     }
 
     /**
+     * Creates data load future and register its as active future.
+     * @return Data load future.
+     */
+    @NotNull protected IgniteCacheFutureImpl createDataLoadFuture() {
+        GridFutureAdapter internalFut0 = new GridFutureAdapter();
+
+        IgniteCacheFutureImpl fut = new IgniteCacheFutureImpl(internalFut0);
+
+        internalFut0.listen(rmvActiveFut);
+
+        activeFuts.add(internalFut0);
+
+        return fut;
+    }
+
+    /**
      * Load thread batch of DataStreamerEntry.
      */
     private void loadData(Collection<? extends DataStreamerEntry> entries, GridFutureAdapter fut) {
@@ -2243,7 +2271,7 @@
                             primary ? GridDrType.DR_LOAD : GridDrType.DR_PRELOAD,
                             false);
 
-                        cctx.evicts().touch(entry, topVer);
+                        entry.touch(topVer);
 
                         CU.unwindEvicts(cctx);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
index 8f6876c..92671e0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/DataStructuresProcessor.java
@@ -193,7 +193,10 @@
      *
      */
     public void onBeforeActivate() {
-        initLatch = new CountDownLatch(1);
+        CountDownLatch latch0 = initLatch;
+
+        if (latch0 == null || latch0.getCount() == 0)
+            initLatch = new CountDownLatch(1);
     }
 
     /**
@@ -1578,7 +1581,7 @@
                 hdr = (GridCacheSetHeader) cctx.cache().withNoRetries().getAndRemove(new GridCacheSetHeaderKey(name));
 
                 if (hdr != null)
-                    cctx.dataStructures().removeSetData(hdr.id());
+                    cctx.dataStructures().removeSetData(hdr.id(), hdr.separated());
             }
         };
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
index e42c00b..6505624 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheQueueAdapter.java
@@ -406,7 +406,7 @@
     }
 
     /** {@inheritDoc} */
-    public void affinityRun(IgniteRunnable job) {
+    @Override public void affinityRun(IgniteRunnable job) {
         if (!collocated)
             throw new IgniteException("Failed to execute affinityRun() for non-collocated queue: " + name() +
                 ". This operation is supported only for collocated queues.");
@@ -415,7 +415,7 @@
     }
 
     /** {@inheritDoc} */
-    public <R> R affinityCall(IgniteCallable<R> job) {
+    @Override public <R> R affinityCall(IgniteCallable<R> job) {
         if (!collocated)
             throw new IgniteException("Failed to execute affinityCall() for non-collocated queue: " + name() +
                 ". This operation is supported only for collocated queues.");
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheSetImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheSetImpl.java
index ba65d9e..f6a1378 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheSetImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridCacheSetImpl.java
@@ -45,7 +45,6 @@
 import org.apache.ignite.internal.processors.cache.query.CacheQuery;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryFuture;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryAdapter;
-import org.apache.ignite.internal.util.GridConcurrentHashSet;
 import org.apache.ignite.internal.util.lang.GridCloseableIterator;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.CU;
@@ -158,14 +157,8 @@
                 return cache.sizeAsync(new CachePeekMode[] {}).get() - 1;
             }
 
-            if (ctx.isLocal() || ctx.isReplicated()) {
-                GridConcurrentHashSet<SetItemKey> set = ctx.dataStructures().setData(id);
-
-                return set != null ? set.size() : 0;
-            }
-
             CacheQuery qry = new GridCacheQueryAdapter<>(ctx, SET, null, null,
-                new GridSetQueryPredicate<>(id, collocated), null, false, false);
+                new GridSetQueryPredicate<>(id, collocated), collocated ? hdrPart : null, false, false);
 
             Collection<ClusterNode> nodes = dataNodes(ctx.affinity().affinityTopologyVersion());
 
@@ -192,9 +185,7 @@
     @Override public boolean isEmpty() {
         onAccess();
 
-        GridConcurrentHashSet<SetItemKey> set = ctx.dataStructures().setData(id);
-
-        return (set == null || set.isEmpty()) && size() == 0;
+        return size() == 0;
     }
 
     /** {@inheritDoc} */
@@ -437,7 +428,7 @@
     @SuppressWarnings("unchecked")
     private WeakReferenceCloseableIterator<T> sharedCacheIterator() throws IgniteCheckedException {
         CacheQuery qry = new GridCacheQueryAdapter<>(ctx, SET, null, null,
-            new GridSetQueryPredicate<>(id, collocated), null, false, false);
+            new GridSetQueryPredicate<>(id, collocated), collocated ? hdrPart : null, false, false);
 
         Collection<ClusterNode> nodes = dataNodes(ctx.affinity().affinityTopologyVersion());
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridSetQueryPredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridSetQueryPredicate.java
index bc6c182..2de3dec 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridSetQueryPredicate.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/datastructures/GridSetQueryPredicate.java
@@ -99,7 +99,7 @@
      */
     private boolean filterKeys() {
         return !collocated && !(ctx.isLocal() || ctx.isReplicated()) &&
-            (ctx.config().getBackups() > 0 || CU.isNearEnabled(ctx));
+                (CU.isNearEnabled(ctx) || ctx.isPartitioned());
     }
 
     /** {@inheritDoc} */
@@ -118,4 +118,4 @@
     @Override public String toString() {
         return S.toString(GridSetQueryPredicate.class, this);
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java
index 351abce..978bf20 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopFileBlock.java
@@ -159,7 +159,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(HadoopFileBlock.class, this, "hosts", Arrays.toString(hosts));
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobEx.java
index ba78af9..46606da 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/hadoop/HadoopJobEx.java
@@ -47,7 +47,7 @@
      *
      * @return Input splits.
      */
-    abstract public Collection<HadoopInputSplit> input();
+    @Override abstract public Collection<HadoopInputSplit> input();
 
     /**
      * Returns context for task execution.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDirectoryInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDirectoryInfo.java
index 269098b..26979e5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDirectoryInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsDirectoryInfo.java
@@ -62,7 +62,7 @@
      * @param len New length.
      * @return Updated file info.
      */
-    public IgfsEntryInfo length(long len) {
+    @Override public IgfsEntryInfo length(long len) {
         throw new UnsupportedOperationException("length");
     }
 
@@ -107,47 +107,47 @@
     }
 
     /** {@inheritDoc} */
-    protected IgfsDirectoryInfo copy() {
+    @Override protected IgfsDirectoryInfo copy() {
         return new IgfsDirectoryInfo(id, listing, props, accessTime, modificationTime);
     }
 
     /** {@inheritDoc} */
-    public boolean isFile() {
+    @Override public boolean isFile() {
         return false;
     }
 
     /** {@inheritDoc} */
-    public long length() {
+    @Override public long length() {
         return 0;
     }
 
     /** {@inheritDoc} */
-    public int blockSize() {
+    @Override public int blockSize() {
         return 0;
     }
 
     /** {@inheritDoc} */
-    public long blocksCount() {
+    @Override public long blocksCount() {
         return 0;
     }
 
     /** {@inheritDoc} */
-    public Map<String, IgfsListingEntry> listing() {
+    @Override public Map<String, IgfsListingEntry> listing() {
         return listing != null ? listing : Collections.<String, IgfsListingEntry>emptyMap();
     }
 
     /** {@inheritDoc} */
-    public boolean hasChildren() {
+    @Override public boolean hasChildren() {
         return !F.isEmpty(listing);
     }
 
     /** {@inheritDoc} */
-    public boolean hasChild(String name) {
+    @Override public boolean hasChild(String name) {
         return listing != null && listing.containsKey(name);
     }
 
     /** {@inheritDoc} */
-    public boolean hasChild(String name, IgniteUuid expId) {
+    @Override public boolean hasChild(String name, IgniteUuid expId) {
         if (listing != null) {
             IgfsListingEntry entry = listing.get(name);
 
@@ -159,22 +159,22 @@
     }
 
     /** {@inheritDoc} */
-    @Nullable public IgniteUuid affinityKey() {
+    @Override @Nullable public IgniteUuid affinityKey() {
         return null;
     }
 
     /** {@inheritDoc} */
-    public IgfsFileMap fileMap() {
+    @Override public IgfsFileMap fileMap() {
         return null;
     }
 
     /** {@inheritDoc} */
-    @Nullable public IgniteUuid lockId() {
+    @Override @Nullable public IgniteUuid lockId() {
         return null;
     }
 
     /** {@inheritDoc} */
-    public boolean evictExclude() {
+    @Override public boolean evictExclude() {
         return true;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java
index 337f281..3db3c87 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileInfo.java
@@ -152,62 +152,62 @@
     }
 
     /** {@inheritDoc} */
-    public boolean isFile() {
+    @Override public boolean isFile() {
         return true;
     }
 
     /** {@inheritDoc} */
-    public long length() {
+    @Override public long length() {
         return len;
     }
 
     /** {@inheritDoc} */
-    public int blockSize() {
+    @Override public int blockSize() {
         return blockSize;
     }
 
     /** {@inheritDoc} */
-    public long blocksCount() {
+    @Override public long blocksCount() {
         return (len + blockSize() - 1) / blockSize();
     }
 
     /** {@inheritDoc} */
-    public Map<String, IgfsListingEntry> listing() {
+    @Override public Map<String, IgfsListingEntry> listing() {
         return Collections.emptyMap();
     }
 
     /** {@inheritDoc} */
-    public boolean hasChildren() {
+    @Override public boolean hasChildren() {
         return false;
     }
 
     /** {@inheritDoc} */
-    public boolean hasChild(String name) {
+    @Override public boolean hasChild(String name) {
         return false;
     }
 
     /** {@inheritDoc} */
-    public boolean hasChild(String name, IgniteUuid expId) {
+    @Override public boolean hasChild(String name, IgniteUuid expId) {
         return false;
     }
 
     /** {@inheritDoc} */
-    @Nullable public IgniteUuid affinityKey() {
+    @Override @Nullable public IgniteUuid affinityKey() {
         return affKey;
     }
 
     /** {@inheritDoc} */
-    public IgfsFileMap fileMap() {
+    @Override public IgfsFileMap fileMap() {
         return fileMap;
     }
 
     /** {@inheritDoc} */
-    @Nullable public IgniteUuid lockId() {
+    @Override @Nullable public IgniteUuid lockId() {
         return lockId;
     }
 
     /** {@inheritDoc} */
-    public boolean evictExclude() {
+    @Override public boolean evictExclude() {
         return evictExclude;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileWorkerBatch.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileWorkerBatch.java
index 17875a2..ee809e0 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileWorkerBatch.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/IgfsFileWorkerBatch.java
@@ -128,7 +128,7 @@
     /**
      * Process the batch.
      */
-    @SuppressWarnings("unchecked")
+    @Override @SuppressWarnings("unchecked")
     public void run() {
         Throwable err = null;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/data/IgfsDataPutProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/data/IgfsDataPutProcessor.java
index 2029d4e..4e4a6da 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/data/IgfsDataPutProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/igfs/data/IgfsDataPutProcessor.java
@@ -62,7 +62,7 @@
     }
 
     /** {@inheritDoc} */
-    public Void process(MutableEntry<IgfsBlockKey, byte[]> entry, Object... args)
+    @Override public Void process(MutableEntry<IgfsBlockKey, byte[]> entry, Object... args)
         throws EntryProcessorException {
         byte[] curVal = entry.getValue();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
index ae9cb7e..bc05cc7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/job/GridJobProcessor.java
@@ -132,7 +132,7 @@
     private final Collection<IgniteUuid> heldJobs = new GridConcurrentHashSet<>();
 
     /** If value is {@code true}, job was cancelled from future. */
-    private final GridBoundedConcurrentLinkedHashMap<IgniteUuid, Boolean> cancelReqs =
+    private volatile GridBoundedConcurrentLinkedHashMap<IgniteUuid, Boolean> cancelReqs =
         new GridBoundedConcurrentLinkedHashMap<>(FINISHED_JOBS_COUNT,
             FINISHED_JOBS_COUNT < 128 ? FINISHED_JOBS_COUNT : 128,
             0.75f, 16);
@@ -263,7 +263,9 @@
         // Clear collections.
         activeJobs.clear();
         cancelledJobs.clear();
-        cancelReqs.clear();
+        cancelReqs = new GridBoundedConcurrentLinkedHashMap<>(FINISHED_JOBS_COUNT,
+            FINISHED_JOBS_COUNT < 128 ? FINISHED_JOBS_COUNT : 128,
+            0.75f, 16);
 
         if (log.isDebugEnabled())
             log.debug("Job processor stopped.");
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java
index eea391c..c9670c6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/ClientListenerNioListener.java
@@ -128,8 +128,6 @@
         if (connCtx == null) {
             onHandshake(ses, msg);
 
-            ses.addMeta(CONN_CTX_HANDSHAKE_PASSED, true);
-
             return;
         }
 
@@ -235,6 +233,8 @@
                 throw new IgniteCheckedException("Unsupported version.");
 
             connCtx.handler().writeHandshake(writer);
+
+            ses.addMeta(CONN_CTX_HANDSHAKE_PASSED, true);
         }
         catch (IgniteAccessControlException authEx) {
             writer.writeBoolean(false);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/SqlStateCode.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/SqlStateCode.java
index eff680f..68ac200 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/SqlStateCode.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/SqlStateCode.java
@@ -61,6 +61,9 @@
     /** Requested operation is not supported. */
     public final static String UNSUPPORTED_OPERATION = "0A000";
 
+    /** Transaction state exception. */
+    public final static String TRANSACTION_STATE_EXCEPTION = "25000";
+
     /** Parsing exception. */
     public final static String PARSING_EXCEPTION = "42000";
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteRequest.java
index bdc558c..404a1c9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteRequest.java
@@ -17,17 +17,20 @@
 
 package org.apache.ignite.internal.processors.odbc.jdbc;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_4_0;
+import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_7_0;
+
 /**
  * JDBC batch execute request.
  */
@@ -39,6 +42,9 @@
     @GridToStringInclude(sensitive = true)
     private List<JdbcQuery> queries;
 
+    /** Client auto commit flag state. */
+    private boolean autoCommit;
+
     /**
      * Last stream batch flag - whether open streamers on current connection
      * must be flushed and closed after this batch.
@@ -63,15 +69,17 @@
     /**
      * @param schemaName Schema name.
      * @param queries Queries.
+     * @param autoCommit Client auto commit flag state.
      * @param lastStreamBatch {@code true} in case the request is the last batch at the stream.
      */
-    public JdbcBatchExecuteRequest(String schemaName, List<JdbcQuery> queries, boolean lastStreamBatch) {
+    public JdbcBatchExecuteRequest(String schemaName, List<JdbcQuery> queries, boolean autoCommit, boolean lastStreamBatch) {
         super(BATCH_EXEC);
 
         assert lastStreamBatch || !F.isEmpty(queries);
 
         this.schemaName = schemaName;
         this.queries = queries;
+        this.autoCommit = autoCommit;
         this.lastStreamBatch = lastStreamBatch;
     }
 
@@ -81,15 +89,17 @@
      * @param type Request type.
      * @param schemaName Schema name.
      * @param queries Queries.
+     * @param autoCommit Client auto commit flag state.
      * @param lastStreamBatch {@code true} in case the request is the last batch at the stream.
      */
-    protected JdbcBatchExecuteRequest(byte type, String schemaName, List<JdbcQuery> queries, boolean lastStreamBatch) {
+    protected JdbcBatchExecuteRequest(byte type, String schemaName, List<JdbcQuery> queries, boolean autoCommit, boolean lastStreamBatch) {
         super(type);
 
         assert lastStreamBatch || !F.isEmpty(queries);
 
         this.schemaName = schemaName;
         this.queries = queries;
+        this.autoCommit = autoCommit;
         this.lastStreamBatch = lastStreamBatch;
     }
 
@@ -108,6 +118,13 @@
     }
 
     /**
+     * @return Auto commit flag.
+     */
+    boolean autoCommit() {
+        return autoCommit;
+    }
+
+    /**
      * @return Last stream batch flag.
      */
     public boolean isLastStreamBatch() {
@@ -115,8 +132,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer, ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
 
@@ -124,17 +141,23 @@
             writer.writeInt(queries.size());
 
             for (JdbcQuery q : queries)
-                q.writeBinary(writer);
+                q.writeBinary(writer, ver);
+
         }
         else
             writer.writeInt(0);
 
-        writer.writeBoolean(lastStreamBatch);
+        if (ver.compareTo(VER_2_4_0) >= 0)
+            writer.writeBoolean(lastStreamBatch);
+
+        if (ver.compareTo(VER_2_7_0) >= 0)
+            writer.writeBoolean(autoCommit);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @SuppressWarnings("SimplifiableIfStatement")
+    @Override public void readBinary(BinaryReaderExImpl reader, ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
 
@@ -145,18 +168,16 @@
         for (int i = 0; i < n; ++i) {
             JdbcQuery qry = new JdbcQuery();
 
-            qry.readBinary(reader);
+            qry.readBinary(reader, ver);
 
             queries.add(qry);
         }
 
-        try {
-            if (reader.available() > 0)
-                lastStreamBatch = reader.readBoolean();
-        }
-        catch (IOException e) {
-            throw new BinaryObjectException(e);
-        }
+        if (ver.compareTo(VER_2_4_0) >= 0)
+            lastStreamBatch = reader.readBoolean();
+
+        if (ver.compareTo(VER_2_7_0) >= 0)
+            autoCommit = reader.readBoolean();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteResult.java
index 3fc9dd7..0d93244e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBatchExecuteResult.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -97,8 +98,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeInt(errCode);
         writer.writeString(errMsg);
@@ -107,8 +109,9 @@
 
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         errCode = reader.readInt();
         errMsg = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java
index e670baf..b0750fd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadAckResult.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
 import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -81,8 +82,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(qryId);
         writer.writeString(params.localFileName());
@@ -90,8 +92,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         qryId = reader.readLong();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java
index 7db4951..347a5df 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcBulkLoadBatchRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.NotNull;
@@ -143,8 +144,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(qryId);
         writer.writeInt(batchIdx);
@@ -153,8 +155,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         qryId = reader.readLong();
         batchIdx = reader.readInt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java
index 5b6304d..2b08fb4 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMeta.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
 import org.apache.ignite.internal.jdbc.thin.JdbcThinUtils;
 import org.apache.ignite.internal.jdbc2.JdbcUtils;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -158,7 +159,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
         writer.writeString(schemaName);
         writer.writeString(tblName);
         writer.writeString(colName);
@@ -169,7 +171,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
         schemaName = reader.readString();
         tblName = reader.readString();
         colName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java
index a2b4acf..6c77b55 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV2.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -54,15 +55,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
+        super.writeBinary(writer, ver);
 
         writer.writeBoolean(nullable);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
+        super.readBinary(reader, ver);
 
         nullable = reader.readBoolean();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java
index 9911be0..8f8adfe 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV3.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -63,15 +64,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
+        super.writeBinary(writer, ver);
 
         writer.writeString(dfltValue);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
+        super.readBinary(reader, ver);
 
         dfltValue = reader.readString();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV4.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV4.java
index ec76983..6bdc597 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV4.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcColumnMetaV4.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -68,16 +69,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
+        super.writeBinary(writer, ver);
 
         writer.writeInt(precision);
         writer.writeInt(scale);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
+        super.readBinary(reader, ver);
 
         precision = reader.readInt();
         scale = reader.readInt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java
index e74b25a..5e9a1b3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcConnectionContext.java
@@ -29,8 +29,10 @@
 import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler;
 import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 import org.apache.ignite.internal.util.nio.GridNioSession;
+import org.apache.ignite.internal.util.typedef.F;
 
 /**
  * JDBC Connection Context.
@@ -51,8 +53,11 @@
     /** Version 2.5.0: adds precision and scale for columns feature. */
     static final ClientListenerProtocolVersion VER_2_5_0 = ClientListenerProtocolVersion.create(2, 5, 0);
 
+    /** Version 2.7.0: adds maximum length for columns feature.*/
+    static final ClientListenerProtocolVersion VER_2_7_0 = ClientListenerProtocolVersion.create(2, 7, 0);
+
     /** Current version. */
-    private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_5_0;
+    private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_7_0;
 
     /** Supported versions. */
     private static final Set<ClientListenerProtocolVersion> SUPPORTED_VERS = new HashSet<>();
@@ -77,6 +82,7 @@
 
     static {
         SUPPORTED_VERS.add(CURRENT_VER);
+        SUPPORTED_VERS.add(VER_2_7_0);
         SUPPORTED_VERS.add(VER_2_5_0);
         SUPPORTED_VERS.add(VER_2_4_0);
         SUPPORTED_VERS.add(VER_2_3_0);
@@ -125,31 +131,48 @@
         boolean autoCloseCursors = reader.readBoolean();
 
         boolean lazyExec = false;
+        boolean skipReducerOnUpdate = false;
+
+        NestedTxMode nestedTxMode = NestedTxMode.DEFAULT;
+        AuthorizationContext actx = null;
 
         if (ver.compareTo(VER_2_1_5) >= 0)
             lazyExec = reader.readBoolean();
 
-        boolean skipReducerOnUpdate = false;
-
         if (ver.compareTo(VER_2_3_0) >= 0)
             skipReducerOnUpdate = reader.readBoolean();
 
-        String user = null;
-        String passwd = null;
+        if (ver.compareTo(VER_2_7_0) >= 0) {
+            String nestedTxModeName = reader.readString();
 
-        try {
-            if (reader.available() > 0) {
-                user = reader.readString();
-                passwd = reader.readString();
+            if (!F.isEmpty(nestedTxModeName)) {
+                try {
+                    nestedTxMode = NestedTxMode.valueOf(nestedTxModeName);
+                }
+                catch (IllegalArgumentException e) {
+                    throw new IgniteCheckedException("Invalid nested transactions handling mode: " + nestedTxModeName);
+                }
             }
         }
-        catch (Exception e) {
-            throw new IgniteCheckedException("Handshake error: " + e.getMessage(), e);
+
+        if (ver.compareTo(VER_2_5_0) >= 0) {
+            String user = null;
+            String passwd = null;
+
+            try {
+                if (reader.available() > 0) {
+                    user = reader.readString();
+                    passwd = reader.readString();
+                }
+            }
+            catch (Exception e) {
+                throw new IgniteCheckedException("Handshake error: " + e.getMessage(), e);
+            }
+
+            actx = authenticate(user, passwd);
         }
 
-        AuthorizationContext actx = authenticate(user, passwd);
-
-        parser = new JdbcMessageParser(ctx);
+        parser = new JdbcMessageParser(ctx, ver);
 
         JdbcResponseSender sender = new JdbcResponseSender() {
             @Override public void send(ClientListenerResponse resp) {
@@ -165,7 +188,9 @@
         };
 
         handler = new JdbcRequestHandler(ctx, busyLock, sender, maxCursors, distributedJoins, enforceJoinOrder,
-            collocated, replicatedOnly, autoCloseCursors, lazyExec, skipReducerOnUpdate, actx, ver);
+            collocated, replicatedOnly, autoCloseCursors, lazyExec, skipReducerOnUpdate, nestedTxMode, actx, ver);
+
+        handler.start();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcIndexMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcIndexMeta.java
index d33f887..d7de6d7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcIndexMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcIndexMeta.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.cache.QueryIndexType;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.query.GridQueryIndexDescriptor;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -123,7 +124,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeString(schemaName);
         writer.writeString(tblName);
         writer.writeString(idxName);
@@ -142,7 +144,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         schemaName = reader.readString();
         tblName = reader.readString();
         idxName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMessageParser.java
index 7a9c2ab..1718c00 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMessageParser.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMessageParser.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.internal.binary.streams.BinaryHeapOutputStream;
 import org.apache.ignite.internal.binary.streams.BinaryInputStream;
 import org.apache.ignite.internal.processors.odbc.ClientListenerMessageParser;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequest;
 import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
 
@@ -34,14 +35,20 @@
     /** Kernal context. */
     private final GridKernalContext ctx;
 
+    /** Client protocol version. */
+    private final ClientListenerProtocolVersion ver;
+
     /** Initial output stream capacity. */
     protected static final int INIT_CAP = 1024;
 
     /**
      * @param ctx Context.
+     * @param ver Client protocol version.
      */
-    public JdbcMessageParser(GridKernalContext ctx) {
+    public JdbcMessageParser(GridKernalContext ctx,
+        ClientListenerProtocolVersion ver) {
         this.ctx = ctx;
+        this.ver = ver;
     }
 
     /**
@@ -68,7 +75,7 @@
 
         BinaryReaderExImpl reader = createReader(msg);
 
-        return JdbcRequest.readRequest(reader);
+        return JdbcRequest.readRequest(reader, ver);
     }
 
     /** {@inheritDoc} */
@@ -81,7 +88,7 @@
 
         BinaryWriterExImpl writer = createWriter(INIT_CAP);
 
-        res.writeBinary(writer);
+        res.writeBinary(writer, ver);
 
         return writer.array();
     }}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsRequest.java
index fca1bf7..389629e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -78,8 +79,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeString(tblName);
@@ -87,8 +89,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
         tblName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java
index 9931ce0f..199c76d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaColumnsResult.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -77,8 +78,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -86,13 +88,14 @@
             writer.writeInt(meta.size());
 
             for(JdbcColumnMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -104,7 +107,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcColumnMeta m = createMetaColumn();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesRequest.java
index d4a53d8..28fe558 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -66,16 +67,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeString(tblName);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
         tblName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesResult.java
index 2316dfc..b180954 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaIndexesResult.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -57,8 +58,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -66,13 +68,14 @@
             writer.writeInt(meta.size());
 
             for(JdbcIndexMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -84,7 +87,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcIndexMeta m = new JdbcIndexMeta();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsRequest.java
index 6b955f9..360f17d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -65,16 +66,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeString(sql);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
         sql = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsResult.java
index 7563e01..43c2422 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaParamsResult.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -49,8 +50,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -58,13 +60,14 @@
             writer.writeInt(meta.size());
 
             for(JdbcParameterMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -76,7 +79,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcParameterMeta m = new JdbcParameterMeta();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysRequest.java
index 957225a..def53b8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
@@ -66,16 +67,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeString(tblName);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
         tblName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysResult.java
index bd0dd90..f6d986e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaPrimaryKeysResult.java
@@ -24,6 +24,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -51,8 +52,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -60,13 +62,14 @@
             writer.writeInt(meta.size());
 
             for(JdbcPrimaryKeyMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -78,7 +81,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcPrimaryKeyMeta m = new JdbcPrimaryKeyMeta();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasRequest.java
index 43bbe5d..715c798 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -53,15 +54,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         this.schemaName = reader.readString();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasResult.java
index 48b6aae..8d24a41 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaSchemasResult.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -46,15 +47,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         JdbcUtils.writeStringCollection(writer, schemas);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemas = JdbcUtils.readStringList(reader);
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesRequest.java
index 740b656..3ea6c35 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -65,16 +66,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeString(tblName);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         this.schemaName = reader.readString();
         this.tblName = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesResult.java
index 585667e..d5afb7c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcMetaTablesResult.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -49,8 +50,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -58,13 +60,14 @@
             writer.writeInt(meta.size());
 
             for(JdbcTableMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -76,7 +79,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcTableMeta m = new JdbcTableMeta();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteRequest.java
index 3e84731..743978d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteRequest.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.NotNull;
 
@@ -42,12 +43,13 @@
     /**
      * @param schemaName Schema name.
      * @param queries Queries.
+     * @param autoCommit Client auto commit flag state.
      * @param lastStreamBatch {@code true} in case the request is the last batch at the stream.
      * @param order Request order.
      */
     public JdbcOrderedBatchExecuteRequest(String schemaName, List<JdbcQuery> queries,
-        boolean lastStreamBatch, long order) {
-        super(BATCH_EXEC_ORDERED, schemaName, queries, lastStreamBatch);
+        boolean autoCommit, boolean lastStreamBatch, long order) {
+        super(BATCH_EXEC_ORDERED, schemaName, queries, autoCommit, lastStreamBatch);
 
         this.order = order;
     }
@@ -60,15 +62,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer, ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(order);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader, ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         order = reader.readLong();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteResult.java
index 84853d4..76f665f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcOrderedBatchExecuteResult.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -54,16 +55,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(order);
     }
 
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         order = reader.readLong();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcParameterMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcParameterMeta.java
index c0cfc9e..52e5a19 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcParameterMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcParameterMeta.java
@@ -22,6 +22,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -133,7 +134,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeInt(isNullable);
         writer.writeBoolean(signed);
         writer.writeInt(precision);
@@ -145,7 +147,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         isNullable = reader.readInt();
         signed = reader.readBoolean();
         precision = reader.readInt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcPrimaryKeyMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcPrimaryKeyMeta.java
index 6b9bf70..dffbdca 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcPrimaryKeyMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcPrimaryKeyMeta.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 
 /**
@@ -89,7 +90,8 @@
 
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeString(schemaName);
         writer.writeString(tblName);
         writer.writeString(name);
@@ -98,7 +100,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         schemaName = reader.readString();
         tblName = reader.readString();
         name = reader.readString();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQuery.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQuery.java
index f7ffb99..d9960da 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQuery.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQuery.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.SqlListenerUtils;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -63,7 +64,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
         writer.writeString(sql);
 
         if (args == null || args.length == 0)
@@ -77,7 +79,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
         sql = reader.readString();
 
         int argsNum = reader.readInt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryCloseRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryCloseRequest.java
index 872889c..5c631c3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryCloseRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryCloseRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -52,15 +53,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(queryId);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         queryId = reader.readLong();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteMultipleStatementsResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteMultipleStatementsResult.java
index 9bbdd59..44a56aa 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteMultipleStatementsResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteMultipleStatementsResult.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -80,14 +81,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (results != null && results.size() > 0) {
             writer.writeInt(results.size());
 
             for (JdbcResultInfo r : results)
-                r.writeBinary(writer);
+                r.writeBinary(writer, ver);
 
             if (results.get(0).isQuery()) {
                 writer.writeBoolean(last);
@@ -101,8 +103,9 @@
 
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int cnt = reader.readInt();
 
@@ -114,7 +117,7 @@
             for (int i = 0; i < cnt; ++i) {
                 JdbcResultInfo r = new JdbcResultInfo();
 
-                r.readBinary(reader);
+                r.readBinary(reader, ver);
 
                 results.add(r);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteRequest.java
index 3e54fc8..e4f3398 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteRequest.java
@@ -21,12 +21,15 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.SqlListenerUtils;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_7_0;
+
 /**
  * JDBC query execute request.
  */
@@ -51,10 +54,15 @@
     /** Expected statement type. */
     private JdbcStatementType stmtType;
 
+    /** Client auto commit flag state. */
+    private boolean autoCommit;
+
     /**
      */
     JdbcQueryExecuteRequest() {
         super(QRY_EXEC);
+
+        autoCommit = true;
     }
 
     /**
@@ -62,11 +70,12 @@
      * @param schemaName Cache name.
      * @param pageSize Fetch size.
      * @param maxRows Max rows.
+     * @param autoCommit Connection auto commit flag state.
      * @param sqlQry SQL query.
      * @param args Arguments list.
      */
     public JdbcQueryExecuteRequest(JdbcStatementType stmtType, String schemaName, int pageSize, int maxRows,
-        String sqlQry, Object[] args) {
+        boolean autoCommit, String sqlQry, Object[] args) {
         super(QRY_EXEC);
 
         this.schemaName = F.isEmpty(schemaName) ? null : schemaName;
@@ -75,6 +84,7 @@
         this.sqlQry = sqlQry;
         this.args = args;
         this.stmtType = stmtType;
+        this.autoCommit = autoCommit;
     }
 
     /**
@@ -119,9 +129,17 @@
         return stmtType;
     }
 
+    /**
+     * @return Auto commit flag.
+     */
+    boolean autoCommit() {
+        return autoCommit;
+    }
+
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeString(schemaName);
         writer.writeInt(pageSize);
@@ -135,12 +153,17 @@
                 SqlListenerUtils.writeObject(writer, arg, false);
         }
 
+        if (ver.compareTo(VER_2_7_0) >= 0)
+            writer.writeBoolean(autoCommit);
+
         writer.writeByte((byte)stmtType.ordinal());
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @SuppressWarnings("SimplifiableIfStatement")
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         schemaName = reader.readString();
         pageSize = reader.readInt();
@@ -154,6 +177,9 @@
         for (int i = 0; i < argsNum; ++i)
             args[i] = SqlListenerUtils.readObject(reader, false);
 
+        if (ver.compareTo(VER_2_7_0) >= 0)
+            autoCommit = reader.readBoolean();
+
         try {
             if (reader.available() > 0)
                 stmtType = JdbcStatementType.fromOrdinal(reader.readByte());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteResult.java
index fdebdb8..342e8ef 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryExecuteResult.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -112,8 +113,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(queryId);
         writer.writeBoolean(isQuery);
@@ -131,8 +133,9 @@
 
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         queryId = reader.readLong();
         isQuery = reader.readBoolean();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchRequest.java
index 776c3bf..59ed9a8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -65,16 +66,18 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(queryId);
         writer.writeInt(pageSize);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         queryId = reader.readLong();
         pageSize = reader.readInt();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchResult.java
index ac4a603..e62efcb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryFetchResult.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -66,8 +67,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeBoolean(last);
 
@@ -75,8 +77,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         last = reader.readBoolean();
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataRequest.java
index bdef321..f30ecfd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -53,15 +54,17 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         writer.writeLong(qryId);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         qryId = reader.readLong();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataResult.java
index c8c0991..d366e60 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcQueryMetadataResult.java
@@ -23,6 +23,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -58,8 +59,9 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
-        super.writeBinary(writer);
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.writeBinary(writer, ver);
 
         if (F.isEmpty(meta))
             writer.writeInt(0);
@@ -67,13 +69,14 @@
             writer.writeInt(meta.size());
 
             for (JdbcColumnMeta m : meta)
-                m.writeBinary(writer);
+                m.writeBinary(writer, ver);
         }
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
-        super.readBinary(reader);
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
+        super.readBinary(reader, ver);
 
         int size = reader.readInt();
 
@@ -85,7 +88,7 @@
             for (int i = 0; i < size; ++i) {
                 JdbcColumnMeta m = new JdbcColumnMeta();
 
-                m.readBinary(reader);
+                m.readBinary(reader, ver);
 
                 meta.add(m);
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRawBinarylizable.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRawBinarylizable.java
index c3f1874..22c514d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRawBinarylizable.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRawBinarylizable.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 
 /**
  * Interface that allows to implement custom serialization
@@ -30,15 +31,18 @@
      * Writes fields to provided writer.
      *
      * @param writer Binary object writer.
+     * @param ver ver Protocol version.
      * @throws BinaryObjectException In case of error.
      */
-    public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException;
+    public void writeBinary(BinaryWriterExImpl writer, ClientListenerProtocolVersion ver) throws BinaryObjectException;
 
     /**
      * Reads fields from provided reader.
      *
      * @param reader Binary object reader.
+     * @param ver Protocol version.
      * @throws BinaryObjectException In case of error.
      */
-    public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException;
+    public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException;
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java
index 3d5b869..0674edf 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequest.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequestNoId;
 
 /**
@@ -77,12 +78,14 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeByte(type);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         // No-op.
     }
 
@@ -95,10 +98,12 @@
 
     /**
      * @param reader Binary reader.
+     * @param ver Protocol version.
      * @return Request object.
      * @throws BinaryObjectException On error.
      */
-    public static JdbcRequest readRequest(BinaryReaderExImpl reader) throws BinaryObjectException {
+    public static JdbcRequest readRequest(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         int reqType = reader.readByte();
 
         JdbcRequest req;
@@ -173,7 +178,7 @@
                 throw new IgniteException("Unknown SQL listener request ID: [request ID=" + reqType + ']');
         }
 
-        req.readBinary(reader);
+        req.readBinary(reader, ver);
 
         return req;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
index f8b1c40..97ce20a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandler.java
@@ -32,12 +32,11 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.LockSupport;
 import javax.cache.configuration.Factory;
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cache.query.BulkLoadContextCursor;
 import org.apache.ignite.cache.query.FieldsQueryCursor;
-import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteVersionUtils;
@@ -46,6 +45,7 @@
 import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters;
 import org.apache.ignite.internal.processors.bulkload.BulkLoadProcessor;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
 import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
 import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
@@ -57,9 +57,11 @@
 import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.processors.query.QueryUtils;
 import org.apache.ignite.internal.processors.query.SqlClientContext;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -72,7 +74,7 @@
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcBulkLoadBatchRequest.CMD_FINISHED_ERROR;
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_3_0;
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_4_0;
-import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_5_0;
+import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcConnectionContext.VER_2_7_0;
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BATCH_EXEC;
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BATCH_EXEC_ORDERED;
 import static org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest.BULK_LOAD_BATCH;
@@ -106,6 +108,9 @@
     /** Busy lock. */
     private final GridSpinBusyLock busyLock;
 
+    /** Worker. */
+    private final JdbcRequestHandlerWorker worker;
+
     /** Maximum allowed cursors. */
     private final int maxCursors;
 
@@ -127,6 +132,9 @@
     /** Automatic close of cursors. */
     private final boolean autoCloseCursors;
 
+    /** Nested transactions handling mode. */
+    private final NestedTxMode nestedTxMode;
+
     /** Protocol version. */
     private ClientListenerProtocolVersion protocolVer;
 
@@ -152,7 +160,7 @@
     public JdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock,
         JdbcResponseSender sender, int maxCursors,
         boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, boolean replicatedOnly,
-        boolean autoCloseCursors, boolean lazy, boolean skipReducerOnUpdate,
+        boolean autoCloseCursors, boolean lazy, boolean skipReducerOnUpdate, NestedTxMode nestedTxMode,
         AuthorizationContext actx, ClientListenerProtocolVersion protocolVer) {
         this.ctx = ctx;
         this.sender = sender;
@@ -177,10 +185,14 @@
         this.busyLock = busyLock;
         this.maxCursors = maxCursors;
         this.autoCloseCursors = autoCloseCursors;
+        this.nestedTxMode = nestedTxMode;
         this.protocolVer = protocolVer;
         this.actx = actx;
 
         log = ctx.log(getClass());
+
+        // TODO IGNITE-9484 Do not create worker if there is a possibility to unbind TX from threads.
+        worker = new JdbcRequestHandlerWorker(ctx.igniteInstanceName(), log, this, ctx);
     }
 
     /** {@inheritDoc} */
@@ -191,6 +203,34 @@
 
         JdbcRequest req = (JdbcRequest)req0;
 
+        if (!MvccUtils.mvccEnabled(ctx))
+            return doHandle(req);
+        else {
+            GridFutureAdapter<ClientListenerResponse> fut = worker.process(req);
+
+            try {
+                return fut.get();
+            }
+            catch (IgniteCheckedException e) {
+                return exceptionToResult(e);
+            }
+        }
+    }
+
+    /**
+     * Start worker, if it's present.
+     */
+    void start() {
+        if (worker != null)
+            worker.start();
+    }
+
+    /**
+     * Actually handle the request.
+     * @param req Request.
+     * @return Request handling result.
+     */
+    ClientListenerResponse doHandle(JdbcRequest req) {
         if (!busyLock.enterBusy())
             return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN,
                 "Failed to handle JDBC request because node is stopping.");
@@ -365,6 +405,17 @@
     public void onDisconnect() {
         if (busyLock.enterBusy())
         {
+            if (worker != null) {
+                worker.cancel();
+
+                try {
+                    worker.join();
+                }
+                catch (InterruptedException e) {
+                    // No-op.
+                }
+            }
+
             try
             {
                 for (JdbcQueryCursor cursor : qryCursors.values())
@@ -412,11 +463,11 @@
         try {
             String sql = req.sqlQuery();
 
-            SqlFieldsQuery qry;
+            SqlFieldsQueryEx qry;
 
             switch(req.expectedStatementType()) {
                 case ANY_STATEMENT_TYPE:
-                    qry = new SqlFieldsQuery(sql);
+                    qry = new SqlFieldsQueryEx(sql, null);
 
                     break;
 
@@ -441,6 +492,8 @@
             qry.setCollocated(cliCtx.isCollocated());
             qry.setReplicatedOnly(cliCtx.isReplicatedOnly());
             qry.setLazy(cliCtx.isLazy());
+            qry.setNestedTxMode(nestedTxMode);
+            qry.setAutoCommit(req.autoCommit());
 
             if (req.pageSize() <= 0)
                 return new JdbcResponse(IgniteQueryErrorCode.UNKNOWN, "Invalid fetch size: " + req.pageSize());
@@ -657,6 +710,8 @@
                 qry.setCollocated(cliCtx.isCollocated());
                 qry.setReplicatedOnly(cliCtx.isReplicatedOnly());
                 qry.setLazy(cliCtx.isLazy());
+                qry.setNestedTxMode(nestedTxMode);
+                qry.setAutoCommit(req.autoCommit());
 
                 qry.setSchema(schemaName);
             }
@@ -822,7 +877,7 @@
 
                         JdbcColumnMeta columnMeta;
 
-                        if (protocolVer.compareTo(VER_2_5_0) >= 0) {
+                        if (protocolVer.compareTo(VER_2_7_0) >= 0) {
                             GridQueryProperty prop = table.property(colName);
 
                             columnMeta = new JdbcColumnMetaV4(table.schemaName(), table.tableName(),
@@ -853,7 +908,7 @@
 
             JdbcMetaColumnsResult res;
 
-            if (protocolVer.compareTo(VER_2_5_0) >= 0)
+            if (protocolVer.compareTo(VER_2_7_0) >= 0)
                 res = new JdbcMetaColumnsResultV4(meta);
             else if (protocolVer.compareTo(VER_2_4_0) >= 0)
                 res = new JdbcMetaColumnsResultV3(meta);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandlerWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandlerWorker.java
new file mode 100644
index 0000000..7211787
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcRequestHandlerWorker.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.odbc.jdbc;
+
+import java.util.concurrent.LinkedBlockingQueue;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.odbc.ClientListenerNioListener;
+import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.thread.IgniteThread;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * JDBC request handler worker to maintain single threaded transactional execution of SQL statements when MVCC is on.<p>
+ * This worker is intended for internal use as a temporary solution and from within {@link JdbcRequestHandler},
+ * therefore it does not do any fine-grained lifecycle handling as it relies on existing guarantees from
+ * {@link ClientListenerNioListener}.
+ */
+class JdbcRequestHandlerWorker extends GridWorker {
+    /** Requests queue.*/
+    private final LinkedBlockingQueue<T2<JdbcRequest, GridFutureAdapter<ClientListenerResponse>>> queue =
+        new LinkedBlockingQueue<>();
+
+    /** Handler.*/
+    private final JdbcRequestHandler hnd;
+
+    /** Context.*/
+    private final GridKernalContext ctx;
+
+    /** Response */
+    private final static ClientListenerResponse ERR_RESPONSE = new JdbcResponse(IgniteQueryErrorCode.UNKNOWN,
+        "Connection closed.");
+
+    /**
+     * Constructor.
+     * @param igniteInstanceName Instance name.
+     * @param log Logger.
+     * @param hnd Handler.
+     * @param ctx Kernal context.
+     */
+    JdbcRequestHandlerWorker(@Nullable String igniteInstanceName, IgniteLogger log, JdbcRequestHandler hnd,
+        GridKernalContext ctx) {
+        super(igniteInstanceName, "jdbc-request-handler-worker", log);
+
+        A.notNull(hnd, "hnd");
+
+        this.hnd = hnd;
+
+        this.ctx = ctx;
+    }
+
+    /**
+     * Start this worker.
+     */
+    void start() {
+        new IgniteThread(this).start();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+        try {
+            while (!isCancelled()) {
+                T2<JdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.take();
+
+                GridFutureAdapter<ClientListenerResponse> fut = req.get2();
+
+                try {
+                    ClientListenerResponse res = hnd.doHandle(req.get1());
+
+                    fut.onDone(res);
+                }
+                catch (Exception e) {
+                    fut.onDone(e);
+                }
+            }
+        }
+        finally {
+            // Notify indexing that this worker is being stopped.
+            try {
+                ctx.query().getIndexing().onClientDisconnect();
+            }
+            catch (Exception e) {
+                // No-op.
+            }
+
+            // Drain the queue on stop.
+            T2<JdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.poll();
+
+            while (req != null) {
+                req.get2().onDone(ERR_RESPONSE);
+
+                req = queue.poll();
+            }
+        }
+    }
+
+    /**
+     * Initiate request processing.
+     * @param req Request.
+     * @return Future to track request processing.
+     */
+    GridFutureAdapter<ClientListenerResponse> process(JdbcRequest req) {
+        GridFutureAdapter<ClientListenerResponse> fut = new GridFutureAdapter<>();
+
+        queue.add(new T2<>(req, fut));
+
+        return fut;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResponse.java
index 91f26d8..5d5b4e3 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResponse.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResponse.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -76,14 +77,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeInt(status());
 
         if (status() == STATUS_SUCCESS) {
             writer.writeBoolean(res != null);
 
             if (res != null)
-                res.writeBinary(writer);
+                res.writeBinary(writer, ver);
         }
         else
             writer.writeString(error());
@@ -91,12 +93,13 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         status(reader.readInt());
 
         if (status() == STATUS_SUCCESS) {
             if (reader.readBoolean())
-                res = JdbcResult.readResult(reader);
+                res = JdbcResult.readResult(reader, ver);
         }
         else
             error(reader.readString());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java
index 199e5da..3a0d7bb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResult.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 
 /**
  * JDBC response result.
@@ -87,21 +88,24 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeByte(type);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         // No-op.
     }
 
     /**
      * @param reader Binary reader.
+     * @param ver Protocol verssion.
      * @return Request object.
      * @throws BinaryObjectException On error.
      */
-    public static JdbcResult readResult(BinaryReaderExImpl reader) throws BinaryObjectException {
+    public static JdbcResult readResult(BinaryReaderExImpl reader, ClientListenerProtocolVersion ver) throws BinaryObjectException {
         int resId = reader.readByte();
 
         JdbcResult res;
@@ -191,7 +195,7 @@
                 throw new IgniteException("Unknown SQL listener request ID: [request ID=" + resId + ']');
         }
 
-        res.readBinary(reader);
+        res.readBinary(reader, ver);
 
         return res;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResultInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResultInfo.java
index f0706e4..5fab77a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResultInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcResultInfo.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
 /**
@@ -75,14 +76,16 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) {
         writer.writeBoolean(isQuery);
         writer.writeLong(updCnt);
         writer.writeLong(qryId);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) {
         isQuery = reader.readBoolean();
         updCnt = reader.readLong();
         qryId = reader.readLong();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcTableMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcTableMeta.java
index 5e15189..d4324d6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcTableMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/jdbc/JdbcTableMeta.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.binary.BinaryObjectException;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 
@@ -66,13 +67,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void writeBinary(BinaryWriterExImpl writer) throws BinaryObjectException {
+    @Override public void writeBinary(BinaryWriterExImpl writer,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         writer.writeString(schemaName);
         writer.writeString(tblName);
     }
 
     /** {@inheritDoc} */
-    @Override public void readBinary(BinaryReaderExImpl reader) throws BinaryObjectException {
+    @Override public void readBinary(BinaryReaderExImpl reader,
+        ClientListenerProtocolVersion ver) throws BinaryObjectException {
         schemaName = reader.readString();
         tblName = reader.readString();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java
index 1d4f1a8..173ab39 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcColumnMeta.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.binary.BinaryRawWriter;
 import org.apache.ignite.internal.binary.BinaryUtils;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
 
 /**
@@ -32,31 +33,51 @@
     private final String tableName;
 
     /** Column name. */
-    private final String columnName;
+    public final String columnName;
 
     /** Data type. */
     private final Class<?> dataType;
 
+    /** Precision. */
+    public final int precision;
+
+    /** Scale. */
+    public final int scale;
+
+    /** Client version. */
+    private final ClientListenerProtocolVersion ver;
+
     /**
      * @param schemaName Cache name.
      * @param tableName Table name.
      * @param columnName Column name.
      * @param dataType Data type.
+     * @param precision Precision.
+     * @param scale Scale.
+     * @param ver Client version.
      */
-    public OdbcColumnMeta(String schemaName, String tableName, String columnName, Class<?> dataType) {
+    public OdbcColumnMeta(String schemaName, String tableName, String columnName, Class<?> dataType,
+        int precision, int scale, ClientListenerProtocolVersion ver) {
         this.schemaName = OdbcUtils.addQuotationMarksIfNeeded(schemaName);
         this.tableName = tableName;
         this.columnName = columnName;
         this.dataType = dataType;
+        this.precision = precision;
+        this.scale = scale;
+        this.ver = ver;
     }
 
     /**
      * @param info Field metadata.
+     * @param ver Client version.
      */
-    public OdbcColumnMeta(GridQueryFieldMetadata info) {
+    public OdbcColumnMeta(GridQueryFieldMetadata info, ClientListenerProtocolVersion ver) {
         this.schemaName = OdbcUtils.addQuotationMarksIfNeeded(info.schemaName());
         this.tableName = info.typeName();
         this.columnName = info.fieldName();
+        this.precision = info.precision();
+        this.scale = info.scale();
+        this.ver = ver;
 
         Class<?> type;
 
@@ -77,6 +98,8 @@
         hash = 31 * hash + tableName.hashCode();
         hash = 31 * hash + columnName.hashCode();
         hash = 31 * hash + dataType.hashCode();
+        hash = 31 * hash + Integer.hashCode(precision);
+        hash = 31 * hash + Integer.hashCode(scale);
 
         return hash;
     }
@@ -87,7 +110,8 @@
             OdbcColumnMeta other = (OdbcColumnMeta) o;
 
             return this == other || schemaName.equals(other.schemaName) && tableName.equals(other.tableName) &&
-                columnName.equals(other.columnName) && dataType.equals(other.dataType);
+                columnName.equals(other.columnName) && dataType.equals(other.dataType) &&
+                precision == other.precision && scale == other.scale;
         }
 
         return false;
@@ -106,5 +130,10 @@
         byte typeId = BinaryUtils.typeByClass(dataType);
 
         writer.writeByte(typeId);
+
+        if (ver.compareTo(OdbcConnectionContext.VER_2_7_0) >= 0) {
+            writer.writeInt(precision);
+            writer.writeInt(scale);
+        }
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java
index 8d8c745..d82dcc6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcConnectionContext.java
@@ -17,8 +17,6 @@
 
 package org.apache.ignite.internal.processors.odbc.odbc;
 
-import java.util.HashSet;
-import java.util.Set;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.binary.BinaryReaderExImpl;
@@ -27,8 +25,12 @@
 import org.apache.ignite.internal.processors.odbc.ClientListenerMessageParser;
 import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 
+import java.util.HashSet;
+import java.util.Set;
+
 /**
  * ODBC Connection Context.
  */
@@ -48,8 +50,11 @@
     /** Version 2.5.0: added authentication. */
     public static final ClientListenerProtocolVersion VER_2_5_0 = ClientListenerProtocolVersion.create(2, 5, 0);
 
+    /** Version 2.7.0: added precision and scale. */
+    public static final ClientListenerProtocolVersion VER_2_7_0 = ClientListenerProtocolVersion.create(2, 7, 0);
+
     /** Current version. */
-    private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_5_0;
+    private static final ClientListenerProtocolVersion CURRENT_VER = VER_2_7_0;
 
     /** Supported versions. */
     private static final Set<ClientListenerProtocolVersion> SUPPORTED_VERS = new HashSet<>();
@@ -68,6 +73,7 @@
 
     static {
         SUPPORTED_VERS.add(CURRENT_VER);
+        SUPPORTED_VERS.add(VER_2_5_0);
         SUPPORTED_VERS.add(VER_2_3_0);
         SUPPORTED_VERS.add(VER_2_3_2);
         SUPPORTED_VERS.add(VER_2_1_5);
@@ -107,6 +113,7 @@
         boolean enforceJoinOrder = reader.readBoolean();
         boolean replicatedOnly = reader.readBoolean();
         boolean collocated = reader.readBoolean();
+
         boolean lazy = false;
 
         if (ver.compareTo(VER_2_1_5) >= 0)
@@ -120,17 +127,25 @@
         String user = null;
         String passwd = null;
 
+        NestedTxMode nestedTxMode = NestedTxMode.DEFAULT;
+
         if (ver.compareTo(VER_2_5_0) >= 0) {
             user = reader.readString();
             passwd = reader.readString();
+
+            byte nestedTxModeVal = reader.readByte();
+
+            nestedTxMode = NestedTxMode.fromByte(nestedTxModeVal);
         }
 
         AuthorizationContext actx = authenticate(user, passwd);
 
-        handler = new OdbcRequestHandler(ctx, busyLock, maxCursors, distributedJoins,
-                enforceJoinOrder, replicatedOnly, collocated, lazy, skipReducerOnUpdate, actx);
+        handler = new OdbcRequestHandler(ctx, busyLock, maxCursors, distributedJoins, enforceJoinOrder,
+            replicatedOnly, collocated, lazy, skipReducerOnUpdate, actx, nestedTxMode, ver);
 
         parser = new OdbcMessageParser(ctx, ver);
+
+        handler.start();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java
index c2137bd..3ed6f21 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcMessageParser.java
@@ -47,7 +47,7 @@
     protected static final int INIT_CAP = 1024;
 
     /** Kernal context. */
-    protected GridKernalContext ctx;
+    protected final GridKernalContext ctx;
 
     /** Logger. */
     private final IgniteLogger log;
@@ -101,7 +101,12 @@
                 if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0)
                     timeout = reader.readInt();
 
-                res = new OdbcQueryExecuteRequest(schema, sql, params, timeout);
+                boolean autoCommit = true;
+
+                if (ver.compareTo(OdbcConnectionContext.VER_2_7_0) >= 0)
+                    autoCommit = reader.readBoolean();
+
+                res = new OdbcQueryExecuteRequest(schema, sql, params, timeout, autoCommit);
 
                 break;
             }
@@ -123,7 +128,12 @@
                 if (ver.compareTo(OdbcConnectionContext.VER_2_3_2) >= 0)
                     timeout = reader.readInt();
 
-                res = new OdbcQueryExecuteBatchRequest(schema, sql, last, params, timeout);
+                boolean autoCommit = true;
+
+                if (ver.compareTo(OdbcConnectionContext.VER_2_7_0) >= 0)
+                    autoCommit = reader.readBoolean();
+
+                res = new OdbcQueryExecuteBatchRequest(schema, sql, last, params, timeout, autoCommit);
 
                 break;
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java
index 0e4effd..75c2831 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteBatchRequest.java
@@ -41,6 +41,10 @@
     @GridToStringExclude
     private final Object[][] args;
 
+    /** Autocommit flag. */
+    @GridToStringInclude
+    private final boolean autoCommit;
+
     /** Query timeout in seconds. */
     @GridToStringInclude
     private final int timeout;
@@ -51,9 +55,10 @@
      * @param last Last page flag.
      * @param args Arguments list.
      * @param timeout Timeout in seconds.
+     * @param autoCommit Autocommit flag.
      */
     public OdbcQueryExecuteBatchRequest(@Nullable String schema, String sqlQry, boolean last, Object[][] args,
-        int timeout) {
+        int timeout, boolean autoCommit) {
         super(QRY_EXEC_BATCH);
 
         assert sqlQry != null : "SQL query should not be null";
@@ -64,6 +69,7 @@
         this.last = last;
         this.args = args;
         this.timeout = timeout;
+        this.autoCommit = autoCommit;
     }
 
     /**
@@ -106,4 +112,11 @@
     @Override public String toString() {
         return S.toString(OdbcQueryExecuteBatchRequest.class, this, "args", args, true);
     }
+
+    /**
+     * @return Autocommit flag.
+     */
+    public boolean autoCommit() {
+        return autoCommit;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java
index 1fde908..7034b86 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryExecuteRequest.java
@@ -37,6 +37,10 @@
     @GridToStringExclude
     private final Object[] args;
 
+    /** Autocommit flag. */
+    @GridToStringInclude
+    private final boolean autoCommit;
+
     /** Query timeout in seconds. */
     @GridToStringInclude
     private final int timeout;
@@ -47,7 +51,8 @@
      * @param args Arguments list.
      * @param timeout Timeout in seconds.
      */
-    public OdbcQueryExecuteRequest(@Nullable String schema, String sqlQry, Object[] args, int timeout) {
+    public OdbcQueryExecuteRequest(@Nullable String schema, String sqlQry, Object[] args, int timeout,
+        boolean autoCommit) {
         super(QRY_EXEC);
 
         assert sqlQry != null : "SQL query should not be null";
@@ -56,6 +61,7 @@
         this.sqlQry = sqlQry;
         this.args = args;
         this.timeout = timeout;
+        this.autoCommit = autoCommit;
     }
 
     /**
@@ -91,4 +97,10 @@
         return S.toString(OdbcQueryExecuteRequest.class, this, "args", args, true);
     }
 
+    /**
+     * @return Autocommit flag.
+     */
+    public boolean autoCommit() {
+        return autoCommit;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java
index 23788c7..9781ffe 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcQueryResults.java
@@ -21,6 +21,7 @@
 import java.util.List;
 import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 
 /**
  * ODBC result set
@@ -38,12 +39,17 @@
     /** Current result set index. */
     private int currentResultSetIdx;
 
+    /** Client version. */
+    private ClientListenerProtocolVersion ver;
+
     /**
      * @param cursors Result set cursors.
+     * @param ver Client version.
      */
-    OdbcQueryResults(List<FieldsQueryCursor<List<?>>> cursors) {
+    OdbcQueryResults(List<FieldsQueryCursor<List<?>>> cursors, ClientListenerProtocolVersion ver) {
         this.cursors = cursors;
         this.currentResultSetIdx = 0;
+        this.ver = ver;
 
         rowsAffected = new ArrayList<>(cursors.size());
 
@@ -99,7 +105,7 @@
         currentResultSet = null;
 
         if (currentResultSetIdx != cursors.size()) {
-            currentResultSet = new OdbcResultSet(cursors.get(currentResultSetIdx));
+            currentResultSet = new OdbcResultSet(cursors.get(currentResultSetIdx), ver);
             ++currentResultSetIdx;
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java
index bf0601b..143bd42 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandler.java
@@ -28,6 +28,7 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.cache.query.FieldsQueryCursor;
@@ -36,15 +37,20 @@
 import org.apache.ignite.internal.binary.BinaryWriterExImpl;
 import org.apache.ignite.internal.binary.GridBinaryMarshaller;
 import org.apache.ignite.internal.processors.authentication.AuthorizationContext;
-import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequest;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequestHandler;
 import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
 import org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeUtils;
+import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -74,6 +80,9 @@
     /** Busy lock. */
     private final GridSpinBusyLock busyLock;
 
+    /** Worker. */
+    private final OdbcRequestHandlerWorker worker;
+
     /** Maximum allowed cursors. */
     private final int maxCursors;
 
@@ -89,6 +98,9 @@
     /** Replicated only flag. */
     private final boolean replicatedOnly;
 
+    /** Nested transaction behaviour. */
+    private final NestedTxMode nestedTxMode;
+
     /** Collocated flag. */
     private final boolean collocated;
 
@@ -99,7 +111,10 @@
     private final boolean skipReducerOnUpdate;
 
     /** Authentication context */
-    private AuthorizationContext actx;
+    private final AuthorizationContext actx;
+
+    /** Client version. */
+    private ClientListenerProtocolVersion ver;
 
     /**
      * Constructor.
@@ -112,11 +127,13 @@
      * @param collocated Collocated flag.
      * @param lazy Lazy flag.
      * @param skipReducerOnUpdate Skip reducer on update flag.
+     * @param nestedTxMode Nested transaction mode.
      * @param actx Authentication context.
+     * @param ver Client protocol version.
      */
     public OdbcRequestHandler(GridKernalContext ctx, GridSpinBusyLock busyLock, int maxCursors,
         boolean distributedJoins, boolean enforceJoinOrder, boolean replicatedOnly, boolean collocated, boolean lazy,
-        boolean skipReducerOnUpdate, AuthorizationContext actx) {
+        boolean skipReducerOnUpdate, AuthorizationContext actx, NestedTxMode nestedTxMode, ClientListenerProtocolVersion ver) {
         this.ctx = ctx;
         this.busyLock = busyLock;
         this.maxCursors = maxCursors;
@@ -127,19 +144,54 @@
         this.lazy = lazy;
         this.skipReducerOnUpdate = skipReducerOnUpdate;
         this.actx = actx;
+        this.nestedTxMode = nestedTxMode;
+        this.ver = ver;
 
         log = ctx.log(getClass());
+
+        // TODO IGNITE-9484 Do not create worker if there is a possibility to unbind TX from threads.
+        worker = new OdbcRequestHandlerWorker(ctx.igniteInstanceName(), log, this, ctx);
     }
 
     /** {@inheritDoc} */
     @Override public ClientListenerResponse handle(ClientListenerRequest req0) {
         assert req0 != null;
 
+        assert req0 instanceof OdbcRequest;
+
         OdbcRequest req = (OdbcRequest)req0;
 
+        if (!MvccUtils.mvccEnabled(ctx))
+            return doHandle(req);
+        else {
+            GridFutureAdapter<ClientListenerResponse> fut = worker.process(req);
+
+            try {
+                return fut.get();
+            }
+            catch (IgniteCheckedException e) {
+                return exceptionToResult(e);
+            }
+        }
+    }
+
+    /**
+     * Start worker, if it's present.
+     */
+    void start() {
+        if (worker != null)
+            worker.start();
+    }
+
+    /**
+     * Handle ODBC request.
+     * @param req ODBC request.
+     * @return Response.
+     */
+    public ClientListenerResponse doHandle(OdbcRequest req) {
         if (!busyLock.enterBusy())
             return new OdbcResponse(IgniteQueryErrorCode.UNKNOWN,
-                    "Failed to handle ODBC request because node is stopping: " + req);
+                "Failed to handle ODBC request because node is stopping: " + req);
 
         if (actx != null)
             AuthorizationContext.context(actx);
@@ -197,6 +249,17 @@
     public void onDisconnect() {
         if (busyLock.enterBusy())
         {
+            if (worker != null) {
+                worker.cancel();
+
+                try {
+                    worker.join();
+                }
+                catch (InterruptedException e) {
+                    // No-op.
+                }
+            }
+
             try
             {
                 for (OdbcQueryResults res : qryResults.values())
@@ -213,9 +276,11 @@
      * @param schema Schema.
      * @param sql SQL request.
      * @param args Arguments.
+     * @param autoCommit Autocommit transaction.
+     * @param timeout Query timeout.
      * @return Query instance.
      */
-    private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args, int timeout) {
+    private SqlFieldsQueryEx makeQuery(String schema, String sql, Object[] args, int timeout, boolean autoCommit) {
         SqlFieldsQueryEx qry = new SqlFieldsQueryEx(sql, null);
 
         qry.setArgs(args);
@@ -227,6 +292,8 @@
         qry.setLazy(lazy);
         qry.setSchema(schema);
         qry.setSkipReducerOnUpdate(skipReducerOnUpdate);
+        qry.setNestedTxMode(nestedTxMode);
+        qry.setAutoCommit(autoCommit);
 
         qry.setTimeout(timeout, TimeUnit.SECONDS);
 
@@ -257,11 +324,12 @@
                 log.debug("ODBC query parsed [reqId=" + req.requestId() + ", original=" + req.sqlQuery() +
                     ", parsed=" + sql + ']');
 
-            SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments(), req.timeout());
+            SqlFieldsQuery qry = makeQuery(req.schema(), sql, req.arguments(), req.timeout(), req.autoCommit());
 
             List<FieldsQueryCursor<List<?>>> cursors = ctx.query().querySqlFields(qry, true, false);
 
-            OdbcQueryResults results = new OdbcQueryResults(cursors);
+            OdbcQueryResults results = new OdbcQueryResults(cursors, ver);
+
             Collection<OdbcColumnMeta> fieldsMeta;
 
             if (!results.hasUnfetchedRows()) {
@@ -272,6 +340,10 @@
                 qryResults.put(qryId, results);
 
                 fieldsMeta = results.currentResultSet().fieldsMeta();
+
+                for (OdbcColumnMeta meta : fieldsMeta) {
+                    log.warning("Meta - " + meta.columnName + ", " + meta.precision + ", " + meta.scale);
+                }
             }
 
             OdbcQueryExecuteResult res = new OdbcQueryExecuteResult(qryId, fieldsMeta, results.rowsAffected());
@@ -301,7 +373,7 @@
                 log.debug("ODBC query parsed [reqId=" + req.requestId() + ", original=" + req.sqlQuery() +
                         ", parsed=" + sql + ']');
 
-            SqlFieldsQueryEx qry = makeQuery(req.schema(), sql, null, req.timeout());
+            SqlFieldsQueryEx qry = makeQuery(req.schema(), sql, null, req.timeout(), req.autoCommit());
 
             Object[][] paramSet = req.arguments();
 
@@ -436,8 +508,10 @@
                         if (!matches(field.getKey(), req.columnPattern()))
                             continue;
 
+                        GridQueryProperty prop = table.property(field.getKey());
+
                         OdbcColumnMeta columnMeta = new OdbcColumnMeta(table.schemaName(), table.tableName(),
-                            field.getKey(), field.getValue());
+                            field.getKey(), field.getValue(), prop.precision(), prop.scale(), ver);
 
                         if (!meta.contains(columnMeta))
                             meta.add(columnMeta);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandlerWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandlerWorker.java
new file mode 100644
index 0000000..4184b6a
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcRequestHandlerWorker.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.odbc.odbc;
+
+import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.odbc.ClientListenerNioListener;
+import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.thread.IgniteThread;
+import org.jetbrains.annotations.Nullable;
+
+import java.util.concurrent.LinkedBlockingQueue;
+
+/**
+ * ODBC request handler worker to maintain single threaded transactional execution of SQL statements when MVCC is on.<p>
+ * This worker is intended for internal use as a temporary solution and from within {@link OdbcRequestHandler},
+ * therefore it does not do any fine-grained lifecycle handling as it relies on existing guarantees from
+ * {@link ClientListenerNioListener}.
+ */
+class OdbcRequestHandlerWorker extends GridWorker {
+    /** Requests queue.*/
+    private final LinkedBlockingQueue<T2<OdbcRequest, GridFutureAdapter<ClientListenerResponse>>> queue =
+        new LinkedBlockingQueue<>();
+
+    /** Handler.*/
+    private final OdbcRequestHandler hnd;
+
+    /** Context.*/
+    private final GridKernalContext ctx;
+
+    /** Response */
+    private final static ClientListenerResponse ERR_RESPONSE = new OdbcResponse(IgniteQueryErrorCode.UNKNOWN,
+        "Connection closed.");
+
+    /**
+     * Constructor.
+     * @param igniteInstanceName Instance name.
+     * @param log Logger.
+     * @param hnd Handler.
+     * @param ctx Kernal context.
+     */
+    OdbcRequestHandlerWorker(@Nullable String igniteInstanceName, IgniteLogger log, OdbcRequestHandler hnd,
+        GridKernalContext ctx) {
+        super(igniteInstanceName, "odbc-request-handler-worker", log);
+
+        A.notNull(hnd, "hnd");
+
+        this.hnd = hnd;
+
+        this.ctx = ctx;
+    }
+
+    /**
+     * Start this worker.
+     */
+    void start() {
+        new IgniteThread(this).start();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException {
+        try {
+            while (!isCancelled()) {
+                T2<OdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.take();
+
+                GridFutureAdapter<ClientListenerResponse> fut = req.get2();
+
+                try {
+                    ClientListenerResponse res = hnd.doHandle(req.get1());
+
+                    fut.onDone(res);
+                }
+                catch (Exception e) {
+                    fut.onDone(e);
+                }
+            }
+        }
+        finally {
+            // Notify indexing that this worker is being stopped.
+            try {
+                ctx.query().getIndexing().onClientDisconnect();
+            }
+            catch (Exception e) {
+                // No-op.
+            }
+
+            // Drain the queue on stop.
+            T2<OdbcRequest, GridFutureAdapter<ClientListenerResponse>> req = queue.poll();
+
+            while (req != null) {
+                req.get2().onDone(ERR_RESPONSE);
+
+                req = queue.poll();
+            }
+        }
+    }
+
+    /**
+     * Initiate request processing.
+     * @param req Request.
+     * @return Future to track request processing.
+     */
+    GridFutureAdapter<ClientListenerResponse> process(OdbcRequest req) {
+        GridFutureAdapter<ClientListenerResponse> fut = new GridFutureAdapter<>();
+
+        queue.add(new T2<>(req, fut));
+
+        return fut;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java
index 66b0776..945b85d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/odbc/odbc/OdbcResultSet.java
@@ -23,6 +23,7 @@
 import java.util.List;
 import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
 
 /**
@@ -35,14 +36,19 @@
     /** Current iterator. */
     private Iterator iter;
 
+    /** Client version. */
+    private ClientListenerProtocolVersion ver;
+
     /**
      * Constructor.
      * @param cursor Result set cursor.
+     * @param ver Client version.
      */
-    OdbcResultSet(FieldsQueryCursor<List<?>> cursor) {
+    OdbcResultSet(FieldsQueryCursor<List<?>> cursor, ClientListenerProtocolVersion ver) {
         assert cursor instanceof QueryCursorImpl;
 
         this.cursor = (QueryCursorImpl<List<?>>)cursor;
+        this.ver = ver;
 
         if (this.cursor.isQuery())
             iter = this.cursor.iterator();
@@ -61,7 +67,7 @@
      * @return Fields metadata of the current result set.
      */
     public Collection<OdbcColumnMeta> fieldsMeta() {
-        return convertMetadata(cursor.fieldsMeta());
+        return convertMetadata(cursor.fieldsMeta(), ver);
     }
 
     /**
@@ -86,14 +92,16 @@
      * {@link OdbcColumnMeta}.
      *
      * @param meta Internal query field metadata.
+     * @param ver Client version.
      * @return Odbc query field metadata.
      */
-    private static Collection<OdbcColumnMeta> convertMetadata(Collection<GridQueryFieldMetadata> meta) {
+    private static Collection<OdbcColumnMeta> convertMetadata(Collection<GridQueryFieldMetadata> meta,
+        ClientListenerProtocolVersion ver) {
         List<OdbcColumnMeta> res = new ArrayList<>();
 
         if (meta != null) {
             for (GridQueryFieldMetadata info : meta)
-                res.add(new OdbcColumnMeta(info));
+                res.add(new OdbcColumnMeta(info, ver));
         }
 
         return res;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformContextImpl.java
index 9e22f38..4e22ce9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformContextImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformContextImpl.java
@@ -333,7 +333,7 @@
         BinaryContext binCtx = cacheObjProc.binaryContext();
 
         for (BinaryMetadata meta : metas)
-            binCtx.updateMetadata(meta.typeId(), meta);
+            binCtx.updateMetadata(meta.typeId(), meta, false);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java
index 1c0878c..4e5803e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/PlatformProcessorImpl.java
@@ -71,6 +71,7 @@
 
 import static org.apache.ignite.internal.processors.platform.PlatformAbstractTarget.FALSE;
 import static org.apache.ignite.internal.processors.platform.PlatformAbstractTarget.TRUE;
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.CURRENT_VER;
 
 /**
  * GridGain platform processor.
@@ -478,7 +479,7 @@
             }
 
             case OP_ADD_CACHE_CONFIGURATION:
-                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader);
+                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader, CURRENT_VER);
 
                 ctx.grid().addCacheConfiguration(cfg);
 
@@ -547,7 +548,7 @@
             }
 
             case OP_CREATE_CACHE_FROM_CONFIG: {
-                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader);
+                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader, CURRENT_VER);
 
                 IgniteCacheProxy cache = reader.readBoolean()
                         ? (IgniteCacheProxy)ctx.grid().createCache(cfg, PlatformConfigurationUtils.readNearConfiguration(reader))
@@ -557,7 +558,7 @@
             }
 
             case OP_GET_OR_CREATE_CACHE_FROM_CONFIG: {
-                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader);
+                CacheConfiguration cfg = PlatformConfigurationUtils.readCacheConfiguration(reader, CURRENT_VER);
 
                 IgniteCacheProxy cache = reader.readBoolean()
                         ? (IgniteCacheProxy)ctx.grid().getOrCreateCache(cfg,
@@ -672,7 +673,7 @@
     @Override public void processOutStream(int type, BinaryRawWriterEx writer) throws IgniteCheckedException {
         switch (type) {
             case OP_GET_IGNITE_CONFIGURATION: {
-                PlatformConfigurationUtils.writeIgniteConfiguration(writer, ignite().configuration());
+                PlatformConfigurationUtils.writeIgniteConfiguration(writer, ignite().configuration(), CURRENT_VER);
 
                 return;
             }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java
index 8f3f2ef..7b91575 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/cache/PlatformCache.java
@@ -76,6 +76,8 @@
 import org.apache.ignite.transactions.TransactionTimeoutException;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.CURRENT_VER;
+
 /**
  * Native cache wrapper implementation.
  */
@@ -980,7 +982,7 @@
                 CacheConfiguration ccfg = ((IgniteCache<Object, Object>)cache).
                         getConfiguration(CacheConfiguration.class);
 
-                PlatformConfigurationUtils.writeCacheConfiguration(writer, ccfg);
+                PlatformConfigurationUtils.writeCacheConfiguration(writer, ccfg, CURRENT_VER);
 
                 break;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientConnectionContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientConnectionContext.java
index c957901..ffe38ca 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientConnectionContext.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientConnectionContext.java
@@ -44,11 +44,18 @@
     /** Version 1.2.0. */
     public static final ClientListenerProtocolVersion VER_1_2_0 = ClientListenerProtocolVersion.create(1, 2, 0);
 
+    /** Version 1.2.0. */
+    public static final ClientListenerProtocolVersion CURRENT_VER = VER_1_2_0;
+
     /** Supported versions. */
-    private static final Collection<ClientListenerProtocolVersion> SUPPORTED_VERS = Arrays.asList(VER_1_2_0, VER_1_1_0, VER_1_0_0);
+    private static final Collection<ClientListenerProtocolVersion> SUPPORTED_VERS = Arrays.asList(
+        VER_1_2_0,
+        VER_1_1_0,
+        VER_1_0_0
+    );
 
     /** Message parser. */
-    private final ClientMessageParser parser;
+    private ClientMessageParser parser;
 
     /** Request handler. */
     private ClientRequestHandler handler;
@@ -72,8 +79,6 @@
     public ClientConnectionContext(GridKernalContext ctx, long connId, int maxCursors) {
         super(ctx, connId);
 
-        parser = new ClientMessageParser(ctx);
-
         this.maxCursors = maxCursors;
     }
 
@@ -93,7 +98,7 @@
 
     /** {@inheritDoc} */
     @Override public ClientListenerProtocolVersion currentVersion() {
-        return VER_1_1_0;
+        return CURRENT_VER;
     }
 
     /** {@inheritDoc} */
@@ -121,6 +126,8 @@
         AuthorizationContext authCtx = authenticate(user, pwd);
 
         handler = new ClientRequestHandler(this, authCtx);
+
+        parser = new ClientMessageParser(kernalContext(), ver);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java
index c887b3c..c65e64a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/ClientMessageParser.java
@@ -27,6 +27,7 @@
 import org.apache.ignite.internal.binary.streams.BinaryInputStream;
 import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl;
 import org.apache.ignite.internal.processors.odbc.ClientListenerMessageParser;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.odbc.ClientListenerRequest;
 import org.apache.ignite.internal.processors.odbc.ClientListenerResponse;
 import org.apache.ignite.internal.processors.platform.client.binary.ClientBinaryTypeGetRequest;
@@ -204,17 +205,24 @@
 
     /** Marshaller. */
     private final GridBinaryMarshaller marsh;
+    
+    /** Client version */
+    private final ClientListenerProtocolVersion ver;
 
     /**
      * Ctor.
      *
      * @param ctx Kernal context.
+     * @param ver Client version.
      */
-    ClientMessageParser(GridKernalContext ctx) {
+    ClientMessageParser(GridKernalContext ctx, ClientListenerProtocolVersion ver) {
         assert ctx != null;
+        assert ver != null;
 
         CacheObjectBinaryProcessorImpl cacheObjProc = (CacheObjectBinaryProcessorImpl)ctx.cacheObjects();
         marsh = cacheObjProc.marshaller();
+        
+        this.ver = ver;
     }
 
     /** {@inheritDoc} */
@@ -343,13 +351,13 @@
                 return new ClientCacheGetNamesRequest(reader);
 
             case OP_CACHE_GET_CONFIGURATION:
-                return new ClientCacheGetConfigurationRequest(reader);
+                return new ClientCacheGetConfigurationRequest(reader, ver);
 
             case OP_CACHE_CREATE_WITH_CONFIGURATION:
-                return new ClientCacheCreateWithConfigurationRequest(reader);
+                return new ClientCacheCreateWithConfigurationRequest(reader, ver);
 
             case OP_CACHE_GET_OR_CREATE_WITH_CONFIGURATION:
-                return new ClientCacheGetOrCreateWithConfigurationRequest(reader);
+                return new ClientCacheGetOrCreateWithConfigurationRequest(reader, ver);
 
             case OP_QUERY_SQL:
                 return new ClientCacheSqlQueryRequest(reader);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/binary/ClientBinaryTypePutRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/binary/ClientBinaryTypePutRequest.java
index 7839d48..64c8d80 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/binary/ClientBinaryTypePutRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/binary/ClientBinaryTypePutRequest.java
@@ -49,7 +49,7 @@
     @Override public ClientResponse process(ClientConnectionContext ctx) {
         BinaryContext binCtx = ((CacheObjectBinaryProcessorImpl) ctx.kernalContext().cacheObjects()).binaryContext();
 
-        binCtx.updateMetadata(meta.typeId(), meta);
+        binCtx.updateMetadata(meta.typeId(), meta, false);
 
         return super.process(ctx);
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheConfigurationSerializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheConfigurationSerializer.java
index 839720b..331b6d8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheConfigurationSerializer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheConfigurationSerializer.java
@@ -30,6 +30,7 @@
 
 import java.util.ArrayList;
 import java.util.Collection;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 
 import static org.apache.ignite.internal.processors.platform.utils.PlatformConfigurationUtils.readQueryEntity;
 import static org.apache.ignite.internal.processors.platform.utils.PlatformConfigurationUtils.writeEnumInt;
@@ -134,8 +135,9 @@
      * Writes the cache configuration.
      * @param writer Writer.
      * @param cfg Configuration.
+     * @param ver Client version.
      */
-    static void write(BinaryRawWriterEx writer, CacheConfiguration cfg) {
+    static void write(BinaryRawWriterEx writer, CacheConfiguration cfg, ClientListenerProtocolVersion ver) {
         assert writer != null;
         assert cfg != null;
 
@@ -191,7 +193,7 @@
             writer.writeInt(qryEntities.size());
 
             for (QueryEntity e : qryEntities)
-                writeQueryEntity(writer, e);
+                writeQueryEntity(writer, e, ver);
         } else
             writer.writeInt(0);
 
@@ -203,9 +205,10 @@
      * Reads the cache configuration.
      *
      * @param reader Reader.
+     * @param ver Client version.
      * @return Configuration.
      */
-    static CacheConfiguration read(BinaryRawReader reader) {
+    static CacheConfiguration read(BinaryRawReader reader, ClientListenerProtocolVersion ver) {
         reader.readInt();  // Skip length.
 
         short propCnt = reader.readShort();
@@ -349,7 +352,7 @@
                         Collection<QueryEntity> entities = new ArrayList<>(qryEntCnt);
 
                         for (int j = 0; j < qryEntCnt; j++)
-                            entities.add(readQueryEntity(reader));
+                            entities.add(readQueryEntity(reader, ver));
 
                         cfg.setQueryEntities(entities);
                     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheCreateWithConfigurationRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheCreateWithConfigurationRequest.java
index 65f9784..9f1d63fc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheCreateWithConfigurationRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheCreateWithConfigurationRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryRawReader;
 import org.apache.ignite.cache.CacheExistsException;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext;
 import org.apache.ignite.internal.processors.platform.client.ClientRequest;
 import org.apache.ignite.internal.processors.platform.client.ClientResponse;
@@ -39,11 +40,12 @@
      * Constructor.
      *
      * @param reader Reader.
+     * @param ver Client version.
      */
-    public ClientCacheCreateWithConfigurationRequest(BinaryRawReader reader) {
+    public ClientCacheCreateWithConfigurationRequest(BinaryRawReader reader, ClientListenerProtocolVersion ver) {
         super(reader);
 
-        cacheCfg = ClientCacheConfigurationSerializer.read(reader);
+        cacheCfg = ClientCacheConfigurationSerializer.read(reader, ver);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationRequest.java
index 3632095..f2c83fc 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.binary.BinaryRawReader;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext;
 import org.apache.ignite.internal.processors.platform.client.ClientResponse;
 
@@ -27,13 +28,19 @@
  * Cache configuration request.
  */
 public class ClientCacheGetConfigurationRequest extends ClientCacheRequest {
+    /** Client version. */
+    private final ClientListenerProtocolVersion ver;
+    
     /**
      * Constructor.
      *
      * @param reader Reader.
+     * @param ver Client version.
      */
-    public ClientCacheGetConfigurationRequest(BinaryRawReader reader) {
+    public ClientCacheGetConfigurationRequest(BinaryRawReader reader, ClientListenerProtocolVersion ver) {
         super(reader);
+        
+        this.ver = ver;
     }
 
     /** {@inheritDoc} */
@@ -42,6 +49,6 @@
         CacheConfiguration cfg = ((IgniteCache<Object, Object>) rawCache(ctx))
                 .getConfiguration(CacheConfiguration.class);
 
-        return new ClientCacheGetConfigurationResponse(requestId(), cfg);
+        return new ClientCacheGetConfigurationResponse(requestId(), cfg, ver);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationResponse.java
index 2033dfe..f7d6896 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationResponse.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetConfigurationResponse.java
@@ -19,6 +19,7 @@
 
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.internal.binary.BinaryRawWriterEx;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.platform.client.ClientResponse;
 
 /**
@@ -28,24 +29,30 @@
     /** Cache configuration. */
     private final CacheConfiguration cfg;
 
+    /** Client version. */
+    private final ClientListenerProtocolVersion ver;
+    
     /**
      * Constructor.
      *
      * @param reqId Request id.
      * @param cfg Cache configuration.
+     * @param ver Client version.
      */
-    ClientCacheGetConfigurationResponse(long reqId, CacheConfiguration cfg) {
+    ClientCacheGetConfigurationResponse(long reqId, CacheConfiguration cfg, ClientListenerProtocolVersion ver) {
         super(reqId);
 
         assert cfg != null;
+        assert ver != null;
 
         this.cfg = cfg;
+        this.ver = ver;
     }
 
     /** {@inheritDoc} */
     @Override public void encode(BinaryRawWriterEx writer) {
         super.encode(writer);
 
-        ClientCacheConfigurationSerializer.write(writer, cfg);
+        ClientCacheConfigurationSerializer.write(writer, cfg, ver);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetOrCreateWithConfigurationRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetOrCreateWithConfigurationRequest.java
index 48569b4..b005fb2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetOrCreateWithConfigurationRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheGetOrCreateWithConfigurationRequest.java
@@ -20,6 +20,7 @@
 import org.apache.ignite.binary.BinaryRawReader;
 import org.apache.ignite.cache.CacheExistsException;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.platform.client.ClientConnectionContext;
 import org.apache.ignite.internal.processors.platform.client.ClientRequest;
 import org.apache.ignite.internal.processors.platform.client.ClientResponse;
@@ -39,11 +40,12 @@
      * Constructor.
      *
      * @param reader Reader.
+     * @param ver Client version.
      */
-    public ClientCacheGetOrCreateWithConfigurationRequest(BinaryRawReader reader) {
+    public ClientCacheGetOrCreateWithConfigurationRequest(BinaryRawReader reader, ClientListenerProtocolVersion ver) {
         super(reader);
 
-        cacheCfg = ClientCacheConfigurationSerializer.read(reader);
+        cacheCfg = ClientCacheConfigurationSerializer.read(reader, ver);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java
index 9e2d1f1..9e39b56 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/client/cache/ClientCacheRequest.java
@@ -123,7 +123,7 @@
     }
 
     /** {@inheritDoc} */
-    protected void authorize(ClientConnectionContext ctx, SecurityPermission perm) {
+    @Override protected void authorize(ClientConnectionContext ctx, SecurityPermission perm) {
         SecurityContext secCtx = ctx.securityContext();
 
         if (secCtx != null) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/compute/PlatformCompute.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/compute/PlatformCompute.java
index 6012625..ed61021 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/compute/PlatformCompute.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/compute/PlatformCompute.java
@@ -74,6 +74,9 @@
     /** */
     private static final int OP_EXEC_NATIVE = 8;
 
+    /** */
+    private static final int OP_WITH_NO_RESULT_CACHE = 9;
+
     /** Compute instance. */
     private final IgniteComputeImpl compute;
 
@@ -145,6 +148,13 @@
 
                 return TRUE;
             }
+
+            case OP_WITH_NO_RESULT_CACHE: {
+                compute.withNoResultCache();
+                computeForPlatform.withNoResultCache();
+
+                return TRUE;
+            }
         }
 
         return super.processInLongOutLong(type, val);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationClosure.java
index 9ee2f5e..7c50062 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationClosure.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/dotnet/PlatformDotNetConfigurationClosure.java
@@ -45,6 +45,8 @@
 import java.util.Collections;
 import java.util.List;
 
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.CURRENT_VER;
+
 /**
  * Closure to apply dot net configuration.
  */
@@ -196,7 +198,7 @@
     private void processPrepareResult(BinaryReaderExImpl in) {
         assert cfg != null;
 
-        PlatformConfigurationUtils.readIgniteConfiguration(in, cfg);
+        PlatformConfigurationUtils.readIgniteConfiguration(in, cfg, CURRENT_VER);
 
         // Process beans
         List<PlatformDotNetLifecycleBean> beans = beans(cfg);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/memory/PlatformOutputStreamImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/memory/PlatformOutputStreamImpl.java
index 884e718..5d01ae9 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/memory/PlatformOutputStreamImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/memory/PlatformOutputStreamImpl.java
@@ -336,7 +336,7 @@
     }
 
     /** {@inheritDoc} */
-    public int capacity() {
+    @Override public int capacity() {
         return cap;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
index b4f82a4..cd67f15 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/platform/utils/PlatformConfigurationUtils.java
@@ -76,13 +76,11 @@
 import org.apache.ignite.failure.StopNodeOrHaltFailureHandler;
 import org.apache.ignite.internal.binary.BinaryRawReaderEx;
 import org.apache.ignite.internal.binary.BinaryRawWriterEx;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProtocolVersion;
 import org.apache.ignite.internal.processors.platform.cache.affinity.PlatformAffinityFunction;
 import org.apache.ignite.internal.processors.platform.cache.expiry.PlatformExpiryPolicyFactory;
 import org.apache.ignite.internal.processors.platform.events.PlatformLocalEventListener;
 import org.apache.ignite.internal.processors.platform.plugin.cache.PlatformCachePluginConfiguration;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.platform.dotnet.PlatformDotNetAffinityFunction;
 import org.apache.ignite.platform.dotnet.PlatformDotNetBinaryConfiguration;
@@ -108,6 +106,8 @@
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
 
+import static org.apache.ignite.internal.processors.platform.client.ClientConnectionContext.VER_1_2_0;
+
 /**
  * Configuration utils.
  *
@@ -158,9 +158,10 @@
      * Reads cache configuration from a stream.
      *
      * @param in Stream.
+     * @param ver Client version.
      * @return Cache configuration.
      */
-    public static CacheConfiguration readCacheConfiguration(BinaryRawReaderEx in) {
+    public static CacheConfiguration readCacheConfiguration(BinaryRawReaderEx in, ClientListenerProtocolVersion ver) {
         assert in != null;
 
         CacheConfiguration ccfg = new CacheConfiguration();
@@ -226,7 +227,7 @@
             Collection<QueryEntity> entities = new ArrayList<>(qryEntCnt);
 
             for (int i = 0; i < qryEntCnt; i++)
-                entities.add(readQueryEntity(in));
+                entities.add(readQueryEntity(in, ver));
 
             ccfg.setQueryEntities(entities);
         }
@@ -483,9 +484,10 @@
      * Reads the query entity.
      *
      * @param in Stream.
+     * @param ver Client version.
      * @return QueryEntity.
      */
-    public static QueryEntity readQueryEntity(BinaryRawReader in) {
+    public static QueryEntity readQueryEntity(BinaryRawReader in, ClientListenerProtocolVersion ver) {
         QueryEntity res = new QueryEntity();
 
         res.setKeyType(in.readString());
@@ -499,7 +501,8 @@
         Set<String> keyFields = new HashSet<>(cnt);
         Set<String> notNullFields = new HashSet<>(cnt);
         Map<String, Object> defVals = new HashMap<>(cnt);
-        Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = new HashMap<>(cnt);
+        Map<String, Integer> fieldsPrecision = new HashMap<>(cnt);
+        Map<String, Integer> fieldsScale = new HashMap<>(cnt);
 
         if (cnt > 0) {
             LinkedHashMap<String, String> fields = new LinkedHashMap<>(cnt);
@@ -519,13 +522,18 @@
                 Object defVal = in.readObject();
                 if (defVal != null)
                     defVals.put(fieldName, defVal);
+                
+                if (ver.compareTo(VER_1_2_0) >= 0) {
+                    int precision = in.readInt();
 
-                int precision = in.readInt();
+                    if (precision != -1)
+                        fieldsPrecision.put(fieldName, precision);
 
-                int scale = in.readInt();
+                    int scale = in.readInt();
 
-                if (precision != -1 || scale != -1)
-                    decimalInfo.put(fieldName, F.t(precision, scale));
+                    if (scale != -1)
+                        fieldsScale.put(fieldName, scale);
+                }
             }
 
             res.setFields(fields);
@@ -539,8 +547,11 @@
             if (!defVals.isEmpty())
                 res.setDefaultFieldValues(defVals);
 
-            if (!decimalInfo.isEmpty())
-                res.setDecimalInfo(decimalInfo);
+            if (!fieldsPrecision.isEmpty())
+                res.setFieldsPrecision(fieldsPrecision);
+
+            if (!fieldsScale.isEmpty())
+                res.setFieldsScale(fieldsScale);
         }
 
         // Aliases
@@ -601,9 +612,11 @@
      * Reads Ignite configuration.
      * @param in Reader.
      * @param cfg Configuration.
+     * @param ver Client version.
      */
     @SuppressWarnings("deprecation")
-    public static void readIgniteConfiguration(BinaryRawReaderEx in, IgniteConfiguration cfg) {
+    public static void readIgniteConfiguration(BinaryRawReaderEx in, IgniteConfiguration cfg, 
+        ClientListenerProtocolVersion ver) {
         if (in.readBoolean())
             cfg.setClientMode(in.readBoolean());
         int[] evtTypes = in.readIntArray();
@@ -642,6 +655,19 @@
         if (in.readBoolean())
             cfg.setAuthenticationEnabled(in.readBoolean());
 
+        int sqlSchemasCnt = in.readInt();
+
+        if (sqlSchemasCnt == -1)
+            cfg.setSqlSchemas((String[])null);
+        else {
+            String[] sqlSchemas = new String[sqlSchemasCnt];
+
+            for (int i = 0; i < sqlSchemasCnt; i++)
+                sqlSchemas[i] = in.readString();
+
+            cfg.setSqlSchemas(sqlSchemas);
+        }
+
         Object consId = in.readObjectDetached();
 
         if (consId instanceof Serializable) {
@@ -670,7 +696,7 @@
         if (in.readBoolean())
             cfg.setQueryThreadPoolSize(in.readInt());
 
-        readCacheConfigurations(in, cfg);
+        readCacheConfigurations(in, cfg, ver);
         readDiscoveryConfiguration(in, cfg);
 
         if (in.readBoolean()) {
@@ -806,8 +832,10 @@
      *
      * @param cfg IgniteConfiguration to update.
      * @param in Reader.
+     * @param ver Client version.
      */
-    private static void readCacheConfigurations(BinaryRawReaderEx in, IgniteConfiguration cfg) {
+    private static void readCacheConfigurations(BinaryRawReaderEx in, IgniteConfiguration cfg, 
+        ClientListenerProtocolVersion ver) {
         int len = in.readInt();
 
         if (len == 0)
@@ -816,7 +844,7 @@
         List<CacheConfiguration> caches = new ArrayList<>();
 
         for (int i = 0; i < len; i++)
-            caches.add(readCacheConfiguration(in));
+            caches.add(readCacheConfiguration(in, ver));
 
         CacheConfiguration[] oldCaches = cfg.getCacheConfiguration();
         CacheConfiguration[] caches0 = caches.toArray(new CacheConfiguration[caches.size()]);
@@ -915,8 +943,10 @@
      *
      * @param writer Writer.
      * @param ccfg Configuration.
+     * @param ver Client version.
      */
-    public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfiguration ccfg) {
+    public static void writeCacheConfiguration(BinaryRawWriter writer, CacheConfiguration ccfg, 
+        ClientListenerProtocolVersion ver) {
         assert writer != null;
         assert ccfg != null;
 
@@ -976,7 +1006,7 @@
             writer.writeInt(qryEntities.size());
 
             for (QueryEntity e : qryEntities)
-                writeQueryEntity(writer, e);
+                writeQueryEntity(writer, e, ver);
         }
         else
             writer.writeInt(0);
@@ -1033,8 +1063,10 @@
      *
      * @param writer Writer.
      * @param qryEntity Query entity.
+     * @param ver Client version.
      */
-    public static void writeQueryEntity(BinaryRawWriter writer, QueryEntity qryEntity) {
+    public static void writeQueryEntity(BinaryRawWriter writer, QueryEntity qryEntity, 
+        ClientListenerProtocolVersion ver) {
         assert qryEntity != null;
 
         writer.writeString(qryEntity.getKeyType());
@@ -1050,7 +1082,8 @@
             Set<String> keyFields = qryEntity.getKeyFields();
             Set<String> notNullFields = qryEntity.getNotNullFields();
             Map<String, Object> defVals = qryEntity.getDefaultFieldValues();
-            Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = qryEntity.getDecimalInfo();
+            Map<String, Integer> fieldsPrecision = qryEntity.getFieldsPrecision();
+            Map<String, Integer> fieldsScale = qryEntity.getFieldsScale();
 
             writer.writeInt(fields.size());
 
@@ -1061,11 +1094,10 @@
                 writer.writeBoolean(notNullFields != null && notNullFields.contains(field.getKey()));
                 writer.writeObject(defVals != null ? defVals.get(field.getKey()) : null);
 
-                IgniteBiTuple<Integer, Integer> precisionAndScale =
-                    decimalInfo == null ? null : decimalInfo.get(field.getKey());
-
-                writer.writeInt(precisionAndScale == null ? -1 : precisionAndScale.get1());
-                writer.writeInt(precisionAndScale == null ? -1 : precisionAndScale.get2());
+                if (ver.compareTo(VER_1_2_0) >= 0) {
+                    writer.writeInt(fieldsPrecision == null ? -1 : fieldsPrecision.getOrDefault(field.getKey(), -1));
+                    writer.writeInt(fieldsScale == null ? -1 : fieldsScale.getOrDefault(field.getKey(), -1));
+                }
             }
         }
         else
@@ -1130,9 +1162,11 @@
      *
      * @param w Writer.
      * @param cfg Configuration.
+     * @param ver Client version.
      */
     @SuppressWarnings("deprecation")
-    public static void writeIgniteConfiguration(BinaryRawWriter w, IgniteConfiguration cfg) {
+    public static void writeIgniteConfiguration(BinaryRawWriter w, IgniteConfiguration cfg, 
+        ClientListenerProtocolVersion ver) {
         assert w != null;
         assert cfg != null;
 
@@ -1167,6 +1201,16 @@
         w.writeBoolean(cfg.isActiveOnStart());
         w.writeBoolean(true);
         w.writeBoolean(cfg.isAuthenticationEnabled());
+
+        if (cfg.getSqlSchemas() == null)
+            w.writeInt(-1);
+        else {
+            w.writeInt(cfg.getSqlSchemas().length);
+
+            for (String schema : cfg.getSqlSchemas())
+                w.writeString(schema);
+        }
+
         w.writeObject(cfg.getConsistentId());
 
         // Thread pools.
@@ -1195,7 +1239,7 @@
             w.writeInt(cacheCfg.length);
 
             for (CacheConfiguration ccfg : cacheCfg)
-                writeCacheConfiguration(w, ccfg);
+                writeCacheConfiguration(w, ccfg, ver);
         }
         else
             w.writeInt(0);
@@ -1776,6 +1820,7 @@
                 .setCheckpointWriteOrder(CheckpointWriteOrder.fromOrdinal(in.readInt()))
                 .setWriteThrottlingEnabled(in.readBoolean())
                 .setWalCompactionEnabled(in.readBoolean())
+                .setMaxWalArchiveSize(in.readLong())
                 .setSystemRegionInitialSize(in.readLong())
                 .setSystemRegionMaxSize(in.readLong())
                 .setPageSize(in.readInt())
@@ -1903,6 +1948,7 @@
             w.writeInt(cfg.getCheckpointWriteOrder().ordinal());
             w.writeBoolean(cfg.isWriteThrottlingEnabled());
             w.writeBoolean(cfg.isWalCompactionEnabled());
+            w.writeLong(cfg.getMaxWalArchiveSize());
             w.writeLong(cfg.getSystemRegionInitialSize());
             w.writeLong(cfg.getSystemRegionMaxSize());
             w.writeInt(cfg.getPageSize());
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/EnlistOperation.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/EnlistOperation.java
new file mode 100644
index 0000000..fdb6f1e
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/EnlistOperation.java
@@ -0,0 +1,91 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query;
+
+import org.apache.ignite.internal.processors.cache.GridCacheOperation;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Operations on entries which could be performed during transaction.
+ * Operations are used during SQL statements execution, but does not define exact SQL statements semantics.
+ * It is better to treat them independently and having their own semantics.
+ */
+public enum EnlistOperation {
+    /**
+     * This operation creates entry if it does not exist or raises visible failure otherwise.
+     */
+    INSERT(GridCacheOperation.CREATE),
+    /**
+     * This operation creates entry if it does not exist or modifies existing one otherwise.
+     */
+    UPSERT(GridCacheOperation.UPDATE),
+    /**
+     * This operation modifies existing entry or does nothing if entry does not exist.
+     */
+    UPDATE(GridCacheOperation.UPDATE),
+    /**
+     * This operation deletes existing entry or does nothing if entry does not exist.
+     */
+    DELETE(GridCacheOperation.DELETE),
+    /**
+     * This operation locks existing entry protecting it from updates by other transactions
+     * or does notrhing if entry does not exist.
+     */
+    LOCK(null);
+
+    /** */
+    private final GridCacheOperation cacheOp;
+
+    /** */
+    EnlistOperation(GridCacheOperation cacheOp) {
+        this.cacheOp = cacheOp;
+    }
+
+    /**
+     * @return Corresponding Cache operation.
+     */
+    public GridCacheOperation cacheOperation() {
+        return cacheOp;
+    }
+
+    /** */
+    public boolean isDeleteOrLock() {
+        return this == DELETE || this == LOCK;
+    }
+
+    /**
+     * Indicates that an operation cannot create new row.
+     */
+    public boolean noCreate() {
+        // has no meaning for LOCK
+        assert this != LOCK;
+
+        return this == UPDATE || this == DELETE;
+    }
+
+    /** Enum values. */
+    private static final EnlistOperation[] VALS = values();
+
+    /**
+     * @param ord Ordinal value.
+     * @return Enum value.
+     */
+    @Nullable public static EnlistOperation fromOrdinal(int ord) {
+        return ord < 0 || ord >= VALS.length ? null : VALS[ord];
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryFieldMetadata.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryFieldMetadata.java
index 9b08d77..f9df499 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryFieldMetadata.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryFieldMetadata.java
@@ -51,4 +51,18 @@
      * @return Field type name.
      */
     public String fieldTypeName();
-}
\ No newline at end of file
+
+    /**
+     * Gets field precision.
+     *
+     * @return Field precision.
+     */
+    public int precision();
+
+    /**
+     * Gets field scale.
+     *
+     * @return Field scale.
+     */
+    public int scale();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java
index dedd075..7aa4021 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryIndexing.java
@@ -31,6 +31,8 @@
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
@@ -61,6 +63,13 @@
     public void stop() throws IgniteCheckedException;
 
     /**
+     * Performs necessary actions on disconnect of a stateful client (say, one associated with a transaction).
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    public void onClientDisconnect() throws IgniteCheckedException;
+
+    /**
      * Parses SQL query into two step query and executes it.
      *
      * @param schemaName Schema name.
@@ -79,10 +88,11 @@
      * @param cliCtx Client context.
      * @param keepBinary Keep binary flag.
      * @param failOnMultipleStmts Whether an exception should be thrown for multiple statements query.
-     * @param cancel Query cancel state handler.    @return Cursor.
+     * @param tracker Query tracker.
+     * @return Cursor.
      */
     public List<FieldsQueryCursor<List<?>>> querySqlFields(String schemaName, SqlFieldsQuery qry,
-        SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel);
+        SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, MvccQueryTracker tracker, GridQueryCancel cancel);
 
     /**
      * Execute an INSERT statement using data streamer as receiver.
@@ -223,6 +233,28 @@
     public void unregisterCache(GridCacheContext cctx, boolean rmvIdx) throws IgniteCheckedException;
 
     /**
+     *
+     * @param cctx Cache context.
+     * @param ids Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Timeout.
+     * @param topVer Topology version.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param cancel Query cancel object.
+     * @return Cursor over entries which are going to be changed.
+     * @throws IgniteCheckedException If failed.
+     */
+    public UpdateSourceIterator<?> prepareDistributedUpdate(GridCacheContext<?, ?> cctx, int[] ids, int[] parts,
+        String schema, String qry, Object[] params, int flags,
+        int pageSize, int timeout, AffinityTopologyVersion topVer,
+        MvccSnapshot mvccSnapshot, GridQueryCancel cancel) throws IgniteCheckedException;
+
+    /**
      * Registers type if it was not known before or updates it otherwise.
      *
      * @param cctx Cache context.
@@ -243,7 +275,10 @@
      * @param prevRowAvailable Whether previous row is available.
      * @throws IgniteCheckedException If failed.
      */
-    public void store(GridCacheContext cctx, GridQueryTypeDescriptor type, CacheDataRow row, CacheDataRow prevRow,
+    public void store(GridCacheContext cctx,
+        GridQueryTypeDescriptor type,
+        CacheDataRow row,
+        CacheDataRow prevRow,
         boolean prevRowAvailable) throws IgniteCheckedException;
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
index 04d1aa9..eb3f2a7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/GridQueryProcessor.java
@@ -66,6 +66,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryFuture;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryType;
@@ -1752,15 +1753,20 @@
      * @param cacheIds Cache IDs.
      * @return Future that will be completed when rebuilding is finished.
      */
-    public IgniteInternalFuture<?> rebuildIndexesFromHash(Collection<Integer> cacheIds) {
+    public IgniteInternalFuture<?> rebuildIndexesFromHash(Set<Integer> cacheIds) {
         if (!busyLock.enterBusy())
             throw new IllegalStateException("Failed to rebuild indexes from hash (grid is stopping).");
 
+        // Because of alt type ids, there can be few entries in 'types' for a single cache.
+        // In order to avoid processing a cache more than once, let's track processed names.
+        Set<String> processedCacheNames = new HashSet<>();
+
         try {
             GridCompoundFuture<Object, ?> fut = new GridCompoundFuture<Object, Object>();
 
             for (Map.Entry<QueryTypeIdKey, QueryTypeDescriptorImpl> e : types.entrySet()) {
-                if (cacheIds.contains(CU.cacheId(e.getKey().cacheName())))
+                if (cacheIds.contains(CU.cacheId(e.getKey().cacheName())) &&
+                    processedCacheNames.add(e.getKey().cacheName()))
                     fut.add(rebuildIndexesFromHash(e.getKey().cacheName(), e.getValue()));
             }
 
@@ -2040,6 +2046,32 @@
     }
 
     /**
+     *
+     * @param cctx Cache context.
+     * @param cacheIds Involved cache ids.
+     * @param parts Partitions.
+     * @param schema Schema name.
+     * @param qry Query string.
+     * @param params Query parameters.
+     * @param flags Flags.
+     * @param pageSize Fetch page size.
+     * @param timeout Timeout.
+     * @param topVer Topology version.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param cancel Query cancel object.
+     * @return Cursor over entries which are going to be changed.
+     * @throws IgniteCheckedException If failed.
+     */
+    public UpdateSourceIterator<?> prepareDistributedUpdate(GridCacheContext<?, ?> cctx, int[] cacheIds,
+        int[] parts, String schema, String qry, Object[] params, int flags, int pageSize, int timeout,
+        AffinityTopologyVersion topVer, MvccSnapshot mvccSnapshot,
+        GridQueryCancel cancel) throws IgniteCheckedException {
+        checkxEnabled();
+
+        return idx.prepareDistributedUpdate(cctx, cacheIds, parts, schema, qry, params, flags, pageSize, timeout, topVer, mvccSnapshot, cancel);
+    }
+
+    /**
      * Query SQL fields.
      *
      * @param qry Query.
@@ -2079,7 +2111,7 @@
         final boolean failOnMultipleStmts) {
         checkxEnabled();
 
-        validateSqlFieldsQuery(qry);
+        validateSqlFieldsQuery(qry, ctx, cctx);
 
         if (!ctx.state().publicApiActiveState(true)) {
             throw new IgniteException("Can not perform the operation because the cluster is inactive. Note, that " +
@@ -2104,7 +2136,7 @@
                     GridQueryCancel cancel = new GridQueryCancel();
 
                     List<FieldsQueryCursor<List<?>>> res =
-                        idx.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel);
+                        idx.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, null, cancel);
 
                     if (cctx != null)
                         sendQueryExecutedEvent(qry.getSql(), qry.getArgs(), cctx);
@@ -2129,13 +2161,31 @@
      * Validate SQL fields query.
      *
      * @param qry Query.
+     * @param ctx Kernal context.
+     * @param cctx Cache context.
      */
-    private static void validateSqlFieldsQuery(SqlFieldsQuery qry) {
+    private static void validateSqlFieldsQuery(SqlFieldsQuery qry, GridKernalContext ctx,
+        @Nullable GridCacheContext<?, ?> cctx) {
         if (qry.isReplicatedOnly() && qry.getPartitions() != null)
             throw new CacheException("Partitions are not supported in replicated only mode.");
 
         if (qry.isDistributedJoins() && qry.getPartitions() != null)
             throw new CacheException("Using both partitions and distributed JOINs is not supported for the same query");
+
+        if (qry.isLocal() && ctx.clientNode() && (cctx == null || cctx.config().getCacheMode() != CacheMode.LOCAL))
+            throw new CacheException("Execution of local SqlFieldsQuery on client node disallowed.");
+    }
+
+    /**
+     * Validate SQL query.
+     *
+     * @param qry Query.
+     * @param ctx Kernal context.
+     * @param cctx Cache context.
+     */
+    private static void validateSqlQuery(SqlQuery qry, GridKernalContext ctx, GridCacheContext<?, ?> cctx) {
+        if (qry.isLocal() && ctx.clientNode() && cctx.config().getCacheMode() != CacheMode.LOCAL)
+            throw new CacheException("Execution of local SqlQuery on client node disallowed.");
     }
 
     /**
@@ -2206,6 +2256,8 @@
      */
     public <K, V> QueryCursor<Cache.Entry<K,V>> querySql(final GridCacheContext<?,?> cctx, final SqlQuery qry,
         boolean keepBinary) {
+        validateSqlQuery(qry, ctx, cctx);
+
         if (qry.isReplicatedOnly() && qry.getPartitions() != null)
             throw new CacheException("Partitions are not supported in replicated only mode.");
 
@@ -2471,8 +2523,17 @@
 
         for (QueryField col : cols) {
             try {
-                props.add(new QueryBinaryProperty(ctx, col.name(), null, Class.forName(col.typeName()),
-                    false, null, !col.isNullable(), null, -1, -1));
+                props.add(new QueryBinaryProperty(
+                    ctx, 
+                    col.name(),
+                    null, 
+                    Class.forName(col.typeName()), 
+                    false, 
+                    null, 
+                    !col.isNullable(), 
+                    null, 
+                    col.precision(), 
+                    col.scale()));
             }
             catch (ClassNotFoundException e) {
                 throw new SchemaOperationException("Class not found for new property: " + col.typeName());
@@ -2510,15 +2571,15 @@
 
     /**
      * @param cctx Cache context.
-     * @param val Row.
+     * @param row Row removed from cache.
      * @throws IgniteCheckedException Thrown in case of any errors.
      */
-    public void remove(GridCacheContext cctx, CacheDataRow val)
+    public void remove(GridCacheContext cctx, CacheDataRow row)
         throws IgniteCheckedException {
-        assert val != null;
+        assert row != null;
 
         if (log.isDebugEnabled())
-            log.debug("Remove [cacheName=" + cctx.name() + ", key=" + val.key()+ ", val=" + val.value() + "]");
+            log.debug("Remove [cacheName=" + cctx.name() + ", key=" + row.key()+ ", val=" + row.value() + "]");
 
         if (idx == null)
             return;
@@ -2529,14 +2590,14 @@
         try {
             QueryTypeDescriptorImpl desc = typeByValue(cctx.name(),
                 cctx.cacheObjectContext(),
-                val.key(),
-                val.value(),
+                row.key(),
+                row.value(),
                 false);
 
             if (desc == null)
                 return;
 
-            idx.remove(cctx, desc, val);
+                idx.remove(cctx, desc, row);
         }
         finally {
             busyLock.leaveBusy();
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/NestedTxMode.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/NestedTxMode.java
new file mode 100644
index 0000000..3569003
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/NestedTxMode.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query;
+
+import org.apache.ignite.IgniteException;
+
+/**
+ * Behavior options when an attempt to start a nested transaction is made.
+ */
+public enum NestedTxMode {
+    /** Previously started transaction will be committed, new transaction will be started. */
+    COMMIT,
+
+    /** Warning will be printed to log, no new transaction will be started. */
+    IGNORE,
+
+    /** Exception will be thrown, previously started transaction will be rolled back. */
+    ERROR;
+
+    /** Default handling mode. */
+    public final static NestedTxMode DEFAULT = ERROR;
+
+    /**
+     * Get enum value from int
+     *
+     * @param val Int value.
+     * @return Enum value.
+     * @throws IgniteException if the is no enum value associated with the int value.
+     */
+    public static NestedTxMode fromByte(byte val) {
+        switch (val) {
+            case 1:
+                return COMMIT;
+
+            case 2:
+                return IGNORE;
+
+            case 3:
+                return ERROR;
+
+            default:
+                throw new IgniteException("Invalid nested transactions handling mode: " + val);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryEntityEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryEntityEx.java
index a697882..f94e651 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryEntityEx.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryEntityEx.java
@@ -63,7 +63,7 @@
     }
 
     /** {@inheritDoc} */
-    public QueryEntity setNotNullFields(@Nullable Set<String> notNullFields) {
+    @Override public QueryEntity setNotNullFields(@Nullable Set<String> notNullFields) {
         this.notNullFields = notNullFields;
 
         return this;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java
index d68a6cb..b74e3df 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryField.java
@@ -70,6 +70,8 @@
      * @param typeName Class name for this field's values.
      * @param nullable Nullable flag.
      * @param dfltValue Default value.
+     * @param precision Precision.
+     * @param scale Scale.
      */
     public QueryField(String name, String typeName, boolean nullable, Object dfltValue, int precision, int scale) {
         this.name = name;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java
index a7710f9..2eaeb1f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryTypeDescriptorImpl.java
@@ -26,7 +26,9 @@
 import java.util.Map;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.cache.QueryIndexType;
-import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheObjectContext;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
@@ -34,6 +36,13 @@
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.NULL_KEY;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.NULL_VALUE;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.TOO_LONG_KEY;
+import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.TOO_LONG_VALUE;
+import static org.apache.ignite.internal.processors.query.QueryUtils.KEY_FIELD_NAME;
+import static org.apache.ignite.internal.processors.query.QueryUtils.VAL_FIELD_NAME;
+
 /**
  * Descriptor of type.
  */
@@ -110,13 +119,18 @@
     /** */
     private List<GridQueryProperty> propsWithDefaultValue;
 
+    /** */
+    @Nullable private CacheObjectContext coCtx;
+
     /**
      * Constructor.
      *
      * @param cacheName Cache name.
+     * @param coCtx Cache object context.
      */
-    public QueryTypeDescriptorImpl(String cacheName) {
+    public QueryTypeDescriptorImpl(String cacheName, @Nullable CacheObjectContext coCtx) {
         this.cacheName = cacheName;
+        this.coCtx = coCtx;
     }
 
     /**
@@ -368,6 +382,19 @@
      * @throws IgniteCheckedException In case of error.
      */
     public void addProperty(GridQueryProperty prop, boolean failOnDuplicate) throws IgniteCheckedException {
+        addProperty(prop, failOnDuplicate, true);
+    }
+
+    /**
+     * Adds property to the type descriptor.
+     *
+     * @param prop Property.
+     * @param failOnDuplicate Fail on duplicate flag.
+     * @param isField {@code True} if {@code prop} if field, {@code False} if prop is "_KEY" or "_VAL".
+     * @throws IgniteCheckedException In case of error.
+     */
+    public void addProperty(GridQueryProperty prop, boolean failOnDuplicate, boolean isField)
+        throws IgniteCheckedException {
         String name = prop.name();
 
         if (props.put(name, prop) != null && failOnDuplicate)
@@ -382,6 +409,12 @@
 
             validateProps.add(prop);
         }
+        else if (prop.precision() != -1) {
+            if (validateProps == null)
+                validateProps = new ArrayList<>();
+
+            validateProps.add(prop);
+        }
 
         if (prop.defaultValue() != null) {
             if (propsWithDefaultValue == null)
@@ -390,7 +423,8 @@
             propsWithDefaultValue.add(prop);
         }
 
-        fields.put(name, prop.type());
+        if (isField)
+            fields.put(name, prop.type());
     }
 
     /**
@@ -525,26 +559,35 @@
 
             Object propVal;
 
-            int errCode;
+            boolean isKey = false;
 
-            if (F.eq(prop.name(), keyFieldName)) {
-                propVal = key;
+            if (F.eq(prop.name(), keyFieldName) || (keyFieldName == null && F.eq(prop.name(), KEY_FIELD_NAME))) {
+                propVal = key instanceof KeyCacheObject && coCtx != null ?
+                    ((KeyCacheObject)key).value(coCtx, true) : key;
 
-                errCode = IgniteQueryErrorCode.NULL_KEY;
+                isKey = true;
             }
-            else if (F.eq(prop.name(), valFieldName)) {
-                propVal = val;
-
-                errCode = IgniteQueryErrorCode.NULL_VALUE;
+            else if (F.eq(prop.name(), valFieldName) || (valFieldName == null && F.eq(prop.name(), VAL_FIELD_NAME))) {
+                propVal = val instanceof CacheObject && coCtx != null ?
+                    ((CacheObject)val).value(coCtx, true) : val;
             }
             else {
                 propVal = prop.value(key, val);
-
-                errCode = IgniteQueryErrorCode.NULL_VALUE;
             }
 
-            if (propVal == null)
-                throw new IgniteSQLException("Null value is not allowed for column '" + prop.name() + "'", errCode);
+            if (propVal == null && prop.notNull()) {
+                throw new IgniteSQLException("Null value is not allowed for column '" + prop.name() + "'",
+                    isKey ? NULL_KEY : NULL_VALUE);
+            }
+
+            if (prop.precision() != -1 &&
+                propVal != null &&
+                String.class == propVal.getClass() && 
+                ((String)propVal).length() > prop.precision()) {
+                throw new IgniteSQLException("Value for a column '" + prop.name() + "' is too long. " + 
+                    "Maximum length: " + prop.precision() + ", actual length: " + ((CharSequence)propVal).length(),
+                    isKey ? TOO_LONG_KEY : TOO_LONG_VALUE);
+            }
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java
index 3f40990..42de312 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/QueryUtils.java
@@ -27,6 +27,7 @@
 import java.util.Date;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -61,7 +62,6 @@
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.A;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
@@ -268,7 +268,8 @@
         normalEntity.setValueFieldName(entity.getValueFieldName());
         normalEntity.setNotNullFields(entity.getNotNullFields());
         normalEntity.setDefaultFieldValues(entity.getDefaultFieldValues());
-        normalEntity.setDecimalInfo(entity.getDecimalInfo());
+        normalEntity.setFieldsPrecision(entity.getFieldsPrecision());
+        normalEntity.setFieldsScale(entity.getFieldsScale());
 
         // Normalize table name.
         String normalTblName = entity.getTableName();
@@ -407,7 +408,7 @@
 
         CacheObjectContext coCtx = binaryEnabled ? ctx.cacheObjects().contextForCache(ccfg) : null;
 
-        QueryTypeDescriptorImpl desc = new QueryTypeDescriptorImpl(cacheName);
+        QueryTypeDescriptorImpl desc = new QueryTypeDescriptorImpl(cacheName, coCtx);
 
         desc.schemaName(schemaName);
 
@@ -543,10 +544,12 @@
      */
     public static void processBinaryMeta(GridKernalContext ctx, QueryEntity qryEntity, QueryTypeDescriptorImpl d)
         throws IgniteCheckedException {
+        LinkedHashMap<String, String> fields = qryEntity.getFields();
         Set<String> keyFields = qryEntity.getKeyFields();
         Set<String> notNulls = qryEntity.getNotNullFields();
         Map<String, Object> dlftVals = qryEntity.getDefaultFieldValues();
-        Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo  = qryEntity.getDecimalInfo();
+        Map<String, Integer> precision  = qryEntity.getFieldsPrecision();
+        Map<String, Integer> scale = qryEntity.getFieldsScale();
 
         // We have to distinguish between empty and null keyFields when the key is not of SQL type -
         // when a key is not of SQL type, absence of a field in nonnull keyFields tell us that this field
@@ -559,13 +562,13 @@
         if (hasKeyFields && !isKeyClsSqlType) {
             //ensure that 'keyFields' is case sensitive subset of 'fields'
             for (String keyField : keyFields) {
-                if (!qryEntity.getFields().containsKey(keyField))
+                if (!fields.containsKey(keyField))
                     throw new IgniteCheckedException("QueryEntity 'keyFields' property must be a subset of keys " +
                         "from 'fields' property (case sensitive): " + keyField);
             }
         }
 
-        for (Map.Entry<String, String> entry : qryEntity.getFields().entrySet()) {
+        for (Map.Entry<String, String> entry : fields.entrySet()) {
             Boolean isKeyField;
 
             if (isKeyClsSqlType) // We don't care about keyFields in this case - it might be null, or empty, or anything
@@ -577,22 +580,73 @@
 
             Object dfltVal = dlftVals != null ? dlftVals.get(entry.getKey()) : null;
 
-            IgniteBiTuple<Integer, Integer> precisionAndScale =
-                decimalInfo != null ? decimalInfo.get(entry.getKey()) : null;
-
             QueryBinaryProperty prop = buildBinaryProperty(ctx, entry.getKey(),
                 U.classForName(entry.getValue(), Object.class, true),
                 d.aliases(), isKeyField, notNull, dfltVal,
-                precisionAndScale != null ? precisionAndScale.get1() : -1,
-                precisionAndScale != null ? precisionAndScale.get2() : -1);
+                precision == null ? -1 : precision.getOrDefault(entry.getKey(), -1),
+                scale == null ? -1 : scale.getOrDefault(entry.getKey(), -1));
 
             d.addProperty(prop, false);
         }
 
+        String keyFieldName = qryEntity.getKeyFieldName();
+
+        if (keyFieldName == null)
+            keyFieldName = KEY_FIELD_NAME;
+
+        if (!F.isEmpty(precision) && precision.containsKey(keyFieldName) &&
+            !fields.containsKey(keyFieldName)) {
+            addKeyValueValidationProperty(ctx, qryEntity, d, keyFieldName, true);
+        }
+
+        String valFieldName = qryEntity.getValueFieldName();
+
+        if (valFieldName == null)
+            valFieldName = VAL_FIELD_NAME;
+
+        if (!F.isEmpty(precision) && precision.containsKey(valFieldName) &&
+            !fields.containsKey(valFieldName)) {
+            addKeyValueValidationProperty(ctx, qryEntity, d, valFieldName, false);
+        }
+
         processIndexes(qryEntity, d);
     }
 
     /**
+     * Add validate property to QueryTypeDescriptor.
+     * 
+     * @param ctx Kernel context.
+     * @param qryEntity Query entity.
+     * @param d Descriptor.
+     * @param name Field name.
+     * @throws IgniteCheckedException
+     */
+    private static void addKeyValueValidationProperty(GridKernalContext ctx, QueryEntity qryEntity, QueryTypeDescriptorImpl d, 
+        String name, boolean isKey) throws IgniteCheckedException {
+
+        Map<String, Object> dfltVals = qryEntity.getDefaultFieldValues();
+        Map<String, Integer> precision  = qryEntity.getFieldsPrecision();
+        Map<String, Integer> scale = qryEntity.getFieldsScale();
+
+        String typeName = isKey ? qryEntity.getKeyType() : qryEntity.getValueType();
+
+        Object dfltVal = dfltVals.get(name);
+
+        QueryBinaryProperty prop = buildBinaryProperty(
+            ctx, 
+            name,
+            U.classForName(typeName, Object.class, true),
+            d.aliases(), 
+            isKey, 
+            true, 
+            dfltVal,
+            precision == null ? -1 : precision.getOrDefault(name, -1),
+            scale == null ? -1 : scale.getOrDefault(name, -1));
+
+        d.addProperty(prop, true, false);
+    }
+
+    /**
      * Processes declarative metadata for binary object.
      *
      * @param qryEntity Declared metadata.
@@ -731,11 +785,10 @@
      * @param precision Precision.
      * @param scale Scale.
      * @return Binary property.
-     * @throws IgniteCheckedException On error.
      */
-    public static QueryBinaryProperty buildBinaryProperty(GridKernalContext ctx, String pathStr, Class<?> resType,
-        Map<String, String> aliases, @Nullable Boolean isKeyField, boolean notNull, Object dlftVal,
-        int precision, int scale) throws IgniteCheckedException {
+    public static QueryBinaryProperty buildBinaryProperty(GridKernalContext ctx, String pathStr,
+        Class<?> resType, Map<String, String> aliases, @Nullable Boolean isKeyField, boolean notNull, Object dlftVal,
+        int precision, int scale) {
         String[] path = pathStr.split("\\.");
 
         QueryBinaryProperty res = null;
@@ -1215,6 +1268,26 @@
                         ", queryIdx=" + idx + ']');
             }
         }
+
+        Map<String, Object> dfltVals = entity.getDefaultFieldValues();
+        Map<String, Integer> precision = entity.getFieldsPrecision();
+
+        if (!F.isEmpty(precision)) {
+            for (String fld : precision.keySet()) {
+                if (!dfltVals.containsKey(fld))
+                    continue;
+
+                Object dfltVal = dfltVals.get(fld);
+
+                if (dfltVal == null)
+                    continue;
+
+                if (dfltVal.toString().length() > precision.get(fld)) {
+                    throw new IgniteSQLException("Default value '" + dfltVal +
+                        "' is longer than maximum length " + precision.get(fld));
+                }
+            }
+        }
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/UpdateSourceIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/UpdateSourceIterator.java
new file mode 100644
index 0000000..69feb0f
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/UpdateSourceIterator.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query;
+
+import java.util.Iterator;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.util.lang.GridCloseableIterator;
+import org.jetbrains.annotations.NotNull;
+
+/** */
+public interface UpdateSourceIterator<T> extends GridCloseableIterator<T> {
+    /**
+     * @return Operation.
+     */
+    public EnlistOperation operation();
+
+    /**
+     * Callback method which should be called before moving iteration into another thread.
+     */
+    public default void beforeDetach() {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override default void close() throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override default boolean isClosed() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override default void removeX() throws IgniteCheckedException {
+        throw new UnsupportedOperationException("remove");
+    }
+
+    /** {@inheritDoc} */
+    @Override default boolean hasNext() {
+        try {
+            return hasNextX();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override default T next() {
+        try {
+            return nextX();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override default void remove() {
+        try {
+            removeX();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException(e);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override @NotNull default Iterator<T> iterator() {
+        return this;
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryFailResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryFailResponse.java
index 1b759bb..ef26d2a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryFailResponse.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryFailResponse.java
@@ -59,7 +59,7 @@
      */
     public GridQueryFailResponse(long qryReqId, Throwable err) {
         this.qryReqId = qryReqId;
-        this.errMsg = err.getClass() + ":" + err.getMessage();
+        this.errMsg = err.getMessage();
         this.failCode = err instanceof QueryCancelledException ? CANCELLED_BY_ORIGINATOR : GENERAL_ERROR;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryNextPageResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryNextPageResponse.java
index 4d918a0..6b976c2 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryNextPageResponse.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/messages/GridQueryNextPageResponse.java
@@ -67,9 +67,15 @@
     /** */
     private AffinityTopologyVersion retry;
 
+    /** Retry cause description*/
+    private String retryCause;
+
     /** Last page flag. */
     private boolean last;
 
+    /** Remove mapping flag. */
+    private boolean removeMapping;
+
     /**
      * For {@link Externalizable}.
      */
@@ -230,6 +236,18 @@
                     return false;
 
                 writer.incrementState();
+
+            case 9:
+                if (!writer.writeString("retryCause", retryCause))
+                    return false;
+
+                writer.incrementState();
+
+            case 10:
+                if (!writer.writeBoolean("removeMapping", removeMapping))
+                    return false;
+
+                writer.incrementState();
         }
 
         return true;
@@ -314,6 +332,23 @@
                     return false;
 
                 reader.incrementState();
+
+            case 9:
+                retryCause = reader.readString("retryCause");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 10:
+                removeMapping = reader.readBoolean("removeMapping");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridQueryNextPageResponse.class);
@@ -326,7 +361,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 9;
+        return 11;
     }
 
     /**
@@ -344,6 +379,20 @@
     }
 
     /**
+     * @return Retry Ccause message.
+     */
+    public String retryCause() {
+        return retryCause;
+    }
+
+    /**
+     * @param retryCause Retry Ccause message.
+     */
+    public void retryCause(String retryCause){
+        this.retryCause = retryCause;
+    }
+
+    /**
      * @return Last page flag.
      */
     public boolean last() {
@@ -357,6 +406,20 @@
         this.last = last;
     }
 
+    /**
+     * @param removeMapping Remove mapping flag.
+     */
+    public void removeMapping(boolean removeMapping) {
+        this.removeMapping = removeMapping;
+    }
+
+    /**
+     * @return Remove mapping flag.
+     */
+    public boolean removeMapping() {
+        return removeMapping;
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(GridQueryNextPageResponse.class, this,
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaIndexCacheVisitorImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaIndexCacheVisitorImpl.java
index 1775c79..4ab3c83 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaIndexCacheVisitorImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaIndexCacheVisitorImpl.java
@@ -244,7 +244,7 @@
                     entry.updateIndex(rowFilter, clo);
                 }
                 finally {
-                    cctx.evicts().touch(entry, AffinityTopologyVersion.NONE);
+                    entry.touch(AffinityTopologyVersion.NONE);
                 }
 
                 break;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaOperationWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaOperationWorker.java
index 0bf4d22..3741a5d 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaOperationWorker.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/query/schema/SchemaOperationWorker.java
@@ -177,7 +177,7 @@
     /**
      * Cancel operation.
      */
-    public void cancel() {
+    @Override public void cancel() {
         if (cancelToken.cancel())
             super.cancel();
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java
index d7a30f9..b6c1310 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/GridRestProcessor.java
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.internal.processors.rest;
 
+import java.io.PrintWriter;
+import java.io.StringWriter;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.ArrayList;
@@ -349,7 +351,11 @@
                             .a(", params=").a(tskReq.params());
                     }
 
-                    sb.a(", err=").a(e.getMessage() != null ? e.getMessage() : e.getClass().getName()).a(']');
+                    sb.a(", err=")
+                        .a(e.getMessage() != null ? e.getMessage() : e.getClass().getName())
+                        .a(", trace=")
+                        .a(getErrorMessage(e))
+                        .a(']');
 
                     res = new GridRestResponse(STATUS_FAILED, sb.toString());
                 }
@@ -367,6 +373,21 @@
     }
 
     /**
+     * @param th Th.
+     * @return Stack trace
+     */
+    private String getErrorMessage(Throwable th) {
+        if (th == null)
+            return "";
+
+        StringWriter writer = new StringWriter();
+
+        th.printStackTrace(new PrintWriter(writer));
+
+        return writer.toString();
+    }
+
+    /**
      * @param req Request.
      * @return Not null session.
      * @throws IgniteCheckedException If failed.
@@ -867,6 +888,12 @@
 
                 break;
 
+            case CLUSTER_ACTIVE:
+            case CLUSTER_INACTIVE:
+                perm = SecurityPermission.ADMIN_OPS;
+
+                break;
+
             case CACHE_METRICS:
             case CACHE_SIZE:
             case CACHE_METADATA:
@@ -880,8 +907,6 @@
             case NAME:
             case LOG:
             case CLUSTER_CURRENT_STATE:
-            case CLUSTER_ACTIVE:
-            case CLUSTER_INACTIVE:
             case AUTHENTICATE:
             case ADD_USER:
             case REMOVE_USER:
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/client/message/GridClientNodeMetricsBean.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/client/message/GridClientNodeMetricsBean.java
index 4138c24..829728b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/client/message/GridClientNodeMetricsBean.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/client/message/GridClientNodeMetricsBean.java
@@ -1231,12 +1231,12 @@
     }
 
     /** {@inheritDoc} */
-    public int hashCode() {
+    @Override public int hashCode() {
         return System.identityHashCode(this);
     }
 
     /** {@inheritDoc} */
-    public boolean equals(Object obj) {
+    @Override public boolean equals(Object obj) {
         if (this == obj)
             return true;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestParser.java
index 65eb3c3..21fdc37 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestParser.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestParser.java
@@ -851,7 +851,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(GridTcpRestParser.class, this);
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestProtocol.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestProtocol.java
index f08cc25..caedd1a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestProtocol.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/rest/protocols/tcp/GridTcpRestProtocol.java
@@ -129,14 +129,12 @@
                 "[firstPort=" + cfg.getPort() + ", lastPort=" + lastPort + ", host=" + host + ']');
         }
         catch (SSLException e) {
-            U.warn(log, "Failed to start " + name() + " protocol on port " + port + ": " + e.getMessage(),
-                "Failed to start " + name() + " protocol on port " + port + ". Check if SSL context factory is " +
-                    "properly configured.");
+            U.warn(log, "Failed to start " + name() + " protocol on port " + port + ". Check if SSL context factory " +
+                "is properly configured: " + e.getMessage());
         }
         catch (IOException e) {
-            U.warn(log, "Failed to start " + name() + " protocol on port " + port + ": " + e.getMessage(),
-                "Failed to start " + name() + " protocol on port " + port + ". " +
-                    "Check restTcpHost configuration property.");
+            U.warn(log, "Failed to start " + name() + " protocol on port " + port + ". " +
+                "Check restTcpHost configuration property: " + e.getMessage());
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java
index 949c0c7..2dc7e31 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/service/GridServiceProcessor.java
@@ -69,7 +69,6 @@
 import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
 import org.apache.ignite.internal.processors.cache.binary.MetadataUpdateAcceptedMessage;
 import org.apache.ignite.internal.processors.cache.binary.MetadataUpdateProposedMessage;
-import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
 import org.apache.ignite.internal.processors.cache.query.CacheQuery;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager;
@@ -79,7 +78,6 @@
 import org.apache.ignite.internal.processors.timeout.GridTimeoutObject;
 import org.apache.ignite.internal.util.GridEmptyIterator;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
-import org.apache.ignite.internal.util.SerializableTransient;
 import org.apache.ignite.internal.util.future.GridCompoundFuture;
 import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
@@ -1759,8 +1757,8 @@
                         affReadyFut.get();
                     }
                     catch (IgniteCheckedException e) {
-                        U.error(log, "Failed to wait for affinity ready future " +
-                            "(the assignment will be recalculated anyway)", e);
+                        U.warn(log, "Failed to wait for affinity ready future " +
+                            "(the assignment will be recalculated anyway):" + e.toString());
                     }
                 }
 
@@ -1951,7 +1949,7 @@
 
                         @Override public void onTimeout() {
                             depExe.execute(new Runnable() {
-                                public void run() {
+                                @Override public void run() {
                                     onReassignmentFailed(topVer, retries);
                                 }
                             });
@@ -2127,7 +2125,6 @@
     /**
      */
     @GridInternal
-    @SerializableTransient(methodName = "serializableTransient")
     private static class ServiceTopologyCallable implements IgniteCallable<Map<UUID, Integer>> {
         /** */
         private static final long serialVersionUID = 0L;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/subscription/GridInternalSubscriptionProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/subscription/GridInternalSubscriptionProcessor.java
index 6c0942a..6db7fa5 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/subscription/GridInternalSubscriptionProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/subscription/GridInternalSubscriptionProcessor.java
@@ -20,6 +20,7 @@
 import java.util.List;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.processors.GridProcessorAdapter;
+import org.apache.ignite.internal.processors.cache.persistence.DatabaseLifecycleListener;
 import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetastorageLifecycleListener;
 import org.jetbrains.annotations.NotNull;
 
@@ -35,6 +36,10 @@
     /** */
     private List<MetastorageLifecycleListener> metastorageListeners = new ArrayList<>();
 
+    /** */
+    private List<DatabaseLifecycleListener> databaseListeners = new ArrayList<>();
+
+
     /**
      * @param ctx Kernal context.
      */
@@ -54,4 +59,17 @@
     public List<MetastorageLifecycleListener> getMetastorageSubscribers() {
         return metastorageListeners;
     }
+
+    /** */
+    public void registerDatabaseListener(@NotNull DatabaseLifecycleListener databaseListener) {
+        if (databaseListener == null)
+            throw new NullPointerException("Database subscriber should be not-null.");
+
+        databaseListeners.add(databaseListener);
+    }
+
+    /** */
+    public List<DatabaseLifecycleListener> getDatabaseListeners() {
+        return databaseListeners;
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java
index 2f0aa7b..9007472 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskProcessor.java
@@ -537,8 +537,12 @@
 
         String taskClsName;
 
-        if (task != null)
-            taskClsName = task.getClass().getName();
+        if (task != null) {
+            if (task instanceof GridPeerDeployAware)
+                taskClsName = ((GridPeerDeployAware)task).deployClass().getName();
+            else
+                taskClsName = task.getClass().getName();
+        }
         else
             taskClsName = taskCls != null ? taskCls.getName() : taskName;
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java
index 92bcd41..bffcecd 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskThreadContextKey.java
@@ -27,6 +27,9 @@
     /** No failover flag. */
     TC_NO_FAILOVER,
 
+    /** No result cache flag. */
+    TC_NO_RESULT_CACHE,
+
     /** Projection for the task. */
     TC_SUBGRID,
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
index 78efd2d..b73737c 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/task/GridTaskWorker.java
@@ -84,6 +84,7 @@
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.marshaller.Marshaller;
+import org.apache.ignite.marshaller.MarshallerUtils;
 import org.apache.ignite.resources.TaskContinuousMapperResource;
 import org.jetbrains.annotations.Nullable;
 
@@ -103,6 +104,7 @@
 import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_IO_POLICY;
 import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_FAILOVER;
+import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_RESULT_CACHE;
 
 /**
  * Grid task worker. Handles full task life cycle.
@@ -314,7 +316,11 @@
 
         marsh = ctx.config().getMarshaller();
 
-        resCache = dep.annotation(taskCls, ComputeTaskNoResultCache.class) == null;
+        boolean noResCacheAnnotation = dep.annotation(taskCls, ComputeTaskNoResultCache.class) != null;
+
+        Boolean noResCacheCtxFlag = getThreadContext(TC_NO_RESULT_CACHE);
+
+        resCache = !(noResCacheAnnotation || (noResCacheCtxFlag != null && noResCacheCtxFlag));
 
         Boolean noFailover = getThreadContext(TC_NO_FAILOVER);
 
@@ -1370,38 +1376,45 @@
 
                     boolean forceLocDep = internal || !ctx.deploy().enabled();
 
-                    req = new GridJobExecuteRequest(
-                        ses.getId(),
-                        res.getJobContext().getJobId(),
-                        ses.getTaskName(),
-                        ses.getUserVersion(),
-                        ses.getTaskClassName(),
-                        loc ? null : U.marshal(marsh, res.getJob()),
-                        loc ? res.getJob() : null,
-                        ses.getStartTime(),
-                        timeout,
-                        ses.getTopology(),
-                        loc ? ses.getTopologyPredicate() : null,
-                        loc ? null : U.marshal(marsh, ses.getTopologyPredicate()),
-                        loc ? null : U.marshal(marsh, ses.getJobSiblings()),
-                        loc ? ses.getJobSiblings() : null,
-                        loc ? null : U.marshal(marsh, sesAttrs),
-                        loc ? sesAttrs : null,
-                        loc ? null : U.marshal(marsh, jobAttrs),
-                        loc ? jobAttrs : null,
-                        ses.getCheckpointSpi(),
-                        dep.classLoaderId(),
-                        dep.deployMode(),
-                        continuous,
-                        dep.participants(),
-                        forceLocDep,
-                        ses.isFullSupport(),
-                        internal,
-                        subjId,
-                        affCacheIds,
-                        affPartId,
-                        mapTopVer,
-                        ses.executorName());
+                    try {
+                        MarshallerUtils.jobReceiverVersion(node.version());
+
+                        req = new GridJobExecuteRequest(
+                            ses.getId(),
+                            res.getJobContext().getJobId(),
+                            ses.getTaskName(),
+                            ses.getUserVersion(),
+                            ses.getTaskClassName(),
+                            loc ? null : U.marshal(marsh, res.getJob()),
+                            loc ? res.getJob() : null,
+                            ses.getStartTime(),
+                            timeout,
+                            ses.getTopology(),
+                            loc ? ses.getTopologyPredicate() : null,
+                            loc ? null : U.marshal(marsh, ses.getTopologyPredicate()),
+                            loc ? null : U.marshal(marsh, ses.getJobSiblings()),
+                            loc ? ses.getJobSiblings() : null,
+                            loc ? null : U.marshal(marsh, sesAttrs),
+                            loc ? sesAttrs : null,
+                            loc ? null : U.marshal(marsh, jobAttrs),
+                            loc ? jobAttrs : null,
+                            ses.getCheckpointSpi(),
+                            dep.classLoaderId(),
+                            dep.deployMode(),
+                            continuous,
+                            dep.participants(),
+                            forceLocDep,
+                            ses.isFullSupport(),
+                            internal,
+                            subjId,
+                            affCacheIds,
+                            affPartId,
+                            mapTopVer,
+                            ses.executorName());
+                    }
+                    finally {
+                        MarshallerUtils.jobReceiverVersion(null);
+                    }
 
                     if (loc)
                         ctx.job().processJobExecuteRequest(ctx.discovery().localNode(), req);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/timeout/GridTimeoutProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/timeout/GridTimeoutProcessor.java
index 25151cf..de9e8eb 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/processors/timeout/GridTimeoutProcessor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/timeout/GridTimeoutProcessor.java
@@ -20,9 +20,11 @@
 import java.io.Closeable;
 import java.util.Comparator;
 import java.util.Iterator;
+import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.GridProcessorAdapter;
 import org.apache.ignite.internal.util.GridConcurrentSkipListSet;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
@@ -30,6 +32,8 @@
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.lang.IgniteBiInClosure;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.thread.IgniteThread;
 
@@ -138,6 +142,57 @@
     }
 
     /**
+     * Wait for a future (listen with timeout).
+     * @param fut Future.
+     * @param timeout Timeout millis. -1 means expired timeout, 0 - no timeout.
+     * @param clo Finish closure. First argument contains error on future or null if no errors,
+     * second is {@code true} if wait timed out.
+     */
+    public void waitAsync(final IgniteInternalFuture<?> fut,
+        long timeout,
+        IgniteBiInClosure<IgniteCheckedException, Boolean> clo) {
+        if (timeout == -1) {
+            clo.apply(null, false);
+
+            return;
+        }
+
+        if (fut == null || fut.isDone())
+            clo.apply(null, false);
+        else {
+            WaitFutureTimeoutObject timeoutObj = null;
+
+            if (timeout > 0) {
+                timeoutObj = new WaitFutureTimeoutObject(fut, timeout, clo);
+
+                addTimeoutObject(timeoutObj);
+            }
+
+            final WaitFutureTimeoutObject finalTimeoutObj = timeoutObj;
+
+            fut.listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
+                @Override public void apply(IgniteInternalFuture<?> fut) {
+                    if (finalTimeoutObj != null && !finalTimeoutObj.finishGuard.compareAndSet(false, true))
+                        return;
+
+                    try {
+                        fut.get();
+
+                        clo.apply(null, false);
+                    }
+                    catch (IgniteCheckedException e) {
+                        clo.apply(e, false);
+                    }
+                    finally {
+                        if (finalTimeoutObj != null)
+                            removeTimeoutObject(finalTimeoutObj);
+                    }
+                }
+            });
+        }
+    }
+
+    /**
      * Handles job timeouts.
      */
     private class TimeoutWorker extends GridWorker {
@@ -309,4 +364,43 @@
             return S.toString(CancelableTask.class, this);
         }
     }
+
+    /**
+     *
+     */
+    private static class WaitFutureTimeoutObject extends GridTimeoutObjectAdapter {
+        /** */
+        private final IgniteInternalFuture<?> fut;
+
+        /** */
+        private final AtomicBoolean finishGuard = new AtomicBoolean();
+
+        /** */
+        private final IgniteBiInClosure<IgniteCheckedException, Boolean> clo;
+
+        /**
+         * @param fut Future.
+         * @param timeout Timeout.
+         * @param clo Closure to call on timeout.
+         */
+        WaitFutureTimeoutObject(IgniteInternalFuture<?> fut, long timeout,
+            IgniteBiInClosure<IgniteCheckedException, Boolean> clo) {
+            super(timeout);
+
+            this.fut = fut;
+
+            this.clo = clo;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onTimeout() {
+            if (!fut.isDone() && finishGuard.compareAndSet(false, true))
+                clo.apply(null, true);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(WaitFutureTimeoutObject.class, this);
+        }
+    }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java
index be76482..658e176 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlKeyword.java
@@ -18,7 +18,9 @@
 package org.apache.ignite.internal.sql;
 
 import java.lang.reflect.Field;
+import java.util.Collections;
 import java.util.HashSet;
+import java.util.Set;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.internal.util.typedef.F;
 
@@ -38,6 +40,9 @@
     /** Keyword: BATCH_SIZE */
     public static final String BATCH_SIZE = "BATCH_SIZE";
 
+    /** Keyword: BEGIN. */
+    public static final String BEGIN = "BEGIN";
+
     /** Keyword: BIGINT */
     public static final String BIGINT = "BIGINT";
 
@@ -65,6 +70,9 @@
     /** Keyword: CHARSET. */
     public static final String CHARSET = "CHARSET";
 
+    /** Keyword: COMMIT. */
+    public static final String COMMIT = "COMMIT";
+
     /** Keyword: COPY. */
     public static final String COPY = "COPY";
 
@@ -206,6 +214,9 @@
     /** Keyword: RESTRICT. */
     public static final String RESTRICT = "RESTRICT";
 
+    /** Keyword: ROLLBACK. */
+    public static final String ROLLBACK = "ROLLBACK";
+
     /** Keyword: SET. */
     public static final String SET = "SET";
 
@@ -221,6 +232,9 @@
     /** Keyword: SPATIAL. */
     public static final String SPATIAL = "SPATIAL";
 
+    /** Keyword: START. */
+    public static final String START = "START";
+
     /** Keyword: STREAMING. */
     public static final String STREAMING = "STREAMING";
 
@@ -236,6 +250,9 @@
     /** Keyword: TINYINT. */
     public static final String TINYINT = "TINYINT";
 
+    /** Keyword: TRANSACTION. */
+    public static final String TRANSACTION = "TRANSACTION";
+
     /** Keyword: UNIQUE. */
     public static final String UNIQUE = "UNIQUE";
 
@@ -251,6 +268,9 @@
     /** Keyword: VARCHAR_CASESENSITIVE. */
     public static final String VARCHAR_CASESENSITIVE = "VARCHAR_CASESENSITIVE";
 
+    /** Keyword: WORK. */
+    public static final String WORK = "WORK";
+
     /** Keyword: YEAR. */
     public static final String YEAR = "YEAR";
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java
index d091fc4..361bd43 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexer.java
@@ -210,29 +210,29 @@
     }
 
     /** {@inheritDoc} */
-    public String sql() {
+    @Override public String sql() {
         return sql;
     }
 
     /** {@inheritDoc} */
-    public String token() {
+    @Override public String token() {
         return token;
     }
 
     /** {@inheritDoc} */
-    public char tokenFirstChar() {
+    @Override public char tokenFirstChar() {
         assert tokenTyp != SqlLexerTokenType.EOF;
 
         return token.charAt(0);
     }
 
     /** {@inheritDoc} */
-    public int tokenPosition() {
+    @Override public int tokenPosition() {
         return tokenPos;
     }
 
     /** {@inheritDoc} */
-    public SqlLexerTokenType tokenType() {
+    @Override public SqlLexerTokenType tokenType() {
         return tokenTyp;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java
index e697473..de5b63e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlLexerLookAheadToken.java
@@ -49,7 +49,7 @@
     }
 
     /** {@inheritDoc} */
-    public String sql() {
+    @Override public String sql() {
         return sql;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java
index d46863a..0be2623 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParser.java
@@ -17,17 +17,22 @@
 
 package org.apache.ignite.internal.sql;
 
+import org.apache.ignite.internal.sql.command.SqlBeginTransactionCommand;
 import org.apache.ignite.internal.sql.command.SqlAlterTableCommand;
 import org.apache.ignite.internal.sql.command.SqlAlterUserCommand;
 import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.sql.command.SqlCommand;
+import org.apache.ignite.internal.sql.command.SqlCommitTransactionCommand;
 import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand;
 import org.apache.ignite.internal.sql.command.SqlCreateUserCommand;
 import org.apache.ignite.internal.sql.command.SqlDropIndexCommand;
 import org.apache.ignite.internal.sql.command.SqlSetStreamingCommand;
+import org.apache.ignite.internal.sql.command.SqlRollbackTransactionCommand;
 import org.apache.ignite.internal.sql.command.SqlDropUserCommand;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.sql.SqlKeyword.BEGIN;
+import static org.apache.ignite.internal.sql.SqlKeyword.COMMIT;
 import static org.apache.ignite.internal.sql.SqlKeyword.ALTER;
 import static org.apache.ignite.internal.sql.SqlKeyword.COPY;
 import static org.apache.ignite.internal.sql.SqlKeyword.CREATE;
@@ -35,15 +40,21 @@
 import static org.apache.ignite.internal.sql.SqlKeyword.HASH;
 import static org.apache.ignite.internal.sql.SqlKeyword.INDEX;
 import static org.apache.ignite.internal.sql.SqlKeyword.PRIMARY;
+import static org.apache.ignite.internal.sql.SqlKeyword.ROLLBACK;
 import static org.apache.ignite.internal.sql.SqlKeyword.SET;
 import static org.apache.ignite.internal.sql.SqlKeyword.SPATIAL;
+import static org.apache.ignite.internal.sql.SqlKeyword.START;
+import static org.apache.ignite.internal.sql.SqlKeyword.TRANSACTION;
 import static org.apache.ignite.internal.sql.SqlKeyword.STREAMING;
 import static org.apache.ignite.internal.sql.SqlKeyword.TABLE;
 import static org.apache.ignite.internal.sql.SqlKeyword.UNIQUE;
+import static org.apache.ignite.internal.sql.SqlKeyword.WORK;
 import static org.apache.ignite.internal.sql.SqlKeyword.USER;
 import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnexpectedToken;
 import static org.apache.ignite.internal.sql.SqlParserUtils.errorUnsupportedIfMatchesKeyword;
 import static org.apache.ignite.internal.sql.SqlParserUtils.matchesKeyword;
+import static org.apache.ignite.internal.sql.SqlParserUtils.skipIfMatchesKeyword;
+import static org.apache.ignite.internal.sql.SqlParserUtils.skipIfMatchesOptionalKeyword;
 
 /**
  * SQL parser.
@@ -102,6 +113,16 @@
                     SqlCommand cmd = null;
 
                     switch (lex.token()) {
+                        case BEGIN:
+                            cmd = processBegin();
+
+                            break;
+
+                        case COMMIT:
+                            cmd = processCommit();
+
+                            break;
+
                         case CREATE:
                             cmd = processCreate();
 
@@ -112,6 +133,16 @@
 
                             break;
 
+                        case ROLLBACK:
+                            cmd = processRollback();
+
+                            break;
+
+                        case START:
+                            cmd = processStart();
+
+                            break;
+
                         case COPY:
                             try {
                                 cmd = processCopy();
@@ -139,7 +170,7 @@
                         return cmd;
                     }
                     else
-                        throw errorUnexpectedToken(lex, CREATE, DROP, ALTER, COPY, SET);
+                        throw errorUnexpectedToken(lex, BEGIN, COMMIT, CREATE, DROP, ROLLBACK, COPY, SET, ALTER, START);
 
                 case QUOTED:
                 case MINUS:
@@ -154,6 +185,30 @@
     }
 
     /**
+     * Process BEGIN keyword.
+     *
+     * @return Command.
+     */
+    private SqlCommand processBegin() {
+        skipIfMatchesOptionalKeyword(lex, TRANSACTION);
+
+        skipIfMatchesOptionalKeyword(lex, WORK);
+
+        return new SqlBeginTransactionCommand();
+    }
+
+    /**
+     * Process COMMIT keyword.
+     *
+     * @return Command.
+     */
+    private SqlCommand processCommit() {
+        skipIfMatchesOptionalKeyword(lex, TRANSACTION);
+
+        return new SqlCommitTransactionCommand();
+    }
+
+    /**
      * Process SET keyword.
      *
      * @return Command.
@@ -214,7 +269,7 @@
             errorUnsupportedIfMatchesKeyword(lex, HASH, PRIMARY, UNIQUE);
         }
 
-        throw errorUnexpectedToken(lex, INDEX, SPATIAL);
+        throw errorUnexpectedToken(lex, INDEX, SPATIAL, USER);
     }
 
     /**
@@ -242,7 +297,29 @@
                 return cmd.parse(lex);
         }
 
-        throw errorUnexpectedToken(lex, INDEX);
+        throw errorUnexpectedToken(lex, INDEX, USER);
+    }
+
+    /**
+     * Process ROLLBACK keyword.
+     *
+     * @return Command.
+     */
+    private SqlCommand processRollback() {
+        skipIfMatchesOptionalKeyword(lex, TRANSACTION);
+
+        return new SqlRollbackTransactionCommand();
+    }
+
+    /**
+     * Process START keyword.
+     *
+     * @return Command.
+     */
+    private SqlCommand processStart() {
+        skipIfMatchesKeyword(lex, TRANSACTION);
+
+        return new SqlBeginTransactionCommand();
     }
 
     /**
@@ -270,6 +347,6 @@
                 return cmd.parse(lex);
         }
 
-        throw errorUnexpectedToken(lex, TABLE);
+        throw errorUnexpectedToken(lex, TABLE, USER);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java
index e99af1d..074bffa 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/SqlParserUtils.java
@@ -261,6 +261,20 @@
     }
 
     /**
+     * Skip token if it matches expected keyword by using lookahead.
+     * If next token is not what we expect, no shift is done.
+     *
+     * @param lex Lexer.
+     * @param expKeyword Expected keyword.
+     */
+    static void skipIfMatchesOptionalKeyword(SqlLexer lex, String expKeyword) {
+        SqlLexerToken nextTok = lex.lookAhead();
+
+        if (matchesKeyword(nextTok, expKeyword))
+            lex.shift();
+    }
+
+    /**
      * Skip next token if it matches expected type.
      *
      * @param lex Lexer.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBeginTransactionCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBeginTransactionCommand.java
new file mode 100644
index 0000000..e890cc4
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlBeginTransactionCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.sql.command;
+
+import org.apache.ignite.internal.sql.SqlLexer;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * BEGIN [TRANSACTION] command.
+ */
+public class SqlBeginTransactionCommand implements SqlCommand {
+    /** {@inheritDoc} */
+    @Override public SqlCommand parse(SqlLexer lex) {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String schemaName() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void schemaName(String schemaName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(SqlBeginTransactionCommand.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommitTransactionCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommitTransactionCommand.java
new file mode 100644
index 0000000..da14dea
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlCommitTransactionCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.sql.command;
+
+import org.apache.ignite.internal.sql.SqlLexer;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * COMMIT command.
+ */
+public class SqlCommitTransactionCommand implements SqlCommand {
+    /** {@inheritDoc} */
+    @Override public SqlCommand parse(SqlLexer lex) {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String schemaName() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void schemaName(String schemaName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(SqlCommitTransactionCommand.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlRollbackTransactionCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlRollbackTransactionCommand.java
new file mode 100644
index 0000000..341b794
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/sql/command/SqlRollbackTransactionCommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.sql.command;
+
+import org.apache.ignite.internal.sql.SqlLexer;
+import org.apache.ignite.internal.util.typedef.internal.S;
+
+/**
+ * ROLLBACK command.
+ */
+public class SqlRollbackTransactionCommand implements SqlCommand {
+    /** {@inheritDoc} */
+    @Override public SqlCommand parse(SqlLexer lex) {
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String schemaName() {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void schemaName(String schemaName) {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(SqlRollbackTransactionCommand.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/transactions/IgniteTxMvccVersionCheckedException.java b/modules/core/src/main/java/org/apache/ignite/internal/transactions/IgniteTxMvccVersionCheckedException.java
new file mode 100644
index 0000000..8a0a54b
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/transactions/IgniteTxMvccVersionCheckedException.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.transactions;
+
+import org.apache.ignite.IgniteCheckedException;
+
+/**
+ * Exception thrown whenever transaction enters an unknown state.
+ */
+public class IgniteTxMvccVersionCheckedException extends IgniteCheckedException {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /**
+     * Creates new exception with given error message.
+     *
+     * @param msg Error message.
+     */
+    public IgniteTxMvccVersionCheckedException(String msg) {
+        super(msg);
+    }
+
+    /**
+     * Creates new exception with given error message and optional nested exception.
+     *
+     * @param msg Error message.
+     * @param cause Optional nested exception (can be <tt>null</tt>).
+     */
+    public IgniteTxMvccVersionCheckedException(String msg, Throwable cause) {
+        super(msg, cause);
+    }
+}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridConcurrentSkipListSet.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridConcurrentSkipListSet.java
index a7a0ddc..5c1f95e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridConcurrentSkipListSet.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridConcurrentSkipListSet.java
@@ -164,7 +164,7 @@
     }
 
     /** {@inheritDoc} */
-    public boolean equals(Object o) {
+    @Override public boolean equals(Object o) {
         // Override AbstractSet version to avoid calling size()
         if (o == this)
             return true;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
index 396cf37..feae512 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridLogThrottle.java
@@ -48,8 +48,8 @@
     private static final int throttleCap = IgniteSystemProperties.getInteger(IGNITE_LOG_THROTTLE_CAPACITY, 128);
 
     /** Errors. */
-    private static final ConcurrentMap<IgniteBiTuple<Class<? extends Throwable>, String>, Long> errors =
-        new ConcurrentLinkedHashMap<>(throttleCap, 1f, DFLT_CONCUR_LVL, throttleCap);
+    private static volatile ConcurrentMap<IgniteBiTuple<Class<? extends Throwable>, String>, Long> errors =
+        new ConcurrentLinkedHashMap<>(throttleCap, 0.75f, DFLT_CONCUR_LVL, throttleCap);
 
     /**
      * Sets system-wide log throttle timeout.
@@ -88,7 +88,7 @@
     public static void error(@Nullable IgniteLogger log, @Nullable Throwable e, String msg) {
         assert !F.isEmpty(msg);
 
-        log(log, e, msg, null, LogLevel.ERROR, false, false);
+        log(log, e, msg, LogLevel.ERROR, false, false);
     }
 
     /**
@@ -102,7 +102,7 @@
     public static void error(@Nullable IgniteLogger log, @Nullable Throwable e, String msg, boolean byMsg) {
         assert !F.isEmpty(msg);
 
-        log(log, e, msg, null, LogLevel.ERROR, false, byMsg);
+        log(log, e, msg, LogLevel.ERROR, false, byMsg);
     }
 
     /**
@@ -114,7 +114,7 @@
     public static void warn(@Nullable IgniteLogger log, String msg) {
         assert !F.isEmpty(msg);
 
-        log(log, null, msg, null, LogLevel.WARN, false, false);
+        log(log, null, msg, LogLevel.WARN, false, false);
     }
 
     /**
@@ -129,7 +129,7 @@
     public static void warn(@Nullable IgniteLogger log, @Nullable Throwable e, String msg, boolean quite, boolean byMsg) {
         assert !F.isEmpty(msg);
 
-        log(log, e, msg, null, LogLevel.WARN, quite, byMsg);
+        log(log, e, msg, LogLevel.WARN, quite, byMsg);
     }
 
 
@@ -143,20 +143,7 @@
     public static void warn(@Nullable IgniteLogger log, String msg, boolean quiet) {
         assert !F.isEmpty(msg);
 
-        log(log, null, msg, null, LogLevel.WARN, quiet, false);
-    }
-
-    /**
-     * Logs warning if needed.
-     *
-     * @param log Logger.
-     * @param longMsg Long message (or just message).
-     * @param shortMsg Short message for quiet logging.
-     */
-    public static void warn(@Nullable IgniteLogger log, String longMsg, @Nullable String shortMsg) {
-        assert !F.isEmpty(longMsg);
-
-        log(log, null, longMsg, shortMsg, LogLevel.WARN, false, false);
+        log(log, null, msg, LogLevel.WARN, quiet, false);
     }
 
     /**
@@ -169,7 +156,7 @@
     public static void info(@Nullable IgniteLogger log, String msg, boolean quiet) {
         assert !F.isEmpty(msg);
 
-        log(log, null, msg, null, LogLevel.INFO, quiet, false);
+        log(log, null, msg, LogLevel.INFO, quiet, false);
     }
 
     /**
@@ -188,7 +175,7 @@
      * Clears all stored data. This will make throttle to behave like a new one.
      */
     public static void clear() {
-        errors.clear();
+        errors = new ConcurrentLinkedHashMap<>(throttleCap, 0.75f, DFLT_CONCUR_LVL, throttleCap);
     }
 
     /**
@@ -197,13 +184,17 @@
      * @param log Logger.
      * @param e Error (optional).
      * @param longMsg Long message (or just message).
-     * @param shortMsg Short message for quiet logging.
      * @param level Level where messages should appear.
      * @param byMsg Errors group by message, not by tuple(error, msg).
      */
     @SuppressWarnings({"RedundantTypeArguments"})
-    private static void log(@Nullable IgniteLogger log, @Nullable Throwable e, String longMsg,
-        @Nullable String shortMsg, LogLevel level, boolean quiet, boolean byMsg) {
+    private static void log(@Nullable IgniteLogger log,
+        @Nullable Throwable e,
+        String longMsg,
+        LogLevel level,
+        boolean quiet,
+        boolean byMsg
+    ) {
         assert !F.isEmpty(longMsg);
 
         IgniteBiTuple<Class<? extends Throwable>, String> tup =
@@ -217,7 +208,7 @@
 
             if (loggedTs == null || loggedTs < curTs - throttleTimeout) {
                 if (replace(tup, loggedTs, curTs)) {
-                    level.doLog(log, longMsg, shortMsg, e, quiet);
+                    level.doLog(log, longMsg, e, quiet);
 
                     break;
                 }
@@ -258,32 +249,32 @@
     private enum LogLevel {
         /** Error level. */
         ERROR {
-            @Override public void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet) {
+            @Override public void doLog(IgniteLogger log, String msg, Throwable e, boolean quiet) {
                 if (e != null)
-                    U.error(log, longMsg, e);
+                    U.error(log, msg, e);
                 else
-                    U.error(log, longMsg);
+                    U.error(log, msg);
             }
         },
 
         /** Warn level. */
         WARN {
-            @Override public void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet) {
+            @Override public void doLog(IgniteLogger log, String msg, Throwable e, boolean quiet) {
                 if (quiet)
-                    U.quietAndWarn(log, longMsg, F.isEmpty(shortMsg) ? longMsg : shortMsg);
+                    U.quietAndWarn(log, msg);
                 else
-                    U.warn(log, longMsg, F.isEmpty(shortMsg) ? longMsg : shortMsg);
+                    U.warn(log, msg);
             }
         },
 
         /** Info level. */
         INFO {
-            @Override public void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet) {
+            @Override public void doLog(IgniteLogger log, String msg, Throwable e, boolean quiet) {
                 if (quiet)
-                    U.quietAndInfo(log, longMsg);
+                    U.quietAndInfo(log, msg);
                 else {
                     if (log.isInfoEnabled())
-                        log.info(longMsg);
+                        log.info(msg);
                 }
             }
         };
@@ -292,10 +283,9 @@
          * Performs logging operation.
          *
          * @param log Logger to use.
-         * @param longMsg Long message.
-         * @param shortMsg Short message.
+         * @param msg Long message.
          * @param e Exception to attach to log.
          */
-        public abstract void doLog(IgniteLogger log, String longMsg, String shortMsg, Throwable e, boolean quiet);
+        public abstract void doLog(IgniteLogger log, String msg, Throwable e, boolean quiet);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/GridStringBuilder.java b/modules/core/src/main/java/org/apache/ignite/internal/util/GridStringBuilder.java
index 410f278..2d4a076 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/GridStringBuilder.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/GridStringBuilder.java
@@ -482,7 +482,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return impl.toString();
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteExceptionRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteExceptionRegistry.java
index 03eaaea..0ae7b19 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteExceptionRegistry.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteExceptionRegistry.java
@@ -241,7 +241,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return S.toString(ExceptionInfo.class, this);
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
index 3dfa8c1..d2d2b89 100755
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/IgniteUtils.java
@@ -4020,6 +4020,22 @@
     }
 
     /**
+     * Closes given resource suppressing possible checked exception.
+     *
+     * @param rsrc Resource to close. If it's {@code null} - it's no-op.
+     * @param e Suppressor exception
+     */
+    public static void closeWithSuppressingException(@Nullable AutoCloseable rsrc, @NotNull Exception e) {
+        if (rsrc != null)
+            try {
+                rsrc.close();
+            }
+            catch (Exception suppressed) {
+               e.addSuppressed(suppressed);
+            }
+    }
+
+    /**
      * Quietly closes given resource ignoring possible checked exception.
      *
      * @param rsrc Resource to close. If it's {@code null} - it's no-op.
@@ -4230,7 +4246,7 @@
 
         String s = msg.toString();
 
-        warn(log, s, s);
+        warn(log, s, null);
     }
 
     /**
@@ -4286,18 +4302,23 @@
      * or in QUIET mode it will add {@code (wrn)} prefix to the message.
      *
      * @param log Optional logger to use when QUIET mode is not enabled.
-     * @param longMsg Message to log using normal logger.
-     * @param shortMsg Message to log using quiet logger.
+     * @param msg Message to log using normal logger.
+     * @param e Optional exception.
      */
-    public static void warn(@Nullable IgniteLogger log, Object longMsg, Object shortMsg) {
-        assert longMsg != null;
-        assert shortMsg != null;
+    public static void warn(@Nullable IgniteLogger log, Object msg, @Nullable Throwable e) {
+        assert msg != null;
 
         if (log != null)
-            log.warning(compact(longMsg.toString()));
-        else
+            log.warning(compact(msg.toString()), e);
+        else {
             X.println("[" + SHORT_DATE_FMT.format(new java.util.Date()) + "] (wrn) " +
-                compact(shortMsg.toString()));
+                    compact(msg.toString()));
+
+            if (e != null)
+                e.printStackTrace(System.err);
+            else
+                X.printerrln();
+        }
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/SerializableTransient.java b/modules/core/src/main/java/org/apache/ignite/internal/util/SerializableTransient.java
index 14a2f27..e016009 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/SerializableTransient.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/SerializableTransient.java
@@ -37,19 +37,17 @@
 public @interface SerializableTransient {
     /**
      * Name of the private static method that returns list of transient fields
-     * that should be serialized (String[]), and accepts itself (before serialization)
-     * and {@link IgniteProductVersion}, e.g.
+     * that should be serialized (String[]), and accepts {@link IgniteProductVersion}, e.g.
      * <pre>
-     *     private static String[] fields(Object self, IgniteProductVersion ver){
+     *     private static String[] fields(IgniteProductVersion ver){
      *         return ver.compareTo("1.5.30") > 0 ? SERIALIZABLE_FIELDS : null;
      *     }
      * </pre>
      * <p>
-     *     On serialization version argument <tt>ver</tt> is null, on deserialization - <tt>self</tt> is null.
+     *     On serialization version argument <tt>ver</tt> is receiver version and sender version on deserialization.
      * </p>
      * <p>
-     *     If it returns empty array or null all transient fields will be normally
-     *     ignored.
+     *     If it returns empty array or null all transient fields will be normally ignored.
      * </p>
      *
      * @return Name of the method.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/StripedExecutor.java b/modules/core/src/main/java/org/apache/ignite/internal/util/StripedExecutor.java
index 904b8d1..d89124f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/StripedExecutor.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/StripedExecutor.java
@@ -667,7 +667,7 @@
         }
 
         /** {@inheritDoc} */
-        void execute(Runnable cmd) {
+        @Override void execute(Runnable cmd) {
             queue.add(cmd);
 
             if (parked)
@@ -739,7 +739,7 @@
         }
 
         /** {@inheritDoc} */
-        void execute(Runnable cmd) {
+        @Override void execute(Runnable cmd) {
             queue.add(cmd);
         }
 
@@ -796,7 +796,7 @@
         }
 
         /** {@inheritDoc} */
-        void execute(Runnable cmd) {
+        @Override void execute(Runnable cmd) {
             queue.add(cmd);
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/TransientSerializable.java b/modules/core/src/main/java/org/apache/ignite/internal/util/TransientSerializable.java
new file mode 100644
index 0000000..b583c1b
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/TransientSerializable.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.util;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+import org.apache.ignite.lang.IgniteProductVersion;
+
+/**
+ * Marks class as it has non-transient fields that should not be serialized.
+ * Annotated class must have method that returns list of non-transient
+ * fields that should not be serialized.
+ * <p>
+ *     Works only for jobs. For other messages node version is not available.
+ * </p>
+ */
+@Retention(RetentionPolicy.RUNTIME)
+@Target(ElementType.TYPE)
+public @interface TransientSerializable {
+    /**
+     * Name of the private static method that returns list of non-transient fields
+     * that should not be serialized (String[]), and accepts {@link IgniteProductVersion}, e.g.
+     * <pre>
+     *     private static String[] fields(IgniteProductVersion ver){
+     *         return ver.compareTo("1.5.30") < 0 ? EXCLUDED_FIELDS : null;
+     *     }
+     * </pre>
+     * <p>
+     *     On serialization version argument <tt>ver</tt> is receiver version and sender version on deserialization.
+     * </p>
+     * <p>
+     *     If it returns empty array or null all non-transient fields will be normally serialized.
+     * </p>
+     *
+     * @return Name of the method.
+     */
+    String methodName();
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
index a724060..87f5882 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/future/GridCompoundFuture.java
@@ -285,12 +285,12 @@
                 onDone(rdc != null ? rdc.reduce() : null);
             }
             catch (RuntimeException e) {
-                logError(null, "Failed to execute compound future reducer: " + this, e);
+                logError(logger(), "Failed to execute compound future reducer: " + this, e);
 
                 onDone(e);
             }
             catch (AssertionError e) {
-                logError(null, "Failed to execute compound future reducer: " + this, e);
+                logError(logger(), "Failed to execute compound future reducer: " + this, e);
 
                 onDone(e);
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/ContainsPredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/ContainsPredicate.java
index e3d613a..c5ca76b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/ContainsPredicate.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/ContainsPredicate.java
@@ -49,7 +49,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(ContainsPredicate.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/NotContainsPredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/NotContainsPredicate.java
index 5f8ba9a..9bacd9a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/NotContainsPredicate.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/lang/gridfunc/NotContainsPredicate.java
@@ -48,7 +48,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(NotContainsPredicate.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterAdapter.java
index 4ede4b7..05f4ec6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterAdapter.java
@@ -47,7 +47,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return name;
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
index ec59020..a57b546 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioFilterChain.java
@@ -77,7 +77,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         if (str == null) {
             StringBuilder res = new StringBuilder("FilterChain[filters=[");
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
index 85332f4..9678ae7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/nio/GridNioServer.java
@@ -1875,7 +1875,7 @@
          *
          * @param req Change request.
          */
-        public void offer(SessionChangeRequest req) {
+        @Override public void offer(SessionChangeRequest req) {
             changeReqs.offer(req);
 
             if (select)
@@ -2190,7 +2190,7 @@
         /**
          * @param ses Session.
          */
-        public final void registerWrite(GridSelectorNioSessionImpl ses) {
+        @Override public final void registerWrite(GridSelectorNioSessionImpl ses) {
             SelectionKey key = ses.key();
 
             if (key.isValid()) {
@@ -3337,17 +3337,17 @@
         }
 
         /** {@inheritDoc} */
-        public NioOperation operation() {
+        @Override public NioOperation operation() {
             return op;
         }
 
         /** {@inheritDoc} */
-        public Object message() {
+        @Override public Object message() {
             return msg;
         }
 
         /** {@inheritDoc} */
-        public void resetSession(GridNioSession ses) {
+        @Override public void resetSession(GridNioSession ses) {
             assert msg instanceof Message : msg;
 
             this.ses = (GridSelectorNioSessionImpl)ses;
@@ -3361,7 +3361,7 @@
         }
 
         /** {@inheritDoc} */
-        public GridSelectorNioSessionImpl session() {
+        @Override public GridSelectorNioSessionImpl session() {
             return ses;
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafePartitionedMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafePartitionedMap.java
index 4cba7fd..42b2769 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafePartitionedMap.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/offheap/unsafe/GridUnsafePartitionedMap.java
@@ -291,7 +291,7 @@
     /** {@inheritDoc} */
     @Override public GridCloseableIterator<IgniteBiTuple<byte[], byte[]>> iterator() {
         return new PartitionedMapCloseableIterator<IgniteBiTuple<byte[], byte[]>>() {
-            protected void advance() throws IgniteCheckedException {
+            @Override protected void advance() throws IgniteCheckedException {
                 curIt = null;
 
                 while (p < parts) {
@@ -313,7 +313,7 @@
         assert c != null;
 
         return new PartitionedMapCloseableIterator<T>() {
-            protected void advance() throws IgniteCheckedException {
+            @Override protected void advance() throws IgniteCheckedException {
                 curIt = null;
 
                 while (p < parts) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/CopyOnWriteManager.java b/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/CopyOnWriteManager.java
deleted file mode 100644
index 685668a..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/CopyOnWriteManager.java
+++ /dev/null
@@ -1,390 +0,0 @@
-/*
- * Copyright (c) 2009 Stanford University, unless otherwise specified.
- * All rights reserved.
- *
- * This software was developed by the Pervasive Parallelism Laboratory of
- * Stanford University, California, USA.
- *
- * Permission to use, copy, modify, and distribute this software in source
- * or binary form for any purpose with or without fee is hereby granted,
- * provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- *    3. Neither the name of Stanford University nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-package org.apache.ignite.internal.util.snaptree;
-
-import java.util.concurrent.atomic.AtomicReference;
-import java.util.concurrent.locks.AbstractQueuedSynchronizer;
-
-/** Manages copy-on-write behavior for a concurrent tree structure.  It is
- *  assumed that the managed structure allows concurrent mutation, but that no
- *  mutating operations may be active when a copy-on-write snapshot of tree is
- *  taken.  Because it is difficult to update the size of data structure in a
- *  highly concurrent fashion, the <code>CopyOnWriteManager</code> also manages
- *  a running total that represents the size of the contained tree structure.
- *  <p>
- *  Users should implement the {@link #freezeAndClone(Object)} and
- *  {@link #cloneFrozen(Object)} methods.
- */
-@SuppressWarnings("ALL")
-abstract public class CopyOnWriteManager<E> implements Cloneable {
-
-    /** This is basically a stripped-down CountDownLatch.  Implementing our own
-     *  reduces the object count by one, and it gives us access to the
-     *  uninterruptable acquireShared.
-     */
-    private class Latch extends AbstractQueuedSynchronizer {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        Latch(final boolean triggered) {
-            setState(triggered ? 0 : 1);
-        }
-
-        public int tryAcquireShared(final int acquires) {
-            // 1 = success, and followers may also succeed
-            // -1 = failure
-            return getState() == 0 ? 1 : -1;
-        }
-
-        public boolean tryReleaseShared(final int releases) {
-            // Before, state is either 0 or 1.  After, state is always 0.
-            return compareAndSetState(1, 0);
-        }
-    }
-
-    private static final int MUTATE = 1;
-    private static final int MUTATE_AFTER_FREEZE = 2;
-    private static final int BULK_READ = 3;
-    private static final int BULK_READ_AFTER_FREEZE = 4;
-
-    private class COWEpoch extends EpochNode {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        /** Tripped after this COWEpoch is installed as active. */
-        private final Latch _activated;
-
-        /** True iff this is a mutating epoch. */
-        final boolean mutationAllowed;
-
-        /** The value used by this epoch. */
-        E value;
-
-        /** The computed size of <code>value</code>, as of the beginning of
-         *  this epoch.
-         */
-        int initialSize;
-
-        /** A frozen E equal to <code>value</code>, if not <code>dirty</code>. */
-        private volatile E _frozenValue;
-
-        /** True if any mutations have been performed on <code>value</code>. */
-        volatile boolean dirty;
-
-        /** The epoch that will follow this one, created on demand. */
-        final AtomicReference<COWEpoch> successorRef = new AtomicReference<COWEpoch>(null);
-
-        /** A ticket on the successor, released when this epoch is closed. */
-        Epoch.Ticket successorTicket;
-
-        /** True if the successor should freeze and clone this epoch's value. */
-        boolean freezeRequested;
-
-        private COWEpoch(final boolean mutationAllowed) {
-            this._activated = new Latch(false);
-            this.mutationAllowed = mutationAllowed;
-        }
-
-        public COWEpoch(final E value, final E frozenValue, final int initialSize) {
-            this._activated = new Latch(true); // pre-triggered
-            this.mutationAllowed = true;
-            this.value = value;
-            this.initialSize = initialSize;
-            this._frozenValue = frozenValue;
-            this.dirty = frozenValue == null;
-        }
-
-        EpochNode attemptInitialArrive() {
-            return super.attemptArrive();
-        }
-
-        @Override
-        public EpochNode attemptArrive() {
-            final EpochNode ticket = super.attemptArrive();
-            if (ticket != null && !dirty) {
-                dirty = true;
-                _frozenValue = null;
-            }
-            return ticket;
-        }
-
-        private void setFrozenValue(final E v) {
-            if (!dirty) {
-                _frozenValue = v;
-                if (dirty) {
-                    _frozenValue = null;
-                }
-            }
-        }
-
-        E getFrozenValue() {
-            final E v = _frozenValue;
-            return dirty ? null : v;
-        }
-
-        protected void onClosed(final int dataSum) {
-            assert(dataSum == 0 || dirty);
-
-            final COWEpoch succ = successorRef.get();
-            if (freezeRequested) {
-                succ.value = freezeAndClone(value);
-                succ.setFrozenValue(value);
-            }
-            else {
-                succ.value = value;
-                if (dirty) {
-                    succ.dirty = true;
-                }
-                else {
-                    succ.setFrozenValue(_frozenValue);
-                }
-            }
-            succ.initialSize = initialSize + dataSum;
-
-            _active = succ;
-            successorTicket.leave(0);
-            succ._activated.releaseShared(1);
-        }
-
-        public void awaitActivated() {
-            _activated.acquireShared(1);
-        }
-
-        public COWEpoch getOrCreateSuccessor(final boolean preferredMutation) {
-            final COWEpoch existing = successorRef.get();
-            if (existing != null) {
-                return existing;
-            }
-
-            final COWEpoch repl = new COWEpoch(preferredMutation);
-            if (attemptInstallSuccessor(repl)) {
-                return repl;
-            }
-
-            return successorRef.get();
-        }
-
-        public boolean attemptInstallSuccessor(final COWEpoch succ) {
-            final Epoch.Ticket t = succ.attemptInitialArrive();
-            if (successorRef.compareAndSet(null, succ)) {
-                successorTicket = t;
-                beginClose();
-                return true;
-            }
-            else {
-                return false;
-            }
-        }
-    }
-
-    private volatile COWEpoch _active;
-
-    /** Creates a new {@link CopyOnWriteManager} holding
-     *  <code>initialValue</code>, with an assumed size of
-     *  <code>initialSize</code>.
-     */
-    public CopyOnWriteManager(final E initialValue, final int initialSize) {
-        _active = new COWEpoch(initialValue, null, initialSize);
-    }
-
-    /** The implementing method must mark <code>value</code> as shared, and
-     *  return a new object to use in its place.  Hopefully, the majority of
-     *  the work of the clone can be deferred by copy-on-write.
-     */
-    abstract protected E freezeAndClone(final E value);
-
-    /** Returns a clone of a frozen E. */
-    abstract protected E cloneFrozen(E frozenValue);
-
-    public CopyOnWriteManager<E> clone() {
-        final CopyOnWriteManager<E> copy;
-        try {
-            copy = (CopyOnWriteManager<E>) super.clone();
-        }
-        catch (final CloneNotSupportedException xx) {
-            throw new Error("unexpected", xx);
-        }
-
-        COWEpoch a = _active;
-        E f = a.getFrozenValue();
-        while (f == null) {
-            a.freezeRequested = true;
-            final COWEpoch succ = a.getOrCreateSuccessor(a.mutationAllowed);
-            succ.awaitActivated();
-            if (a.value != succ.value) {
-                f = a.value;
-            }
-            a = succ;
-        }
-
-        copy.createNewEpoch(f, a);
-        return copy;
-    }
-
-    private void createNewEpoch(E f, COWEpoch a)
-    {
-        _active = new COWEpoch(cloneFrozen(f), f, a.initialSize);
-    }
-
-    /** Returns a reference to the tree structure suitable for a read
-     *  operation.  The returned structure may be mutated by operations that
-     *  have the permission of this {@link CopyOnWriteManager}, but they will
-     *  not observe changes managed by other instances.
-     */
-    public E read() {
-        return _active.value;
-    }
-
-    /** Obtains permission to mutate the copy-on-write value held by this
-     *  instance, perhaps blocking while a concurrent snapshot is being
-     *  performed.  {@link Epoch.Ticket#leave} must be called exactly once on
-     *  the object returned from this method, after the mutation has been
-     *  completed.  The change in size reflected by the mutation should be
-     *  passed as the parameter to <code>leave</code>.
-     */
-    public Epoch.Ticket beginMutation() {
-        return begin(true);
-    }
-
-    public Epoch.Ticket beginQuiescent() {
-        return begin(false);
-    }
-
-    private Epoch.Ticket begin(final boolean mutation) {
-        final COWEpoch active = _active;
-        if (active.mutationAllowed == mutation) {
-            final Epoch.Ticket ticket = active.attemptArrive();
-            if (ticket != null) {
-                return ticket;
-            }
-        }
-        return begin(mutation, active);
-    }
-
-    private Epoch.Ticket begin(final boolean mutation, COWEpoch epoch) {
-        while (true) {
-            COWEpoch succ = epoch.successorRef.get();
-            if (succ == null) {
-                final COWEpoch newEpoch = new COWEpoch(mutation);
-                final Epoch.Ticket newTicket = newEpoch.attemptArrive();
-                if (epoch.attemptInstallSuccessor(newEpoch)) {
-                    // can't use the ticket until the new epoch is activated
-                    newEpoch.awaitActivated();
-                    return newTicket;
-                }
-
-                // if our CAS failed, somebody else succeeded
-                succ = epoch.successorRef.get();
-            }
-
-            // is the successor created by someone else suitable?
-            if (succ.mutationAllowed == mutation) {
-                final Epoch.Ticket ticket = succ.attemptArrive();
-                if (ticket != null) {
-                    succ.awaitActivated();
-                    return ticket;
-                }
-            }
-
-            epoch = succ;
-        }
-    }
-
-    /** Returns a reference to the tree structure suitable for a mutating
-     *  operation.  This method may only be called under the protection of a
-     *  ticket returned from {@link #beginMutation}.
-     */
-    public E mutable() {
-        return _active.value;
-    }
-
-    /** Returns a reference to a snapshot of this instance's tree structure
-     *  that may be read, but not written.  This is accomplished by suspending
-     *  mutation, replacing the mutable root of this manager with the result of
-     *  <code>freezeAndClone(root, false)</code>, and then returning a
-     *  reference to the old root.  Successive calls to this method may return
-     *  the same instance.
-     */
-    public E frozen() {
-        COWEpoch a = _active;
-        E f = a.getFrozenValue();
-        while (f == null) {
-            a.freezeRequested = true;
-            final COWEpoch succ = a.getOrCreateSuccessor(a.mutationAllowed);
-            succ.awaitActivated();
-            if (a.value != succ.value) {
-                f = a.value;
-            }
-            a = succ;
-        }
-        return f;
-    }
-
-    /** Returns a reference to a snapshot of this instance's tree structure,
-     *  if one is available without requiring any additional copying, otherwise
-     *  returns null.  May be used in combination with {@link #beginQuiescent}
-     *  to perform quiescent reads with minimal cost.
-     */
-    public E availableFrozen() {
-        return _active.getFrozenValue();
-    }
-
-    /** Returns true if the computed {@link #size} is zero. */
-    public boolean isEmpty() {
-        // for a different internal implementation (such as a C-SNZI) we might
-        // be able to do better than this
-        return size() == 0;
-    }
-
-    /** Returns the sum of the <code>initialSize</code> parameter passed to the
-     *  constructor, and the size deltas passed to {@link Epoch.Ticket#leave}
-     *  for all of the mutation tickets.  The result returned is linearizable
-     *  with mutations, which requires mutation to be quiesced.  No tree freeze
-     *  is required, however.
-     */
-    public int size() {
-        final COWEpoch a = _active;
-        final Integer delta = a.attemptDataSum();
-        if (delta != null) {
-            return a.initialSize + delta;
-        }
-
-        // wait for an existing successor, or force one if not already in progress
-        final COWEpoch succ = a.getOrCreateSuccessor(a.mutationAllowed);
-        succ.awaitActivated();
-        return succ.initialSize;
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/Epoch.java b/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/Epoch.java
deleted file mode 100644
index c85320d..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/Epoch.java
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2009 Stanford University, unless otherwise specified.
- * All rights reserved.
- *
- * This software was developed by the Pervasive Parallelism Laboratory of
- * Stanford University, California, USA.
- *
- * Permission to use, copy, modify, and distribute this software in source
- * or binary form for any purpose with or without fee is hereby granted,
- * provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- *    3. Neither the name of Stanford University nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-package org.apache.ignite.internal.util.snaptree;
-
-/** A <code>Epoch</code> has a lifecycle consisting of three phases: active,
- *  closing, and closed.  During the active phase partipants may arrive and
- *  leave the epoch.  Once a close has been requested, new participants are not
- *  allowed, only leaving is possible.  Once close has been requested and all
- *  participants have left, the epoch is transitioned to the closed state.
- *  <p>
- *  Entry is performed with {@link #attemptArrive}, which returns a non-null
- *  ticket on success or null if {@link #beginClose} has already been called.
- *  Each successful call to <code>attemptArrive</code> must be paired by a call
- *  to {@link Ticket#leave} on the returned ticket.
- *  <p>
- *  The abstract method {@link #onClosed} will be invoked exactly once after
- *  the epoch becomes closed.  It will be passed the sum of the values passed
- *  to {@link Ticket#leave}.  There is no way to query the current participant
- *  count or state of the epoch without changing it.
- *  <p>
- *  Internally the epoch responds to contention by increasing its size,
- *  striping the participant count across multiple objects (and hopefully
- *  multiple cache lines).  Once close has begun, the epoch converts itself to
- *  a single-shot hierarchical barrier, that also performs a hierarchical
- *  reduction of the leave parameters.
- */
-@SuppressWarnings("ALL")
-abstract public class Epoch {
-
-    /** Represents a single successful arrival to an {@link Epoch}. */
-    public interface Ticket {
-        /** Informs the epoch that returned this ticket that the participant
-         *  has left.  This method should be called exactly once per ticket.
-         *  The sum of the <code>data</code> values for all tickets will be
-         *  computed and passed to {@link Epoch#onClosed}.
-         */
-        void leave(int data);
-    }
-
-    private final Root _root = new Root();
-
-    /** Returns a {@link Ticket} indicating a successful arrival, if no call to
-     *  {@link #beginClose} has been made for this epoch, or returns null if
-     *  close has already begun.  {@link Ticket#leave} must be called exactly
-     *  once on any returned ticket.
-     */
-    public Ticket attemptArrive() {
-        return _root.attemptArrive();
-    }
-
-    /** Prevents new arrivals from succeeding, then returns immediately.
-     *  {@link #onClosed} will be called after all outstanding tickets have
-     *  been returned.  To block until close is complete, add some sort of
-     *  synchronization logic to the user-defined implementation of {@link
-     *  #onClosed}.
-     */
-    public void beginClose() {
-        _root.beginClose();
-    }
-
-    /** Override this method to provide user-defined behavior.
-     *  <code>dataSum</code> will be the sum of the <code>data</code> values
-     *  passed to {@link Ticket#leave} for all tickets in this epoch.
-     *  <p>
-     *  As a simple example, a blocking close operation may be defined by:<pre>
-     *    class BlockingEpoch extends Epoch {
-     *        private final CountDownLatch _closed = new CountDownLatch(1);
-     *
-     *        public void blockingClose() throws InterruptedException {
-     *            beginClose();
-     *            _closed.await();
-     *        }
-     *
-     *        protected void onClosed(int dataSum) {
-     *            _closed.countDown(1);
-     *        }
-     *    }
-     *  </pre>
-     */
-    abstract protected void onClosed(int dataSum);
-
-    //////////////// debugging stuff
-
-    int computeSpread() {
-        return _root.computeSpread();
-    }
-
-    //////////////// internal implementation
-
-    private class Root extends EpochNode {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        protected void onClosed(final int dataSum) {
-            Epoch.this.onClosed(dataSum);
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/EpochNode.java b/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/EpochNode.java
deleted file mode 100644
index 7ceec45..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/EpochNode.java
+++ /dev/null
@@ -1,433 +0,0 @@
-/*
- * Copyright (c) 2009 Stanford University, unless otherwise specified.
- * All rights reserved.
- *
- * This software was developed by the Pervasive Parallelism Laboratory of
- * Stanford University, California, USA.
- *
- * Permission to use, copy, modify, and distribute this software in source
- * or binary form for any purpose with or without fee is hereby granted,
- * provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- *    3. Neither the name of Stanford University nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-package org.apache.ignite.internal.util.snaptree;
-
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.atomic.AtomicReferenceFieldUpdater;
-
-/** Provides an implementation of the behavior of an {@link Epoch}. */
-@SuppressWarnings("ALL")
-abstract class EpochNode extends AtomicLong implements Epoch.Ticket {
-
-    private static final int TRIES_BEFORE_SUBTREE = 2;
-    private static final int CLOSER_HEAD_START = 1000;
-
-    /** This includes the root.  7 or fewer procs gets 2, 63 or fewer gets
-     *  3, 511 or fewer 4.  We observe that the node count reported by {@link
-     *  #computeSpread} is roughly twice the number of hardware contexts in
-     *  use.
-     */
-    private static final int MAX_LEVELS = 2 + log8(Runtime.getRuntime().availableProcessors());
-
-    /** Returns floor(log_base_8(value)). */
-    private static int log8(final int value) {
-        return (31 - Integer.numberOfLeadingZeros(value)) / 3;
-    }
-
-    //////////////// branching factor
-
-    private static final int LOG_BF = 3;
-    private static final int BF = 1 << LOG_BF;
-    private static final int BF_MASK = BF - 1;
-
-    //////////////// bit packing
-
-    private static final int DATA_SUM_SHIFT = 32;
-    private static int dataSum(long state) { return (int)(state >> DATA_SUM_SHIFT); }
-    private static long withDataDelta(long state, int delta) { return state + (((long) delta) << DATA_SUM_SHIFT); }
-
-    private static final int CHILD_CLOSED_SHIFT = 32 - BF;
-    private static long ALL_CHILDREN_CLOSED = ((1L << BF) - 1L) << CHILD_CLOSED_SHIFT;
-    private static long childClosedBit(int which) { return 1L << (CHILD_CLOSED_SHIFT + which); }
-    private static boolean isChildClosed(long state, int which) { return (state & childClosedBit(which)) != 0; }
-    private static long withChildClosed(long state, int which, long childState) {
-        assert(!isChildClosed(state, which));
-        return withDataDelta(state | childClosedBit(which), dataSum(childState));
-    }
-    private static boolean isAllChildrenClosed(long state) { return (state & ALL_CHILDREN_CLOSED) == ALL_CHILDREN_CLOSED; }
-
-    private static final int CHILD_PRESENT_SHIFT = CHILD_CLOSED_SHIFT - BF;
-    private static final long ANY_CHILD_PRESENT = ((1L << BF) - 1L) << CHILD_PRESENT_SHIFT;
-    private static long childPresentBit(int which) { return 1L << (CHILD_PRESENT_SHIFT + which); }
-    private static boolean isChildPresent(long state, int which) { return (state & childPresentBit(which)) != 0; }
-    private static long withChildPresent(long state, int which) { return state | childPresentBit(which); }
-    private static boolean isAnyChildPresent(long state) { return (state & ANY_CHILD_PRESENT) != 0; }
-
-    private static final long MARK = (1L << (CHILD_PRESENT_SHIFT - 1));
-    private static boolean isMarked(long state) { return (state & MARK) != 0L; }
-    /** Records all non-present children as closed. */
-    private static long withMarked(long state) {
-        final int missingChildren = (~((int) state) >> CHILD_PRESENT_SHIFT) & ((1 << BF) - 1);
-        return state | MARK | (((long) missingChildren) << CHILD_CLOSED_SHIFT);
-    }
-
-    private static final long ENTRY_COUNT_MASK = MARK - 1;
-    private static int entryCount(long state) { return (int) (state & ENTRY_COUNT_MASK); }
-    private static long withArrive(long state) { return state + 1; }
-    private static long withLeave(long state, int dataDelta) { return withDataDelta(state - 1, dataDelta); }
-    private static boolean mayArrive(long state) { return entryCount(state) != ENTRY_COUNT_MASK; }
-    private static boolean mayLeave(long state) { return entryCount(state) != 0; }
-
-    private static final long CLOSED_MASK = MARK | ALL_CHILDREN_CLOSED | ENTRY_COUNT_MASK;
-    private static final long CLOSED_VALUE = MARK | ALL_CHILDREN_CLOSED;
-    private static boolean isClosed(long state) { return (state & CLOSED_MASK) == CLOSED_VALUE; }
-
-    private static final long ENTRY_FAST_PATH_MASK = ANY_CHILD_PRESENT | MARK | (ENTRY_COUNT_MASK - (ENTRY_COUNT_MASK >> 1));
-    /** Not marked, no children, and no overflow possible. */
-    private static boolean isEntryFastPath(long state) { return (state & ENTRY_FAST_PATH_MASK) == 0L; }
-
-    //////////////// subclasses
-
-    private static class Child extends EpochNode {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        private Child(final EpochNode parent, final int whichInParent) {
-            super(parent, whichInParent);
-        }
-
-        protected void onClosed(final int dataSum) {
-            throw new Error();
-        }
-    }
-
-    //////////////// instance state
-
-    private static final AtomicReferenceFieldUpdater[] childrenUpdaters = {
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child0"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child1"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child2"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child3"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child4"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child5"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child6"),
-        AtomicReferenceFieldUpdater.newUpdater(EpochNode.class, EpochNode.class, "_child7")
-    };
-
-    private final EpochNode _parent;
-    private final int _whichInParent;
-
-    // It would be cleaner to use an array of children, but we want to force
-    // all of the bulk into the same object as the AtomicLong.value.
-
-    // To avoid races between creating a child and marking a node as closed,
-    // we add a bit to the state for each child that records whether it
-    // *should* exist.  If we find that the bit is set but a child is missing,
-    // we can create it ourself.
-
-    private volatile EpochNode _child0;
-    private volatile EpochNode _child1;
-    private volatile EpochNode _child2;
-    private volatile EpochNode _child3;
-    private volatile EpochNode _child4;
-    private volatile EpochNode _child5;
-    private volatile EpochNode _child6;
-    private volatile EpochNode _child7;
-
-    EpochNode() {
-        _parent = null;
-        _whichInParent = 0;
-    }
-
-    private EpochNode(final EpochNode parent, final int whichInParent) {
-        _parent = parent;
-        _whichInParent = whichInParent;
-    }
-
-    //////////////// provided by the caller
-
-    abstract protected void onClosed(int dataSum);
-
-    //////////////// child management
-
-    private EpochNode getChildFromField(final int which) {
-        switch (which) {
-            case 0: return _child0;
-            case 1: return _child1;
-            case 2: return _child2;
-            case 3: return _child3;
-            case 4: return _child4;
-            case 5: return _child5;
-            case 6: return _child6;
-            case 7: return _child7;
-            default: return null;
-        }
-    }
-
-    private EpochNode getChild(final long state, final int which) {
-        if (!isChildPresent(state, which)) {
-            return null;
-        }
-        final EpochNode existing = getChildFromField(which);
-        return (existing != null) ? existing : constructPresentChild(which);
-    }
-
-    @SuppressWarnings("unchecked")
-    private EpochNode constructPresentChild(final int which) {
-        final EpochNode n = new Child(this, which);
-        return childrenUpdaters[which].compareAndSet(this, null, n) ? n : getChildFromField(which);
-    }
-
-    private EpochNode getOrCreateChild(final int which) {
-        final EpochNode existing = getChildFromField(which);
-        return (existing != null) ? existing : createChild(which);
-    }
-
-    private EpochNode createChild(final int which) {
-        while (true) {
-            final long state = get();
-            if (isMarked(state)) {
-                // whatever we've got is what we've got
-                return getChild(state, which);
-            }
-            if (compareAndSet(state, withChildPresent(state, which))) {
-                // the child now should exist, but we must still actually
-                // construct and link in the instance
-                return constructPresentChild(which);
-            }
-        }
-    }
-
-    /** Returns the <code>Node</code> to decr on success, null if
-     *  {@link #beginClose} has already been called on this instance.
-     */
-    public EpochNode attemptArrive() {
-        final long state = get();
-        if (isEntryFastPath(state) && compareAndSet(state, withArrive(state))) {
-            return this;
-        }
-        else {
-            return attemptArrive(0, 1);
-        }
-    }
-
-    private int getIdentity() {
-        final int h = System.identityHashCode(Thread.currentThread());
-
-        // Multiply by -127, as suggested by java.util.IdentityHashMap.
-        // We also set an bit we don't use, to make sure it is never zero.
-        return (h - (h << 7)) | (1 << 31);
-    }
-
-    /** level 1 is the root. */
-    private EpochNode attemptArrive(int id, final int level) {
-        int tries = 0;
-        while (true) {
-            final long state = get();
-            if (isMarked(state)) {
-                return null;
-            }
-            if (isAnyChildPresent(state) ||
-                    (tries >= TRIES_BEFORE_SUBTREE && level < MAX_LEVELS)) {
-                // Go deeper if we have previously detected contention, or if
-                // we are currently detecting it.  Lazy computation of our
-                // current identity.
-                if (id == 0) {
-                    id = getIdentity();
-                }
-                final EpochNode child = getOrCreateChild(id & BF_MASK);
-                if (child == null) {
-                    return null;
-                }
-                return child.attemptArrive(id >> LOG_BF, level + 1);
-            }
-            if (!mayArrive(state)) {
-                throw new IllegalStateException("maximum arrival count of " + ENTRY_COUNT_MASK + " exceeded");
-            }
-            if (compareAndSet(state, withArrive(state))) {
-                // success
-                return this;
-            }
-
-            ++tries;
-        }
-    }
-
-    /** Should be called on every non-null return value from attemptArrive. */
-    public void leave(final int dataDelta) {
-        while (true) {
-            final long state = get();
-            if (!mayLeave(state)) {
-                throw new IllegalStateException("incorrect call to Epoch.leave");
-            }
-            final long after = withLeave(state, dataDelta);
-            if (compareAndSet(state, after)) {
-                if (isClosed(after)) {
-                    newlyClosed(after);
-                }
-                return;
-            }
-        }
-    }
-
-    private void newlyClosed(final long state) {
-        if (_parent != null) {
-            // propogate
-            _parent.childIsNowClosed(_whichInParent, state);
-        }
-        else {
-            // report
-            onClosed(dataSum(state));
-        }
-    }
-
-    private void childIsNowClosed(final int which, final long childState) {
-        while (true) {
-            final long state = get();
-            if (isChildClosed(state, which)) {
-                // not our problem
-                return;
-            }
-            final long after = withChildClosed(state, which, childState);
-            if (compareAndSet(state, after)) {
-                if (isClosed(after)) {
-                    newlyClosed(after);
-                }
-                return;
-            }
-        }
-    }
-
-    /** Prevents subsequent calls to {@link #attemptArrive} from succeeding. */
-    public void beginClose() {
-        int attempts = 0;
-        long state;
-        while (true) {
-            ++attempts;
-
-            state = get();
-            if (isClosed(state)) {
-                return;
-            }
-
-            if (isMarked(state)) {
-                // give the thread that actually performed this transition a
-                // bit of a head start
-                if (attempts < CLOSER_HEAD_START) {
-                    continue;
-                }
-                break;
-            }
-
-            // every child that is not present will be recorded as closed by withMarked
-            final long after = withMarked(state);
-            if (compareAndSet(state, after)) {
-                if (isAllChildrenClosed(after)) {
-                    if (isClosed(after) && _parent == null) {
-                        // finished in one CAS, yeah!
-                        onClosed(dataSum(after));
-                    }
-                    // no second stage necessary
-                    return;
-                }
-                // CAS successful, so now we need to beginClose() the children
-                break;
-            }
-        }
-
-        // no new child bits can be set after marking, so this gets everyone
-        for (int which = 0; which < BF; ++which) {
-            final EpochNode child = getChild(state, which);
-            if (child != null) {
-                child.beginClose();
-            }
-        }
-
-        // Rather than have each child bubble up its closure, we gather it
-        // here to reduce the number of CASs required.
-        while (true) {
-            final long before = get();
-            long after = before;
-            for (int which = 0; which < BF; ++which) {
-                if (!isChildClosed(before, which)) {
-                    final long childState = getChildFromField(which).get();
-                    if (isClosed(childState)) {
-                        after = withChildClosed(after, which, childState);
-                    }
-                }
-            }
-            if (before == after) {
-                return;
-            }
-            if (compareAndSet(before, after)) {
-                if (isClosed(after) && _parent == null) {
-                    onClosed(dataSum(after));
-                }
-                return;
-            }
-        }
-    }
-
-    /** If possible returns the <code>dataSum</code> that would be delivered
-     *  to {@link #onClosed(int)} if this epoch were closed at this moment,
-     *  otherwise returns null.  This will succeed if and only if the tree
-     *  consists only of a single node.
-     */
-    public Integer attemptDataSum() {
-        final long state = get();
-        if (!isAnyChildPresent(state) && entryCount(state) == 0) {
-            // this is better than Integer.valueOf for dynamic escape analysis
-            //return new Integer(dataSum(state));
-            // this is better than new Integer() for object creation
-            return Integer.valueOf(dataSum(state));
-        }
-        else {
-            return null;
-        }
-    }
-
-    /** For debugging purposes. */
-    int computeSpread() {
-        final long state = get();
-        if (isAnyChildPresent(state)) {
-            int sum = 0;
-            for (int which = 0; which < BF; ++which) {
-                final EpochNode child = getChild(state, which);
-                if (child != null) {
-                    sum += child.computeSpread();
-                }
-                else {
-                    // child would be created for arrive, so count it
-                    sum += 1;
-                }
-            }
-            return sum;
-        }
-        else {
-            return 1;
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/SnapTreeMap.java b/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/SnapTreeMap.java
deleted file mode 100644
index dce2fb8..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/SnapTreeMap.java
+++ /dev/null
@@ -1,2917 +0,0 @@
-/*
- * Copyright (c) 2009 Stanford University, unless otherwise specified.
- * All rights reserved.
- *
- * This software was developed by the Pervasive Parallelism Laboratory of
- * Stanford University, California, USA.
- *
- * Permission to use, copy, modify, and distribute this software in source
- * or binary form for any purpose with or without fee is hereby granted,
- * provided that the following conditions are met:
- *
- *    1. Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *
- *    2. Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in the
- *       documentation and/or other materials provided with the distribution.
- *
- *    3. Neither the name of Stanford University nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
- * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
- * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-package org.apache.ignite.internal.util.snaptree;
-
-import org.apache.ignite.*;
-import org.apache.ignite.internal.util.*;
-import org.apache.ignite.internal.util.lang.*;
-
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.AbstractMap;
-import java.util.AbstractSet;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.Comparator;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.NoSuchElementException;
-import java.util.Set;
-import java.util.SortedMap;
-import java.util.SortedSet;
-import java.util.concurrent.ConcurrentNavigableMap;
-
-// TODO: optimized buildFromSorted
-// TODO: submap.clone()
-
-/** A concurrent AVL tree with fast cloning, based on the algorithm of Bronson,
- *  Casper, Chafi, and Olukotun, "A Practical Concurrent Binary Search Tree"
- *  published in PPoPP'10.  To simplify the locking protocols rebalancing work
- *  is performed in pieces, and some removed keys are be retained as routing
- *  nodes in the tree.
- *
- *  <p>This data structure honors all of the contracts of {@link
- *  java.util.concurrent.ConcurrentSkipListMap}, with the additional contract
- *  that clone, size, toArray, and iteration are linearizable (atomic).
- *
- *  <p>The tree uses optimistic concurrency control.  No locks are usually
- *  required for get, containsKey, firstKey, firstEntry, lastKey, or lastEntry.
- *  Reads are not lock free (or even obstruction free), but obstructing threads
- *  perform no memory allocation, system calls, or loops, which seems to work
- *  okay in practice.  All of the updates to the tree are performed in fixed-
- *  size blocks, so restoration of the AVL balance criteria may occur after a
- *  change to the tree has linearized (but before the mutating operation has
- *  returned).  The tree is always properly balanced when quiescent.
- *
- *  <p>To clone the tree (or produce a snapshot for consistent iteration) the
- *  root node is marked as shared, which must be (*) done while there are no
- *  pending mutations.  New mutating operations are blocked if a mark is
- *  pending, and when existing mutating operations are completed the mark is
- *  made.
- *  <em>* - It would be less disruptive if we immediately marked the root as
- *  shared, and then waited for pending operations that might not have seen the
- *  mark without blocking new mutations.  This could result in imbalance being
- *  frozen into the shared portion of the tree, though.  To minimize the
- *  problem we perform the mark and reenable mutation on whichever thread
- *  notices that the entry count has become zero, to reduce context switches on
- *  the critical path.</em>
- *
- *  <p>The same multi-cache line data structure required for efficiently
- *  tracking the entry and exit for mutating operations is used to maintain the
- *  current size of the tree.  This means that the size can be computed by
- *  quiescing as for a clone, but without doing any marking.
- *
- *  <p>Range queries such as higherKey are not amenable to the optimistic
- *  hand-over-hand locking scheme used for exact searches, so they are
- *  implemented with pessimistic concurrency control.  Mutation can be
- *  considered to acquire a lock on the map in Intention-eXclusive mode, range
- *  queries, size(), and root marking acquire the lock in Shared mode.
- *
- *  @author Nathan Bronson
- */
-@SuppressWarnings("ALL")
-public class SnapTreeMap<K, V> extends AbstractMap<K, V> implements ConcurrentNavigableMap<K, V>, Cloneable,
-    Serializable {
-
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /** If false, null values will trigger a NullPointerException.  When false,
-     *  this map acts exactly like a ConcurrentSkipListMap, except for the
-     *  running time of the methods.  The ability to get a snapshot reduces the
-     *  potential ambiguity between null values and absent entries, so I'm not
-     *  sure what the default should be.
-     */
-    static final boolean AllowNullValues = false;
-
-    /** This is a special value that indicates the presence of a null value,
-     *  to differentiate from the absence of a value.  Only used when
-     *  {@link #AllowNullValues} is true.
-     */
-    static final Object SpecialNull = new Object();
-
-    /** This is a special value that indicates that an optimistic read
-     *  failed.
-     */
-    static final Object SpecialRetry = new Object();
-
-
-    /** The number of spins before yielding. */
-    static final int SpinCount = Integer.parseInt(System.getProperty("snaptree.spin", "100"));
-
-    /** The number of yields before blocking. */
-    static final int YieldCount = Integer.parseInt(System.getProperty("snaptree.yield", "0"));
-
-
-    // we encode directions as characters
-    static final char Left = 'L';
-    static final char Right = 'R';
-
-
-    /** An <tt>OVL</tt> is a version number and lock used for optimistic
-     *  concurrent control of some program invariant.  If  {@link #isShrinking}
-     *  then the protected invariant is changing.  If two reads of an OVL are
-     *  performed that both see the same non-changing value, the reader may
-     *  conclude that no changes to the protected invariant occurred between
-     *  the two reads.  The special value UnlinkedOVL is not changing, and is
-     *  guaranteed to not result from a normal sequence of beginChange and
-     *  endChange operations.
-     *  <p>
-     *  For convenience <tt>endChange(ovl) == endChange(beginChange(ovl))</tt>.
-     */
-    static long beginChange(long ovl) { return ovl | 1; }
-    static long endChange(long ovl) { return (ovl | 3) + 1; }
-    static final long UnlinkedOVL = 2;
-
-    static boolean isShrinking(long ovl) { return (ovl & 1) != 0; }
-    static boolean isUnlinked(long ovl) { return (ovl & 2) != 0; }
-    static boolean isShrinkingOrUnlinked(long ovl) { return (ovl & 3) != 0L; }
-
-
-    protected static class Node<K,V> implements Map.Entry<K,V> {
-        public K key;
-        volatile int height;
-
-        /** null means this node is conceptually not present in the map.
-         *  SpecialNull means the value is null.
-         */
-        volatile Object vOpt;
-        volatile Node<K,V> parent;
-        volatile long shrinkOVL;
-        volatile Node<K,V> left;
-        volatile Node<K,V> right;
-
-        Node(final K key,
-              final int height,
-              final Object vOpt,
-              final Node<K,V> parent,
-              final long shrinkOVL,
-              final Node<K,V> left,
-              final Node<K,V> right)
-        {
-            this.key = key;
-            this.height = height;
-            this.vOpt = vOpt;
-            this.parent = parent;
-            this.shrinkOVL = shrinkOVL;
-            this.left = left;
-            this.right = right;
-        }
-
-        @Override
-        public K getKey() { return key; }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public V getValue() {
-            final Object tmp = vOpt;
-            if (AllowNullValues) {
-               return tmp == SpecialNull ? null : (V)tmp;
-            } else {
-                return (V)tmp;
-            }
-        }
-
-        @Override
-        public V setValue(final V v) {
-            throw new UnsupportedOperationException();
-        }
-
-        Node<K,V> child(char dir) { return dir == Left ? left : right; }
-
-        void setChild(char dir, Node<K,V> node) {
-            if (dir == Left) {
-                left = node;
-            } else {
-                right = node;
-            }
-        }
-
-        //////// copy-on-write stuff
-
-        private static <K,V> boolean isShared(final Node<K,V> node) {
-            return node != null && node.parent == null;
-        }
-
-        static <K,V> Node<K,V> markShared(final Node<K,V> node) {
-            if (node != null) {
-                node.parent = null;
-            }
-            return node;
-        }
-
-        private Node<K,V> lazyCopy(Node<K,V> newParent) {
-            assert (isShared(this));
-            assert (!isShrinkingOrUnlinked(shrinkOVL));
-
-            return new Node<K,V>(key, height, vOpt, newParent, 0L, markShared(left), markShared(right));
-        }
-
-        Node<K,V> unsharedLeft() {
-            final Node<K,V> cl = left;
-            if (!isShared(cl)) {
-                return cl;
-            } else {
-                lazyCopyChildren();
-                return left;
-            }
-        }
-
-        Node<K,V> unsharedRight() {
-            final Node<K,V> cr = right;
-            if (!isShared(cr)) {
-                return cr;
-            } else {
-                lazyCopyChildren();
-                return right;
-            }
-        }
-
-        Node<K,V> unsharedChild(final char dir) {
-            return dir == Left ? unsharedLeft() : unsharedRight();
-        }
-
-        private synchronized void lazyCopyChildren() {
-            final Node<K,V> cl = left;
-            if (isShared(cl)) {
-                left = cl.lazyCopy(this);
-            }
-            final Node<K,V> cr = right;
-            if (isShared(cr)) {
-                right = cr.lazyCopy(this);
-            }
-        }
-
-        //////// per-node blocking
-
-        private void waitUntilShrinkCompleted(final long ovl) {
-            if (!isShrinking(ovl)) {
-                return;
-            }
-
-            for (int tries = 0; tries < SpinCount; ++tries) {
-                if (shrinkOVL != ovl) {
-                    return;
-                }
-            }
-
-            for (int tries = 0; tries < YieldCount; ++tries) {
-                Thread.yield();
-                if (shrinkOVL != ovl) {
-                    return;
-                }
-            }
-
-            // spin and yield failed, use the nuclear option
-            synchronized (this) {
-                // we can't have gotten the lock unless the shrink was over
-            }
-            assert(shrinkOVL != ovl);
-        }
-
-        int validatedHeight() {
-            final int hL = left == null ? 0 : left.validatedHeight();
-            final int hR = right == null ? 0 : right.validatedHeight();
-            assert(Math.abs(hL - hR) <= 1);
-            final int h = 1 + Math.max(hL, hR);
-            assert(h == height);
-            return height;
-        }
-
-        //////// SubMap.size() helper
-
-        static <K,V> int computeFrozenSize(Node<K,V> root,
-                                           Comparable<? super K> fromCmp,
-                                           boolean fromIncl,
-                                           final Comparable<? super K> toCmp,
-                                           final boolean toIncl) {
-            int result = 0;
-            while (true) {
-                if (root == null) {
-                    return result;
-                }
-                if (fromCmp != null) {
-                    final int c = fromCmp.compareTo(root.key);
-                    if (c > 0 || (c == 0 && !fromIncl)) {
-                        // all matching nodes are on the right side
-                        root = root.right;
-                        continue;
-                    }
-                }
-                if (toCmp != null) {
-                    final int c = toCmp.compareTo(root.key);
-                    if (c < 0 || (c == 0 && !toIncl)) {
-                        // all matching nodes are on the left side
-                        root = root.left;
-                        continue;
-                    }
-                }
-
-                // Current node matches.  Nodes on left no longer need toCmp, nodes
-                // on right no longer need fromCmp.
-                if (root.vOpt != null) {
-                    ++result;
-                }
-                result += computeFrozenSize(root.left, fromCmp, fromIncl, null, false);
-                fromCmp = null;
-                root = root.right;
-            }
-        }
-
-        //////// Map.Entry stuff
-
-        @Override
-        public boolean equals(final Object o) {
-            if (!(o instanceof Map.Entry)) {
-                return false;
-            }
-            final Map.Entry rhs = (Map.Entry)o;
-            return eq(key, rhs.getKey()) && eq(getValue(), rhs.getValue());
-        }
-
-        private static boolean eq(final Object o1, final Object o2) {
-            return o1 == null ? o2 == null : o1.equals(o2);
-        }
-
-        @Override
-        public int hashCode() {
-            return (key   == null ? 0 : key.hashCode()) ^
-                   (getValue() == null ? 0 : getValue().hashCode());
-        }
-
-        @Override
-        public String toString() {
-            return key + "=" + getValue();
-        }
-    }
-
-    private static class RootHolder<K,V> extends Node<K,V> {
-        RootHolder() {
-            super(null, 1, null, null, 0L, null, null);
-        }
-
-        RootHolder(final RootHolder<K,V> snapshot) {
-            super(null, 1 + snapshot.height, null, null, 0L, null, snapshot.right);
-        }
-    }
-
-    private static class COWMgr<K,V> extends CopyOnWriteManager<RootHolder<K,V>> {
-        COWMgr() {
-            super(new RootHolder<K,V>(), 0);
-        }
-
-        COWMgr(final RootHolder<K,V> initialValue, final int initialSize) {
-            super(initialValue, initialSize);
-        }
-
-        protected RootHolder<K,V> freezeAndClone(final RootHolder<K,V> value) {
-            Node.markShared(value.right);
-            return new RootHolder<K,V>(value);
-        }
-
-        protected RootHolder<K,V> cloneFrozen(final RootHolder<K,V> frozenValue) {
-            return new RootHolder<K,V>(frozenValue);
-        }
-    }
-
-    //////// node access functions
-
-    private static int height(final Node<?,?> node) {
-        return node == null ? 0 : node.height;
-    }
-
-    @SuppressWarnings("unchecked")
-    private V decodeNull(final Object vOpt) {
-        assert (vOpt != SpecialRetry);
-        if (AllowNullValues) {
-            return vOpt == SpecialNull ? null : (V)vOpt;
-        } else {
-            return (V)vOpt;
-        }
-    }
-
-    private static Object encodeNull(final Object v) {
-        if (AllowNullValues) {
-            return v == null ? SpecialNull : v;
-        } else {
-            if (v == null) {
-                throw new NullPointerException();
-            }
-            return v;
-        }
-    }
-
-    //////////////// state
-
-    private final Comparator<? super K> comparator;
-    private transient volatile COWMgr<K,V> holderRef;
-
-    //////////////// public interface
-
-    public SnapTreeMap() {
-        this.comparator = null;
-        this.holderRef = new COWMgr<K,V>();
-    }
-
-    public SnapTreeMap(final Comparator<? super K> comparator) {
-        this.comparator = comparator;
-        this.holderRef = new COWMgr<K,V>();
-    }
-
-    public SnapTreeMap(final Map<? extends K, ? extends V> source) {
-        this.comparator = null;
-        this.holderRef = new COWMgr<K,V>();
-        putAll(source);
-    }
-
-    public SnapTreeMap(final SortedMap<K,? extends V> source) {
-        this.comparator = source.comparator();
-        if (source instanceof SnapTreeMap) {
-            final SnapTreeMap<K,V> s = (SnapTreeMap<K,V>) source;
-            this.holderRef = (COWMgr<K,V>) s.holderRef.clone();
-        }
-        else {
-            // TODO: take advantage of the sort order
-            // for now we optimize only by bypassing the COWMgr
-            int size = 0;
-            final RootHolder<K,V> holder = new RootHolder<K,V>();
-            for (Map.Entry<K,? extends V> e : source.entrySet()) {
-                final K k = e.getKey();
-                final V v = e.getValue();
-                if (k == null) {
-                    throw new NullPointerException("source map contained a null key");
-                }
-                if (!AllowNullValues && v == null) {
-                    throw new NullPointerException("source map contained a null value");
-                }
-                updateUnderRoot(k, comparable(k), UpdateAlways, null, encodeNull(v), holder);
-                ++size;
-            }
-
-            this.holderRef = new COWMgr<K,V>(holder, size);
-        }
-    }
-
-    @SuppressWarnings("unchecked")
-    @Override
-    public SnapTreeMap<K,V> clone() {
-        final SnapTreeMap<K,V> copy;
-        try {
-            copy = (SnapTreeMap<K,V>) super.clone();
-        } catch (final CloneNotSupportedException xx) {
-            throw new InternalError();
-        }
-        assert(copy.comparator == comparator);
-        copy.holderRef = (COWMgr<K,V>) holderRef.clone();
-        return copy;
-    }
-
-    @Override
-    public int size() {
-        return holderRef.size();
-    }
-
-    @Override
-    public boolean isEmpty() {
-        // removed-but-not-unlinked nodes cannot be leaves, so if the tree is
-        // truly empty then the root holder has no right child
-        return holderRef.read().right == null;
-    }
-
-    @Override
-    public void clear() {
-        holderRef = new COWMgr<K,V>();
-    }
-
-    @Override
-    public Comparator<? super K> comparator() {
-        return comparator;
-    }
-
-    @Override
-    public boolean containsValue(final Object value) {
-        // apply the same null policy as the rest of the code, but fall
-        // back to the default implementation
-        encodeNull(value);
-        return super.containsValue(value);
-    }
-
-    //////// concurrent search
-
-    @Override
-    public boolean containsKey(final Object key) {
-        return getImpl(key) != null;
-    }
-
-    @Override
-    public V get(final Object key) {
-        return decodeNull(getImpl(key));
-    }
-
-    @SuppressWarnings("unchecked")
-    protected Comparable<? super K> comparable(final Object key) {
-        if (key == null) {
-            throw new NullPointerException();
-        }
-        if (comparator == null) {
-            return (Comparable<? super K>)key;
-        }
-        return new Comparable<K>() {
-            final Comparator<? super K> _cmp = comparator;
-
-            @SuppressWarnings("unchecked")
-            public int compareTo(final K rhs) { return _cmp.compare((K)key, rhs); }
-        };
-    }
-
-    /** Returns either a value or SpecialNull, if present, or null, if absent. */
-    private Object getImpl(final Object key) {
-        final Comparable<? super K> k = comparable(key);
-
-        while (true) {
-            final Node<K,V> right = holderRef.read().right;
-            if (right == null) {
-                return null;
-            } else {
-                final int rightCmp = k.compareTo(right.key);
-                if (rightCmp == 0) {
-                    // who cares how we got here
-                    return right.vOpt;
-                }
-
-                final long ovl = right.shrinkOVL;
-                if (isShrinkingOrUnlinked(ovl)) {
-                    right.waitUntilShrinkCompleted(ovl);
-                    // RETRY
-                } else if (right == holderRef.read().right) {
-                    // the reread of .right is the one protected by our read of ovl
-                    final Object vo = attemptGet(k, right, (rightCmp < 0 ? Left : Right), ovl);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    private Object attemptGet(final Comparable<? super K> k,
-                              final Node<K,V> node,
-                              final char dirToC,
-                              final long nodeOVL) {
-        while (true) {
-            final Node<K,V> child = node.child(dirToC);
-
-            if (child == null) {
-                if (node.shrinkOVL != nodeOVL) {
-                    return SpecialRetry;
-                }
-
-                // Note is not present.  Read of node.child occurred while
-                // parent.child was valid, so we were not affected by any
-                // shrinks.
-                return null;
-            } else {
-                final int childCmp = k.compareTo(child.key);
-                if (childCmp == 0) {
-                    // how we got here is irrelevant
-                    return child.vOpt;
-                }
-
-                // child is non-null
-                final long childOVL = child.shrinkOVL;
-                if (isShrinkingOrUnlinked(childOVL)) {
-                    child.waitUntilShrinkCompleted(childOVL);
-
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-                    // else RETRY
-                } else if (child != node.child(dirToC)) {
-                    // this .child is the one that is protected by childOVL
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-                    // else RETRY
-                } else {
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-
-                    // At this point we know that the traversal our parent took
-                    // to get to node is still valid.  The recursive
-                    // implementation will validate the traversal from node to
-                    // child, so just prior to the nodeOVL validation both
-                    // traversals were definitely okay.  This means that we are
-                    // no longer vulnerable to node shrinks, and we don't need
-                    // to validate nodeOVL any more.
-                    final Object vo = attemptGet(k, child, (childCmp < 0 ? Left : Right), childOVL);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    @Override
-    public K firstKey() {
-        return extremeKeyOrThrow(Left);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public Map.Entry<K,V> firstEntry() {
-        return (SimpleImmutableEntry<K,V>) extreme(false, Left);
-    }
-
-    @Override
-    public K lastKey() {
-        return extremeKeyOrThrow(Right);
-    }
-
-    @SuppressWarnings("unchecked")
-    public Map.Entry<K,V> lastEntry() {
-        return (SimpleImmutableEntry<K,V>) extreme(false, Right);
-    }
-
-    private K extremeKeyOrThrow(final char dir) {
-        final K k = (K) extreme(true, dir);
-        if (k == null) {
-            throw new NoSuchElementException();
-        }
-        return k;
-    }
-
-    /** Returns a key if returnKey is true, a SimpleImmutableEntry otherwise.
-     *  Returns null if none exists.
-     */
-    private Object extreme(final boolean returnKey, final char dir) {
-        while (true) {
-            final Node<K,V> right = holderRef.read().right;
-            if (right == null) {
-                return null;
-            } else {
-                final long ovl = right.shrinkOVL;
-                if (isShrinkingOrUnlinked(ovl)) {
-                    right.waitUntilShrinkCompleted(ovl);
-                    // RETRY
-                } else if (right == holderRef.read().right) {
-                    // the reread of .right is the one protected by our read of ovl
-                    final Object vo = attemptExtreme(returnKey, dir, right, ovl);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    private Object attemptExtreme(final boolean returnKey,
-                                  final char dir,
-                                  final Node<K,V> node,
-                                  final long nodeOVL) {
-        while (true) {
-            final Node<K,V> child = node.child(dir);
-
-            if (child == null) {
-                // read of the value must be protected by the OVL, because we
-                // must linearize against another thread that inserts a new min
-                // key and then changes this key's value
-                final Object vo = node.vOpt;
-
-                if (node.shrinkOVL != nodeOVL) {
-                    return SpecialRetry;
-                }
-
-                assert(vo != null);
-
-                return returnKey ? node.key : new SimpleImmutableEntry<K,V>(node.key, decodeNull(vo));
-            } else {
-                // child is non-null
-                final long childOVL = child.shrinkOVL;
-                if (isShrinkingOrUnlinked(childOVL)) {
-                    child.waitUntilShrinkCompleted(childOVL);
-
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-                    // else RETRY
-                } else if (child != node.child(dir)) {
-                    // this .child is the one that is protected by childOVL
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-                    // else RETRY
-                } else {
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-
-                    final Object vo = attemptExtreme(returnKey, dir, child, childOVL);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    //////////////// quiesced search
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public K lowerKey(final K key) {
-        return (K) boundedExtreme(null, false, comparable(key), false, true, Right);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public K floorKey(final K key) {
-        return (K) boundedExtreme(null, false, comparable(key), true, true, Right);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public K ceilingKey(final K key) {
-        return (K) boundedExtreme(comparable(key), true, null, false, true, Left);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public K higherKey(final K key) {
-        return (K) boundedExtreme(comparable(key), false, null, false, true, Left);
-    }
-
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public Entry<K,V> lowerEntry(final K key) {
-        return (Entry<K,V>) boundedExtreme(null, false, comparable(key), false, false, Right);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public Entry<K,V> floorEntry(final K key) {
-        return (Entry<K,V>) boundedExtreme(null, false, comparable(key), true, false, Right);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public Entry<K,V> ceilingEntry(final K key) {
-        return (Entry<K,V>) boundedExtreme(comparable(key), true, null, false, false, Left);
-    }
-
-    @Override
-    @SuppressWarnings("unchecked")
-    public Entry<K,V> higherEntry(final K key) {
-        return (Entry<K,V>) boundedExtreme(comparable(key), false, null, false, false, Left);
-    }
-
-    /** Returns null if none exists. */
-    @SuppressWarnings("unchecked")
-    private K boundedExtremeKeyOrThrow(final Comparable<? super K> minCmp,
-                                       final boolean minIncl,
-                                       final Comparable<? super K> maxCmp,
-                                       final boolean maxIncl,
-                                       final char dir) {
-        final K k = (K) boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, true, dir);
-        if (k == null) {
-            throw new NoSuchElementException();
-        }
-        return k;
-    }
-
-    /** Returns null if none exists. */
-    @SuppressWarnings("unchecked")
-    private Object boundedExtreme(final Comparable<? super K> minCmp,
-                                  final boolean minIncl,
-                                  final Comparable<? super K> maxCmp,
-                                  final boolean maxIncl,
-                                  final boolean returnKey,
-                                  final char dir) {
-        K resultKey;
-        Object result;
-
-        if ((dir == Left && minCmp == null) || (dir == Right && maxCmp == null)) {
-            // no bound in the extreme direction, so use the concurrent search
-            result = extreme(returnKey, dir);
-            if (result == null) {
-                return null;
-            }
-            resultKey = returnKey ? (K) result : ((SimpleImmutableEntry<K,V>) result).getKey();
-        }
-        else {
-            RootHolder holder = holderRef.availableFrozen();
-            final Epoch.Ticket ticket;
-            if (holder == null) {
-                ticket = holderRef.beginQuiescent();
-                holder = holderRef.read();
-            }
-            else {
-                ticket = null;
-            }
-            try {
-                final Node<K,V> node = (dir == Left)
-                        ? boundedMin(holder.right, minCmp, minIncl)
-                        : boundedMax(holder.right, maxCmp, maxIncl);
-                if (node == null) {
-                    return null;
-                }
-                resultKey = node.key;
-                if (returnKey) {
-                    result = node.key;
-                }
-                else if (ticket == null) {
-                    // node of a frozen tree is okay, copy otherwise
-                    result = node;
-                }
-                else {
-                    // we must copy the node
-                    result = new SimpleImmutableEntry<K,V>(node.key, node.getValue());
-                }
-            }
-            finally {
-                if (ticket != null) {
-                    ticket.leave(0);
-                }
-            }
-        }
-
-        if (dir == Left && maxCmp != null) {
-            final int c = maxCmp.compareTo(resultKey);
-            if (c < 0 || (c == 0 && !maxIncl)) {
-                return null;
-            }
-        }
-        if (dir == Right && minCmp != null) {
-            final int c = minCmp.compareTo(resultKey);
-            if (c > 0 || (c == 0 && !minIncl)) {
-                return null;
-            }
-        }
-
-        return result;
-    }
-
-    private Node<K,V> boundedMin(Node<K,V> node,
-                                 final Comparable<? super K> minCmp,
-                                 final boolean minIncl) {
-        while (node != null) {
-            final int c = minCmp.compareTo(node.key);
-            if (c < 0) {
-                // there may be a matching node on the left branch
-                final Node<K,V> z = boundedMin(node.left, minCmp, minIncl);
-                if (z != null) {
-                    return z;
-                }
-            }
-
-            if (c < 0 || (c == 0 && minIncl)) {
-                // this node is a candidate, is it actually present?
-                if (node.vOpt != null) {
-                    return node;
-                }
-            }
-
-            // the matching node is on the right branch if it is present
-            node = node.right;
-        }
-        return null;
-    }
-
-    private Node<K,V> boundedMax(Node<K,V> node,
-                                 final Comparable<? super K> maxCmp,
-                                 final boolean maxIncl) {
-        while (node != null) {
-            final int c = maxCmp.compareTo(node.key);
-            if (c > 0) {
-                // there may be a matching node on the right branch
-                final Node<K,V> z = boundedMax(node.right, maxCmp, maxIncl);
-                if (z != null) {
-                    return z;
-                }
-            }
-
-            if (c > 0 || (c == 0 && maxIncl)) {
-                // this node is a candidate, is it actually present?
-                if (node.vOpt != null) {
-                    return node;
-                }
-            }
-
-            // the matching node is on the left branch if it is present
-            node = node.left;
-        }
-        return null;
-    }
-
-    //////////////// update
-
-    private static final int UpdateAlways = 0;
-    private static final int UpdateIfAbsent = 1;
-    private static final int UpdateIfPresent = 2;
-    private static final int UpdateIfEq = 3;
-
-    private static boolean shouldUpdate(final int func, final Object prev, final Object expected) {
-        switch (func) {
-            case UpdateAlways: return true;
-            case UpdateIfAbsent: return prev == null;
-            case UpdateIfPresent: return prev != null;
-            default: { // UpdateIfEq
-                assert(expected != null);
-                if (prev == null) {
-                    return false;
-                }
-                if (AllowNullValues && (prev == SpecialNull || expected == SpecialNull)) {
-                    return prev == SpecialNull && expected == SpecialNull;
-                }
-                return prev.equals(expected);
-            }
-        }
-    }
-
-    private static Object noUpdateResult(final int func, final Object prev) {
-        return func == UpdateIfEq ? Boolean.FALSE : prev;
-    }
-
-    private static Object updateResult(final int func, final Object prev) {
-        return func == UpdateIfEq ? Boolean.TRUE : prev;
-    }
-
-    private static int sizeDelta(final int func, final Object result, final Object newValue) {
-        switch (func) {
-            case UpdateAlways: {
-                return (result != null ? -1 : 0) + (newValue != null ? 1 : 0);
-            }
-            case UpdateIfAbsent: {
-                assert(newValue != null);
-                return result != null ? 0 : 1;
-            }
-            case UpdateIfPresent: {
-                return result == null ? 0 : (newValue != null ? 0 : -1);
-            }
-            default: { // UpdateIfEq
-                return !((Boolean) result) ? 0 : (newValue != null ? 0 : -1);
-            }
-        }
-    }
-
-    @Override
-    public V put(final K key, final V value) {
-        return decodeNull(update(key, UpdateAlways, null, encodeNull(value)));
-    }
-
-    @Override
-    public V putIfAbsent(final K key, final V value) {
-        return decodeNull(update(key, UpdateIfAbsent, null, encodeNull(value)));
-    }
-
-    @Override
-    public V replace(final K key, final V value) {
-        return decodeNull(update(key, UpdateIfPresent, null, encodeNull(value)));
-    }
-
-    @Override
-    public boolean replace(final K key, final V oldValue, final V newValue) {
-        return (Boolean) update(key, UpdateIfEq, encodeNull(oldValue), encodeNull(newValue));
-    }
-
-    @Override
-    public V remove(final Object key) {
-        return decodeNull(update(key, UpdateAlways, null, null));
-    }
-
-    @Override
-    public boolean remove(final Object key, final Object value) {
-        if (key == null) {
-            throw new NullPointerException();
-        }
-        if (!AllowNullValues && value == null) {
-            return false;
-        }
-        return (Boolean) update(key, UpdateIfEq, encodeNull(value), null);
-    }
-
-    // manages the epoch
-    private Object update(final Object key,
-                          final int func,
-                          final Object expected,
-                          final Object newValue) {
-        final Comparable<? super K> k = comparable(key);
-        int sd = 0;
-        final Epoch.Ticket ticket = holderRef.beginMutation();
-        try {
-            final Object result = updateUnderRoot(key, k, func, expected, newValue, holderRef.mutable());
-            sd = sizeDelta(func, result, newValue);
-            return result;
-        } finally {
-            ticket.leave(sd);
-        }
-    }
-
-    // manages updates to the root holder
-    @SuppressWarnings("unchecked")
-    private Object updateUnderRoot(final Object key,
-                                   final Comparable<? super K> k,
-                                   final int func,
-                                   final Object expected,
-                                   final Object newValue,
-                                   final RootHolder<K,V> holder) {
-
-        while (true) {
-            final Node<K,V> right = holder.unsharedRight();
-            if (right == null) {
-                // key is not present
-                if (!shouldUpdate(func, null, expected)) {
-                    return noUpdateResult(func, null);
-                }
-                if (newValue == null || attemptInsertIntoEmpty((K)key, newValue, holder)) {
-                    // nothing needs to be done, or we were successful, prev value is Absent
-                    return updateResult(func, null);
-                }
-                // else RETRY
-            } else {
-                final long ovl = right.shrinkOVL;
-                if (isShrinkingOrUnlinked(ovl)) {
-                    right.waitUntilShrinkCompleted(ovl);
-                    // RETRY
-                } else if (right == holder.right) {
-                    // this is the protected .right
-                    final Object vo = attemptUpdate(key, k, func, expected, newValue, holder, right, ovl);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    private boolean attemptInsertIntoEmpty(final K key,
-                                           final Object vOpt,
-                                           final RootHolder<K,V> holder) {
-        synchronized (holder) {
-            if (holder.right == null) {
-                holder.right = new Node<K,V>(key, 1, vOpt, holder, 0L, null, null);
-                holder.height = 2;
-                return true;
-            } else {
-                return false;
-            }
-        }
-    }
-
-    /** If successful returns the non-null previous value, SpecialNull for a
-     *  null previous value, or null if not previously in the map.
-     *  The caller should retry if this method returns SpecialRetry.
-     */
-    @SuppressWarnings("unchecked")
-    private Object attemptUpdate(final Object key,
-                                 final Comparable<? super K> k,
-                                 final int func,
-                                 final Object expected,
-                                 final Object newValue,
-                                 final Node<K,V> parent,
-                                 final Node<K,V> node,
-                                 final long nodeOVL) {
-        // As the search progresses there is an implicit min and max assumed for the
-        // branch of the tree rooted at node. A left rotation of a node x results in
-        // the range of keys in the right branch of x being reduced, so if we are at a
-        // node and we wish to traverse to one of the branches we must make sure that
-        // the node has not undergone a rotation since arriving from the parent.
-        //
-        // A rotation of node can't screw us up once we have traversed to node's
-        // child, so we don't need to build a huge transaction, just a chain of
-        // smaller read-only transactions.
-
-        assert (nodeOVL != UnlinkedOVL);
-
-        final int cmp = k.compareTo(node.key);
-        if (cmp == 0) {
-            return attemptNodeUpdate(func, expected, newValue, parent, node);
-        }
-
-        final char dirToC = cmp < 0 ? Left : Right;
-
-        while (true) {
-            final Node<K,V> child = node.unsharedChild(dirToC);
-
-            if (node.shrinkOVL != nodeOVL) {
-                return SpecialRetry;
-            }
-
-            if (child == null) {
-                // key is not present
-                if (newValue == null) {
-                    // Removal is requested.  Read of node.child occurred
-                    // while parent.child was valid, so we were not affected
-                    // by any shrinks.
-                    return noUpdateResult(func, null);
-                } else {
-                    // Update will be an insert.
-                    final boolean success;
-                    final Node<K,V> damaged;
-                    synchronized (node) {
-                        // Validate that we haven't been affected by past
-                        // rotations.  We've got the lock on node, so no future
-                        // rotations can mess with us.
-                        if (node.shrinkOVL != nodeOVL) {
-                            return SpecialRetry;
-                        }
-
-                        if (node.child(dirToC) != null) {
-                            // Lost a race with a concurrent insert.  No need
-                            // to back up to the parent, but we must RETRY in
-                            // the outer loop of this method.
-                            success = false;
-                            damaged = null;
-                        } else {
-                            // We're valid.  Does the user still want to
-                            // perform the operation?
-                            if (!shouldUpdate(func, null, expected)) {
-                                return noUpdateResult(func, null);
-                            }
-
-                            // Create a new leaf
-                            node.setChild(dirToC, new Node<K,V>((K)key, 1, newValue, node, 0L, null, null));
-                            success = true;
-
-                            // attempt to fix node.height while we've still got
-                            // the lock
-                            damaged = fixHeight_nl(node);
-                        }
-                    }
-                    if (success) {
-                        fixHeightAndRebalance(damaged);
-                        return updateResult(func, null);
-                    }
-                    // else RETRY
-                }
-            } else {
-                // non-null child
-                final long childOVL = child.shrinkOVL;
-                if (isShrinkingOrUnlinked(childOVL)) {
-                    child.waitUntilShrinkCompleted(childOVL);
-                    // RETRY
-                } else if (child != node.child(dirToC)) {
-                    // this second read is important, because it is protected
-                    // by childOVL
-                    // RETRY
-                } else {
-                    // validate the read that our caller took to get to node
-                    if (node.shrinkOVL != nodeOVL) {
-                        return SpecialRetry;
-                    }
-
-                    // At this point we know that the traversal our parent took
-                    // to get to node is still valid.  The recursive
-                    // implementation will validate the traversal from node to
-                    // child, so just prior to the nodeOVL validation both
-                    // traversals were definitely okay.  This means that we are
-                    // no longer vulnerable to node shrinks, and we don't need
-                    // to validate nodeOVL any more.
-                    final Object vo = attemptUpdate(key, k, func, expected, newValue, node, child, childOVL);
-                    if (vo != SpecialRetry) {
-                        return vo;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    /** parent will only be used for unlink, update can proceed even if parent
-     *  is stale.
-     */
-    private Object attemptNodeUpdate(final int func,
-                                     final Object expected,
-                                     final Object newValue,
-                                     final Node<K,V> parent,
-                                     final Node<K,V> node) {
-        if (newValue == null) {
-            // removal
-            if (node.vOpt == null) {
-                // This node is already removed, nothing to do.
-            	return noUpdateResult(func, null);
-            }
-        }
-
-        if (newValue == null && (node.left == null || node.right == null)) {
-            // potential unlink, get ready by locking the parent
-            final Object prev;
-            final Node<K,V> damaged;
-            synchronized (parent) {
-                if (isUnlinked(parent.shrinkOVL) || node.parent != parent) {
-                    return SpecialRetry;
-                }
-
-                synchronized (node) {
-                    prev = node.vOpt;
-                    if (!shouldUpdate(func, prev, expected)) {
-                        return noUpdateResult(func, prev);
-                    }
-                    if (prev == null) {
-                        return updateResult(func, prev);
-                    }
-                    if (!attemptUnlink_nl(parent, node)) {
-                        return SpecialRetry;
-                    }
-                }
-                // try to fix the parent while we've still got the lock
-                damaged = fixHeight_nl(parent);
-            }
-            fixHeightAndRebalance(damaged);
-            return updateResult(func, prev);
-        } else {
-            // potential update (including remove-without-unlink)
-            synchronized (node) {
-                // regular version changes don't bother us
-                if (isUnlinked(node.shrinkOVL)) {
-                    return SpecialRetry;
-                }
-
-                final Object prev = node.vOpt;
-                if (!shouldUpdate(func, prev, expected)) {
-                    return noUpdateResult(func, prev);
-                }
-
-                // retry if we now detect that unlink is possible
-                if (newValue == null && (node.left == null || node.right == null)) {
-                    return SpecialRetry;
-                }
-
-                // update in-place
-                node.vOpt = newValue;
-
-                afterNodeUpdate_nl(node, newValue);
-
-                return updateResult(func, prev);
-            }
-        }
-    }
-
-    protected void afterNodeUpdate_nl(Node<K,V> node, Object val) {
-    }
-
-    /** Does not adjust the size or any heights. */
-    private boolean attemptUnlink_nl(final Node<K,V> parent, final Node<K,V> node) {
-        // assert (Thread.holdsLock(parent));
-        // assert (Thread.holdsLock(node));
-        assert (!isUnlinked(parent.shrinkOVL));
-
-        final Node<K,V> parentL = parent.left;
-        final Node<K,V>  parentR = parent.right;
-        if (parentL != node && parentR != node) {
-            // node is no longer a child of parent
-            return false;
-        }
-
-        assert (!isUnlinked(node.shrinkOVL));
-        assert (parent == node.parent);
-
-        final Node<K,V> left = node.unsharedLeft();
-        final Node<K,V> right = node.unsharedRight();
-        if (left != null && right != null) {
-            // splicing is no longer possible
-            return false;
-        }
-        final Node<K,V> splice = left != null ? left : right;
-
-        if (parentL == node) {
-            parent.left = splice;
-        } else {
-            parent.right = splice;
-        }
-        if (splice != null) {
-            splice.parent = parent;
-        }
-
-        node.shrinkOVL = UnlinkedOVL;
-        node.vOpt = null;
-
-        return true;
-    }
-
-    //////////////// NavigableMap stuff
-
-    @Override
-    public Map.Entry<K,V> pollFirstEntry() {
-        return pollExtremeEntry(Left);
-    }
-
-    @Override
-    public Map.Entry<K,V> pollLastEntry() {
-        return pollExtremeEntry(Right);
-    }
-
-    private Map.Entry<K,V> pollExtremeEntry(final char dir) {
-        final Epoch.Ticket ticket = holderRef.beginMutation();
-        int sizeDelta = 0;
-        try {
-            final Map.Entry<K,V> prev = pollExtremeEntryUnderRoot(dir, holderRef.mutable());
-            if (prev != null) {
-                sizeDelta = -1;
-            }
-            return prev;
-        } finally {
-            ticket.leave(sizeDelta);
-        }
-    }
-
-    private Map.Entry<K,V> pollExtremeEntryUnderRoot(final char dir, final RootHolder<K,V> holder) {
-        while (true) {
-            final Node<K,V> right = holder.unsharedRight();
-            if (right == null) {
-                // tree is empty, nothing to remove
-                return null;
-            } else {
-                final long ovl = right.shrinkOVL;
-                if (isShrinkingOrUnlinked(ovl)) {
-                    right.waitUntilShrinkCompleted(ovl);
-                    // RETRY
-                } else if (right == holder.right) {
-                    // this is the protected .right
-                    final Map.Entry<K,V> result = attemptRemoveExtreme(dir, holder, right, ovl);
-                    if (result != SpecialRetry) {
-                        return result;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    private Map.Entry<K,V> attemptRemoveExtreme(final char dir,
-                                                final Node<K,V> parent,
-                                                final Node<K,V> node,
-                                                final long nodeOVL) {
-        assert (nodeOVL != UnlinkedOVL);
-
-        while (true) {
-            final Node<K,V> child = node.unsharedChild(dir);
-
-            if (nodeOVL != node.shrinkOVL) {
-                return null;
-            }
-
-            if (child == null) {
-                // potential unlink, get ready by locking the parent
-                final Object vo;
-                final Node<K,V> damaged;
-                synchronized (parent) {
-                    if (isUnlinked(parent.shrinkOVL) || node.parent != parent) {
-                        return null;
-                    }
-
-                    synchronized (node) {
-                        vo = node.vOpt;
-                        if (node.child(dir) != null || !attemptUnlink_nl(parent, node)) {
-                            return null;
-                        }
-                        // success!
-                    }
-                    // try to fix parent.height while we've still got the lock
-                    damaged = fixHeight_nl(parent);
-                }
-                fixHeightAndRebalance(damaged);
-                return new SimpleImmutableEntry<K,V>(node.key, decodeNull(vo));
-            } else {
-                // keep going down
-                final long childOVL = child.shrinkOVL;
-                if (isShrinkingOrUnlinked(childOVL)) {
-                    child.waitUntilShrinkCompleted(childOVL);
-                    // RETRY
-                } else if (child != node.child(dir)) {
-                    // this second read is important, because it is protected
-                    // by childOVL
-                    // RETRY
-                } else {
-                    // validate the read that our caller took to get to node
-                    if (node.shrinkOVL != nodeOVL) {
-                        return null;
-                    }
-
-                    final Map.Entry<K,V> result = attemptRemoveExtreme(dir, node, child, childOVL);
-                    if (result != null) {
-                        return result;
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-
-
-    //////////////// tree balance and height info repair
-
-    private static final int UnlinkRequired = -1;
-    private static final int RebalanceRequired = -2;
-    private static final int NothingRequired = -3;
-
-    private int nodeCondition(final Node<K,V> node) {
-        // Begin atomic.
-
-        final Node<K,V> nL = node.left;
-        final Node<K,V> nR = node.right;
-
-        if ((nL == null || nR == null) && node.vOpt == null) {
-            return UnlinkRequired;
-        }
-
-        final int hN = node.height;
-        final int hL0 = height(nL);
-        final int hR0 = height(nR);
-
-        // End atomic.  Since any thread that changes a node promises to fix
-        // it, either our read was consistent (and a NothingRequired conclusion
-        // is correct) or someone else has taken responsibility for either node
-        // or one of its children.
-
-        final int hNRepl = 1 + Math.max(hL0, hR0);
-        final int bal = hL0 - hR0;
-
-        if (bal < -1 || bal > 1) {
-            return RebalanceRequired;
-        }
-
-        return hN != hNRepl ? hNRepl : NothingRequired;
-    }
-
-    private void fixHeightAndRebalance(Node<K,V> node) {
-        while (node != null && node.parent != null) {
-            final int condition = nodeCondition(node);
-            if (condition == NothingRequired || isUnlinked(node.shrinkOVL)) {
-                // nothing to do, or no point in fixing this node
-                return;
-            }
-
-            if (condition != UnlinkRequired && condition != RebalanceRequired) {
-                synchronized (node) {
-                    node = fixHeight_nl(node);
-                }
-            } else {
-                final Node<K,V> nParent = node.parent;
-                synchronized (nParent) {
-                    if (!isUnlinked(nParent.shrinkOVL) && node.parent == nParent) {
-                        synchronized (node) {
-                            node = rebalance_nl(nParent, node);
-                        }
-                    }
-                    // else RETRY
-                }
-            }
-        }
-    }
-
-    /** Attempts to fix the height of a (locked) damaged node, returning the
-     *  lowest damaged node for which this thread is responsible.  Returns null
-     *  if no more repairs are needed.
-     */
-    private Node<K,V> fixHeight_nl(final Node<K,V> node) {
-        final int c = nodeCondition(node);
-        switch (c) {
-            case RebalanceRequired:
-            case UnlinkRequired:
-                // can't repair
-                return node;
-            case NothingRequired:
-                // Any future damage to this node is not our responsibility.
-                return null;
-            default:
-                node.height = c;
-                // we've damaged our parent, but we can't fix it now
-                return node.parent;
-        }
-    }
-
-    /** nParent and n must be locked on entry.  Returns a damaged node, or null
-     *  if no more rebalancing is necessary.
-     */
-    private Node<K,V> rebalance_nl(final Node<K,V> nParent, final Node<K,V> n) {
-
-        final Node<K,V> nL = n.unsharedLeft();
-        final Node<K,V> nR = n.unsharedRight();
-
-        if ((nL == null || nR == null) && n.vOpt == null) {
-            if (attemptUnlink_nl(nParent, n)) {
-                // attempt to fix nParent.height while we've still got the lock
-                return fixHeight_nl(nParent);
-            } else {
-                // retry needed for n
-                return n;
-            }
-        }
-
-        final int hN = n.height;
-        final int hL0 = height(nL);
-        final int hR0 = height(nR);
-        final int hNRepl = 1 + Math.max(hL0, hR0);
-        final int bal = hL0 - hR0;
-
-        if (bal > 1) {
-            return rebalanceToRight_nl(nParent, n, nL, hR0);
-        } else if (bal < -1) {
-            return rebalanceToLeft_nl(nParent, n, nR, hL0);
-        } else if (hNRepl != hN) {
-            // we've got more than enough locks to do a height change, no need to
-            // trigger a retry
-            n.height = hNRepl;
-
-            // nParent is already locked, let's try to fix it too
-            return fixHeight_nl(nParent);
-        } else {
-            // nothing to do
-            return null;
-        }
-    }
-
-    private Node<K,V> rebalanceToRight_nl(final Node<K,V> nParent,
-                                          final Node<K,V> n,
-                                          final Node<K,V> nL,
-                                          final int hR0) {
-        // L is too large, we will rotate-right.  If L.R is taller
-        // than L.L, then we will first rotate-left L.
-        synchronized (nL) {
-            final int hL = nL.height;
-            if (hL - hR0 <= 1) {
-                return n; // retry
-            } else {
-                final Node<K,V> nLR = nL.unsharedRight();
-                final int hLL0 = height(nL.left);
-                final int hLR0 = height(nLR);
-                if (hLL0 >= hLR0) {
-                    // rotate right based on our snapshot of hLR
-                    return rotateRight_nl(nParent, n, nL, hR0, hLL0, nLR, hLR0);
-                } else {
-                    synchronized (nLR) {
-                        // If our hLR snapshot is incorrect then we might
-                        // actually need to do a single rotate-right on n.
-                        final int hLR = nLR.height;
-                        if (hLL0 >= hLR) {
-                            return rotateRight_nl(nParent, n, nL, hR0, hLL0, nLR, hLR);
-                        } else {
-                            // If the underlying left balance would not be
-                            // sufficient to actually fix n.left, then instead
-                            // of rolling it into a double rotation we do it on
-                            // it's own.  This may let us avoid rotating n at
-                            // all, but more importantly it avoids the creation
-                            // of damaged nodes that don't have a direct
-                            // ancestry relationship.  The recursive call to
-                            // rebalanceToRight_nl in this case occurs after we
-                            // release the lock on nLR.
-                            //
-                            // We also need to avoid damaging n.left if post-
-                            // rotation it would be an unnecessary routing node.
-                            // Note that although our height snapshots might be
-                            // stale, their zero/non-zero state can't be.
-                            final int hLRL = height(nLR.left);
-                            final int b = hLL0 - hLRL;
-                            if (b >= -1 && b <= 1 && !((hLL0 == 0 || hLRL == 0) && nL.vOpt == null)) {
-                                // nParent.child.left won't be damaged after a double rotation
-                                return rotateRightOverLeft_nl(nParent, n, nL, hR0, hLL0, nLR, hLRL);
-                            }
-                        }
-                    }
-                    // focus on nL, if necessary n will be balanced later
-                    return rebalanceToLeft_nl(n, nL, nLR, hLL0);
-                }
-            }
-        }
-    }
-
-    private Node<K,V> rebalanceToLeft_nl(final Node<K,V> nParent,
-                                         final Node<K,V> n,
-                                         final Node<K,V> nR,
-                                         final int hL0) {
-        synchronized (nR) {
-            final int hR = nR.height;
-            if (hL0 - hR >= -1) {
-                return n; // retry
-            } else {
-                final Node<K,V> nRL = nR.unsharedLeft();
-                final int hRL0 = height(nRL);
-                final int hRR0 = height(nR.right);
-                if (hRR0 >= hRL0) {
-                    return rotateLeft_nl(nParent, n, hL0, nR, nRL, hRL0, hRR0);
-                } else {
-                    synchronized (nRL) {
-                        final int hRL = nRL.height;
-                        if (hRR0 >= hRL) {
-                            return rotateLeft_nl(nParent, n, hL0, nR, nRL, hRL, hRR0);
-                        } else {
-                            final int hRLR = height(nRL.right);
-                            final int b = hRR0 - hRLR;
-                            if (b >= -1 && b <= 1 && !((hRR0 == 0 || hRLR == 0) && nR.vOpt == null)) {
-                                return rotateLeftOverRight_nl(nParent, n, hL0, nR, nRL, hRR0, hRLR);
-                            }
-                        }
-                    }
-                    return rebalanceToRight_nl(n, nR, nRL, hRR0);
-                }
-            }
-        }
-    }
-
-    private Node<K,V> rotateRight_nl(final Node<K,V> nParent,
-                                     final Node<K,V> n,
-                                     final Node<K,V> nL,
-                                     final int hR,
-                                     final int hLL,
-                                     final Node<K,V> nLR,
-                                     final int hLR) {
-        final long nodeOVL = n.shrinkOVL;
-
-        final Node<K,V> nPL = nParent.left;
-
-        n.shrinkOVL = beginChange(nodeOVL);
-
-        n.left = nLR;
-        if (nLR != null) {
-            nLR.parent = n;
-        }
-
-        nL.right = n;
-        n.parent = nL;
-
-        if (nPL == n) {
-            nParent.left = nL;
-        } else {
-            nParent.right = nL;
-        }
-        nL.parent = nParent;
-
-        // fix up heights links
-        final int hNRepl = 1 + Math.max(hLR, hR);
-        n.height = hNRepl;
-        nL.height = 1 + Math.max(hLL, hNRepl);
-
-        n.shrinkOVL = endChange(nodeOVL);
-
-        // We have damaged nParent, n (now parent.child.right), and nL (now
-        // parent.child).  n is the deepest.  Perform as many fixes as we can
-        // with the locks we've got.
-
-        // We've already fixed the height for n, but it might still be outside
-        // our allowable balance range.  In that case a simple fixHeight_nl
-        // won't help.
-        final int balN = hLR - hR;
-        if (balN < -1 || balN > 1) {
-            // we need another rotation at n
-            return n;
-        }
-
-        // we've fixed balance and height damage for n, now handle
-        // extra-routing node damage
-        if ((nLR == null || hR == 0) && n.vOpt == null) {
-            // we need to remove n and then repair
-            return n;
-        }
-
-        // we've already fixed the height at nL, do we need a rotation here?
-        final int balL = hLL - hNRepl;
-        if (balL < -1 || balL > 1) {
-            return nL;
-        }
-
-        // nL might also have routing node damage (if nL.left was null)
-        if (hLL == 0 && nL.vOpt == null) {
-            return nL;
-        }
-
-        // try to fix the parent height while we've still got the lock
-        return fixHeight_nl(nParent);
-    }
-
-    private Node<K,V> rotateLeft_nl(final Node<K,V> nParent,
-                                    final Node<K,V> n,
-                                    final int hL,
-                                    final Node<K,V> nR,
-                                    final Node<K,V> nRL,
-                                    final int hRL,
-                                    final int hRR) {
-        final long nodeOVL = n.shrinkOVL;
-
-        final Node<K,V> nPL = nParent.left;
-
-        n.shrinkOVL = beginChange(nodeOVL);
-
-        // fix up n links, careful to be compatible with concurrent traversal for all but n
-        n.right = nRL;
-        if (nRL != null) {
-            nRL.parent = n;
-        }
-
-        nR.left = n;
-        n.parent = nR;
-
-        if (nPL == n) {
-            nParent.left = nR;
-        } else {
-            nParent.right = nR;
-        }
-        nR.parent = nParent;
-
-        // fix up heights
-        final int  hNRepl = 1 + Math.max(hL, hRL);
-        n.height = hNRepl;
-        nR.height = 1 + Math.max(hNRepl, hRR);
-
-        n.shrinkOVL = endChange(nodeOVL);
-
-        final int balN = hRL - hL;
-        if (balN < -1 || balN > 1) {
-            return n;
-        }
-
-        if ((nRL == null || hL == 0) && n.vOpt == null) {
-            return n;
-        }
-
-        final int balR = hRR - hNRepl;
-        if (balR < -1 || balR > 1) {
-            return nR;
-        }
-
-        if (hRR == 0 && nR.vOpt == null) {
-            return nR;
-        }
-
-        return fixHeight_nl(nParent);
-    }
-
-    private Node<K,V> rotateRightOverLeft_nl(final Node<K,V> nParent,
-                                             final Node<K,V> n,
-                                             final Node<K,V> nL,
-                                             final int hR,
-                                             final int hLL,
-                                             final Node<K,V> nLR,
-                                             final int hLRL) {
-        final long nodeOVL = n.shrinkOVL;
-        final long leftOVL = nL.shrinkOVL;
-
-        final Node<K,V> nPL = nParent.left;
-        final Node<K,V> nLRL = nLR.unsharedLeft();
-        final Node<K,V> nLRR = nLR.unsharedRight();
-        final int hLRR = height(nLRR);
-
-        n.shrinkOVL = beginChange(nodeOVL);
-        nL.shrinkOVL = beginChange(leftOVL);
-
-        // fix up n links, careful about the order!
-        n.left = nLRR;
-        if (nLRR != null) {
-            nLRR.parent = n;
-        }
-
-        nL.right = nLRL;
-        if (nLRL != null) {
-            nLRL.parent = nL;
-        }
-
-        nLR.left = nL;
-        nL.parent = nLR;
-        nLR.right = n;
-        n.parent = nLR;
-
-        if (nPL == n) {
-            nParent.left = nLR;
-        } else {
-            nParent.right = nLR;
-        }
-        nLR.parent = nParent;
-
-        // fix up heights
-        final int hNRepl = 1 + Math.max(hLRR, hR);
-        n.height = hNRepl;
-        final int hLRepl = 1 + Math.max(hLL, hLRL);
-        nL.height = hLRepl;
-        nLR.height = 1 + Math.max(hLRepl, hNRepl);
-
-        n.shrinkOVL = endChange(nodeOVL);
-        nL.shrinkOVL = endChange(leftOVL);
-
-        // caller should have performed only a single rotation if nL was going
-        // to end up damaged
-        assert(Math.abs(hLL - hLRL) <= 1);
-        assert(!((hLL == 0 || nLRL == null) && nL.vOpt == null));
-
-        // We have damaged nParent, nLR (now parent.child), and n (now
-        // parent.child.right).  n is the deepest.  Perform as many fixes as we
-        // can with the locks we've got.
-
-        // We've already fixed the height for n, but it might still be outside
-        // our allowable balance range.  In that case a simple fixHeight_nl
-        // won't help.
-        final int balN = hLRR - hR;
-        if (balN < -1 || balN > 1) {
-            // we need another rotation at n
-            return n;
-        }
-
-        // n might also be damaged by being an unnecessary routing node
-        if ((nLRR == null || hR == 0) && n.vOpt == null) {
-            // repair involves splicing out n and maybe more rotations
-            return n;
-        }
-
-        // we've already fixed the height at nLR, do we need a rotation here?
-        final int balLR = hLRepl - hNRepl;
-        if (balLR < -1 || balLR > 1) {
-            return nLR;
-        }
-
-        // try to fix the parent height while we've still got the lock
-        return fixHeight_nl(nParent);
-    }
-
-    private Node<K,V> rotateLeftOverRight_nl(final Node<K,V> nParent,
-                                             final Node<K,V> n,
-                                             final int hL,
-                                             final Node<K,V> nR,
-                                             final Node<K,V> nRL,
-                                             final int hRR,
-                                             final int hRLR) {
-        final long nodeOVL = n.shrinkOVL;
-        final long rightOVL = nR.shrinkOVL;
-
-        final Node<K,V> nPL = nParent.left;
-        final Node<K,V> nRLL = nRL.unsharedLeft();
-        final Node<K,V> nRLR = nRL.unsharedRight();
-        final int hRLL = height(nRLL);
-
-        n.shrinkOVL = beginChange(nodeOVL);
-        nR.shrinkOVL = beginChange(rightOVL);
-
-        // fix up n links, careful about the order!
-        n.right = nRLL;
-        if (nRLL != null) {
-            nRLL.parent = n;
-        }
-
-        nR.left = nRLR;
-        if (nRLR != null) {
-            nRLR.parent = nR;
-        }
-
-        nRL.right = nR;
-        nR.parent = nRL;
-        nRL.left = n;
-        n.parent = nRL;
-
-        if (nPL == n) {
-            nParent.left = nRL;
-        } else {
-            nParent.right = nRL;
-        }
-        nRL.parent = nParent;
-
-        // fix up heights
-        final int hNRepl = 1 + Math.max(hL, hRLL);
-        n.height = hNRepl;
-        final int hRRepl = 1 + Math.max(hRLR, hRR);
-        nR.height = hRRepl;
-        nRL.height = 1 + Math.max(hNRepl, hRRepl);
-
-        n.shrinkOVL = endChange(nodeOVL);
-        nR.shrinkOVL = endChange(rightOVL);
-
-        assert(Math.abs(hRR - hRLR) <= 1);
-
-        final int balN = hRLL - hL;
-        if (balN < -1 || balN > 1) {
-            return n;
-        }
-        if ((nRLL == null || hL == 0) && n.vOpt == null) {
-            return n;
-        }
-        final int balRL = hRRepl - hNRepl;
-        if (balRL < -1 || balRL > 1) {
-            return nRL;
-        }
-        return fixHeight_nl(nParent);
-    }
-
-    //////////////// Map views
-
-    @Override
-    public NavigableSet<K> keySet() {
-        return navigableKeySet();
-    }
-
-    @Override
-    public Set<Map.Entry<K,V>> entrySet() {
-        return new EntrySet();
-    }
-
-    private class EntrySet extends AbstractSet<Map.Entry<K,V>> {
-
-        @Override
-        public int size() {
-            return SnapTreeMap.this.size();
-        }
-
-        @Override
-        public boolean isEmpty() {
-            return SnapTreeMap.this.isEmpty();
-        }
-
-        @Override
-        public void clear() {
-            SnapTreeMap.this.clear();
-        }
-
-        @Override
-        public boolean contains(final Object o) {
-            if (!(o instanceof Map.Entry<?,?>)) {
-                return false;
-            }
-            final Object k = ((Map.Entry<?,?>)o).getKey();
-            final Object v = ((Map.Entry<?,?>)o).getValue();
-            final Object actualVo = SnapTreeMap.this.getImpl(k);
-            if (actualVo == null) {
-                // no associated value
-                return false;
-            }
-            final V actual = decodeNull(actualVo);
-            return v == null ? actual == null : v.equals(actual);
-        }
-
-        @Override
-        public boolean add(final Entry<K,V> e) {
-            final Object v = encodeNull(e.getValue());
-            return update(e.getKey(), UpdateAlways, null, v) != v;
-        }
-
-        @Override
-        public boolean remove(final Object o) {
-            if (!(o instanceof Map.Entry<?,?>)) {
-                return false;
-            }
-            final Object k = ((Map.Entry<?,?>)o).getKey();
-            final Object v = ((Map.Entry<?,?>)o).getValue();
-            return SnapTreeMap.this.remove(k, v);
-        }
-
-        @Override
-        public Iterator<Entry<K,V>> iterator() {
-            return new EntryIter<K,V>(SnapTreeMap.this);
-        }
-    }
-
-    private static class EntryIter<K,V> extends AbstractIter<K,V> implements Iterator<Map.Entry<K,V>> {
-        private EntryIter(final SnapTreeMap<K,V> m) {
-            super(m);
-        }
-
-        private EntryIter(final SnapTreeMap<K,V> m,
-                          final Comparable<? super K> minCmp,
-                          final boolean minIncl,
-                          final Comparable<? super K> maxCmp,
-                          final boolean maxIncl,
-                          final boolean descending) {
-            super(m, minCmp, minIncl, maxCmp, maxIncl, descending);
-        }
-
-        @Override
-        public Entry<K,V> next() {
-            return nextNode();
-        }
-    }
-
-    private static class KeyIter<K,V> extends AbstractIter<K,V> implements Iterator<K> {
-        private KeyIter(final SnapTreeMap<K,V> m) {
-            super(m);
-        }
-
-        private KeyIter(final SnapTreeMap<K,V> m,
-                        final Comparable<? super K> minCmp,
-                        final boolean minIncl,
-                        final Comparable<? super K> maxCmp,
-                        final boolean maxIncl,
-                        final boolean descending) {
-            super(m, minCmp, minIncl, maxCmp, maxIncl, descending);
-        }
-
-        @Override
-        public K next() {
-            return nextNode().key;
-        }
-    }
-
-    private static class AbstractIter<K,V> {
-        private final SnapTreeMap<K,V> m;
-        private final boolean descending;
-        private final char forward;
-        private final char reverse;
-        private Node<K,V>[] path;
-        private int depth = 0;
-        private Node<K,V> mostRecentNode;
-        private final K endKey;
-
-        @SuppressWarnings("unchecked")
-        AbstractIter(final SnapTreeMap<K,V> m) {
-            this.m = m;
-            this.descending = false;
-            this.forward = Right;
-            this.reverse = Left;
-            final Node<K,V> root = m.holderRef.frozen().right;
-            this.path = (Node<K,V>[]) new Node[1 + height(root)];
-            this.endKey = null;
-            pushFirst(root);
-        }
-
-        @SuppressWarnings("unchecked")
-        AbstractIter(final SnapTreeMap<K,V> m,
-                     final Comparable<? super K> minCmp,
-                     final boolean minIncl,
-                     final Comparable<? super K> maxCmp,
-                     final boolean maxIncl,
-                     final boolean descending) {
-            this.m = m;
-            this.descending = descending;
-            this.forward = !descending ? Right : Left;
-            this.reverse = !descending ? Left : Right;
-            final Comparable<? super K> fromCmp;
-            final boolean fromIncl = !descending ? minIncl : maxIncl;
-            final Comparable<? super K> toCmp;
-            final boolean toIncl = !descending ? maxIncl : minIncl;
-            if (!descending) {
-                fromCmp = minCmp;
-                toCmp = maxCmp;
-            } else {
-                fromCmp = maxCmp;
-                toCmp = minCmp;
-            }
-
-            final Node<K,V> root = m.holderRef.frozen().right;
-
-            if (toCmp != null) {
-                this.endKey = (K) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, true, forward);
-                if (this.endKey == null) {
-                    // no node satisfies the bound, nothing to iterate
-                    // ---------> EARLY EXIT
-                    return;
-                }
-            } else {
-                this.endKey = null;
-            }
-
-            this.path = (Node<K,V>[]) new Node[1 + height(root)];
-
-            if (fromCmp == null) {
-                pushFirst(root);
-            }
-            else {
-                pushFirst(root, fromCmp, fromIncl);
-                if (depth > 0 && top().vOpt == null) {
-                    advance();
-                }
-            }
-        }
-
-        private int cmp(final Comparable<? super K> comparable, final K key) {
-            final int c = comparable.compareTo(key);
-            if (!descending) {
-                return c;
-            } else {
-                return c == Integer.MIN_VALUE ? 1 : -c;
-            }
-        }
-
-        private void pushFirst(Node<K,V> node) {
-            while (node != null) {
-                path(node);
-                node = node.child(reverse);
-            }
-        }
-
-        private void path(Node<K,V> node) {
-        	if (depth == path.length)
-        		path = Arrays.copyOf(path, depth + 2);
-
-        	path[depth++] = node;
-        }
-
-        private void pushFirst(Node<K,V> node, final Comparable<? super K> fromCmp, final boolean fromIncl) {
-            while (node != null) {
-                final int c = cmp(fromCmp, node.key);
-                if (c > 0 || (c == 0 && !fromIncl)) {
-                    // everything we're interested in is on the right
-                    node = node.child(forward);
-                }
-                else {
-                    path(node);
-                    if (c == 0) {
-                        // start the iteration here
-                        return;
-                    }
-                    else {
-                        node = node.child(reverse);
-                    }
-                }
-            }
-        }
-
-        private Node<K,V> top() {
-            return path[depth - 1];
-        }
-
-        private void advance() {
-            do {
-                final Node<K,V> t = top();
-                if (endKey != null && endKey == t.key) {
-                    depth = 0;
-                    path = null;
-                    return;
-                }
-
-                final Node<K,V> fwd = t.child(forward);
-                if (fwd != null) {
-                    pushFirst(fwd);
-                } else {
-                    // keep going up until we pop a node that is a left child
-                    Node<K,V> popped;
-                    do {
-                        popped = path[--depth];
-                    } while (depth > 0 && popped == top().child(forward));
-                }
-
-                if (depth == 0) {
-                    // clear out the path so we don't pin too much stuff
-                    path = null;
-                    return;
-                }
-
-                // skip removed-but-not-unlinked entries
-            } while (top().vOpt == null);
-        }
-
-        public boolean hasNext() {
-            return depth > 0;
-        }
-
-        Node<K,V> nextNode() {
-            if (depth == 0) {
-                throw new NoSuchElementException();
-            }
-            mostRecentNode = top();
-            advance();
-            return mostRecentNode;
-        }
-
-        public void remove() {
-            if (mostRecentNode == null) {
-                throw new IllegalStateException();
-            }
-            m.remove(mostRecentNode.key);
-            mostRecentNode = null;
-        }
-    }
-
-    //////////////// navigable keySet
-
-    @Override
-    public NavigableSet<K> navigableKeySet() {
-        return new KeySet<K>(this) {
-            public Iterator<K> iterator() {
-                return new KeyIter<K,V>(SnapTreeMap.this);
-            }
-        };
-    }
-
-    @Override
-    public NavigableSet<K> descendingKeySet() {
-        return descendingMap().navigableKeySet();
-    }
-
-    private abstract static class KeySet<K> extends AbstractSet<K> implements NavigableSet<K> {
-
-        private final ConcurrentNavigableMap<K,?> map;
-
-        protected KeySet(final ConcurrentNavigableMap<K,?> map) {
-            this.map = map;
-        }
-
-        //////// basic Set stuff
-
-        @Override
-        abstract public Iterator<K> iterator();
-
-        @Override
-        public boolean contains(final Object o) { return map.containsKey(o); }
-        @Override
-        public boolean isEmpty() { return map.isEmpty(); }
-        @Override
-        public int size() { return map.size(); }
-        @Override
-        public boolean remove(final Object o) { return map.remove(o) != null; }
-
-        //////// SortedSet stuff
-
-        @Override
-        public Comparator<? super K> comparator() { return map.comparator(); }
-        @Override
-        public K first() { return map.firstKey(); }
-        @Override
-        public K last() { return map.lastKey(); }
-
-        //////// NavigableSet stuff
-
-        @Override
-        public K lower(final K k) { return map.lowerKey(k); }
-        @Override
-        public K floor(final K k) { return map.floorKey(k); }
-        @Override
-        public K ceiling(final K k) { return map.ceilingKey(k); }
-        @Override
-        public K higher(final K k) { return map.higherKey(k); }
-
-        @Override
-        public K pollFirst() { return map.pollFirstEntry().getKey(); }
-        @Override
-        public K pollLast() { return map.pollLastEntry().getKey(); }
-
-        @Override
-        public NavigableSet<K> descendingSet() { return map.descendingKeySet(); }
-        @Override
-        public Iterator<K> descendingIterator() { return map.descendingKeySet().iterator(); }
-
-        @Override
-        public NavigableSet<K> subSet(final K fromElement, final boolean minInclusive, final K toElement, final boolean maxInclusive) {
-            return map.subMap(fromElement, minInclusive, toElement, maxInclusive).keySet();
-        }
-        @Override
-        public NavigableSet<K> headSet(final K toElement, final boolean inclusive) {
-            return map.headMap(toElement, inclusive).keySet();
-        }
-        @Override
-        public NavigableSet<K> tailSet(final K fromElement, final boolean inclusive) {
-            return map.tailMap(fromElement, inclusive).keySet();
-        }
-        @Override
-        public SortedSet<K> subSet(final K fromElement, final K toElement) {
-            return map.subMap(fromElement, toElement).keySet();
-        }
-        @Override
-        public SortedSet<K> headSet(final K toElement) {
-            return map.headMap(toElement).keySet();
-        }
-        @Override
-        public SortedSet<K> tailSet(final K fromElement) {
-            return map.tailMap(fromElement).keySet();
-        }
-    }
-
-    //////////////// NavigableMap views
-
-    @Override
-    public ConcurrentNavigableMap<K,V> subMap(final K fromKey,
-                                              final boolean fromInclusive,
-                                              final K toKey,
-                                              final boolean toInclusive) {
-        final Comparable<? super K> fromCmp = comparable(fromKey);
-        if (fromCmp.compareTo(toKey) > 0) {
-            throw new IllegalArgumentException();
-        }
-        return new SubMap<K,V>(this, fromKey, fromCmp, fromInclusive, toKey, comparable(toKey), toInclusive, false);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> headMap(final K toKey, final boolean inclusive) {
-        return new SubMap<K,V>(this, null, null, false, toKey, comparable(toKey), inclusive, false);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> tailMap(final K fromKey, final boolean inclusive) {
-        return new SubMap<K,V>(this, fromKey, comparable(fromKey), inclusive, null, null, false, false);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> subMap(final K fromKey, final K toKey) {
-        return subMap(fromKey, true, toKey, false);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> headMap(final K toKey) {
-        return headMap(toKey, false);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> tailMap(final K fromKey) {
-        return tailMap(fromKey, true);
-    }
-
-    @Override
-    public ConcurrentNavigableMap<K,V> descendingMap() {
-        return new SubMap(this, null, null, false, null, null, false, true);
-    }
-
-    private static class SubMap<K, V> extends AbstractMap<K, V> implements ConcurrentNavigableMap<K, V>, Serializable {
-        /** */
-        private static final long serialVersionUID = 0L;
-
-        private final SnapTreeMap<K,V> m;
-        private final K minKey;
-        private transient Comparable<? super K> minCmp;
-        private final boolean minIncl;
-        private final K maxKey;
-        private transient Comparable<? super K> maxCmp;
-        private final boolean maxIncl;
-        private final boolean descending;
-
-        private SubMap(final SnapTreeMap<K,V> m,
-                       final K minKey,
-                       final Comparable<? super K> minCmp,
-                       final boolean minIncl,
-                       final K maxKey,
-                       final Comparable<? super K> maxCmp,
-                       final boolean maxIncl,
-                       final boolean descending) {
-            this.m = m;
-            this.minKey = minKey;
-            this.minCmp = minCmp;
-            this.minIncl = minIncl;
-            this.maxKey = maxKey;
-            this.maxCmp = maxCmp;
-            this.maxIncl = maxIncl;
-            this.descending = descending;
-        }
-
-        // TODO: clone
-
-        private boolean tooLow(final K key) {
-            if (minCmp == null) {
-                return false;
-            } else {
-                final int c = minCmp.compareTo(key);
-                return c > 0 || (c == 0 && !minIncl);
-            }
-        }
-
-        private boolean tooHigh(final K key) {
-            if (maxCmp == null) {
-                return false;
-            } else {
-                final int c = maxCmp.compareTo(key);
-                return c < 0 || (c == 0 && !maxIncl);
-            }
-        }
-
-        private boolean inRange(final K key) {
-            return !tooLow(key) && !tooHigh(key);
-        }
-
-        private void requireInRange(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!inRange(key)) {
-                throw new IllegalArgumentException();
-            }
-        }
-
-        private char minDir() {
-            return descending ? Right : Left;
-        }
-
-        private char maxDir() {
-            return descending ? Left : Right;
-        }
-
-        //////// AbstractMap
-
-        @Override
-        public boolean isEmpty() {
-            return m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, true, Left) == null;
-        }
-
-        @Override
-        public int size() {
-            final Node<K,V> root = m.holderRef.frozen().right;
-            return Node.computeFrozenSize(root, minCmp, minIncl, maxCmp, maxIncl);
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public boolean containsKey(final Object key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            final K k = (K) key;
-            return inRange(k) && m.containsKey(k);
-        }
-
-        @Override
-        public boolean containsValue(final Object value) {
-            // apply the same null policy as the rest of the code, but fall
-            // back to the default implementation
-            encodeNull(value);
-            return super.containsValue(value);
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public V get(final Object key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            final K k = (K) key;
-            return !inRange(k) ? null : m.get(k);
-        }
-
-        @Override
-        public V put(final K key, final V value) {
-            requireInRange(key);
-            return m.put(key, value);
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public V remove(final Object key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            return !inRange((K) key) ? null : m.remove(key);
-        }
-
-        @Override
-        public Set<Entry<K,V>> entrySet() {
-            return new EntrySubSet();
-        }
-
-        private class EntrySubSet extends AbstractSet<Map.Entry<K,V>> {
-            public int size() {
-                return SubMap.this.size();
-            }
-
-            @Override
-            public boolean isEmpty() {
-                return SubMap.this.isEmpty();
-            }
-
-            @SuppressWarnings("unchecked")
-            @Override
-            public boolean contains(final Object o) {
-                if (!(o instanceof Map.Entry<?,?>)) {
-                    return false;
-                }
-                final Object k = ((Map.Entry<?,?>)o).getKey();
-                if (!inRange((K) k)) {
-                    return false;
-                }
-                final Object v = ((Map.Entry<?,?>)o).getValue();
-                final Object actualVo = m.getImpl(k);
-                if (actualVo == null) {
-                    // no associated value
-                    return false;
-                }
-                final V actual = m.decodeNull(actualVo);
-                return v == null ? actual == null : v.equals(actual);
-            }
-
-            @Override
-            public boolean add(final Entry<K,V> e) {
-                requireInRange(e.getKey());
-                final Object v = encodeNull(e.getValue());
-                return m.update(e.getKey(), UpdateAlways, null, v) != v;
-            }
-
-            @Override
-            public boolean remove(final Object o) {
-                if (!(o instanceof Map.Entry<?,?>)) {
-                    return false;
-                }
-                final Object k = ((Map.Entry<?,?>)o).getKey();
-                final Object v = ((Map.Entry<?,?>)o).getValue();
-                return SubMap.this.remove(k, v);
-            }
-
-            @Override
-            public Iterator<Entry<K,V>> iterator() {
-                return new EntryIter<K,V>(m, minCmp, minIncl, maxCmp, maxIncl, descending);
-            }
-        }
-
-        //////// SortedMap
-
-        @Override
-        public Comparator<? super K> comparator() {
-            final Comparator<? super K> fromM = m.comparator();
-            if (descending) {
-                return Collections.reverseOrder(fromM);
-            } else {
-                return fromM;
-            }
-        }
-
-        @Override
-        public K firstKey() {
-            return m.boundedExtremeKeyOrThrow(minCmp, minIncl, maxCmp, maxIncl, minDir());
-        }
-
-        @Override
-        public K lastKey() {
-            return m.boundedExtremeKeyOrThrow(minCmp, minIncl, maxCmp, maxIncl, maxDir());
-        }
-
-        //////// NavigableMap
-
-        @SuppressWarnings("unchecked")
-        private K firstKeyOrNull() {
-            return (K) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, true, minDir());
-        }
-
-        @SuppressWarnings("unchecked")
-        private K lastKeyOrNull() {
-            return (K) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, true, maxDir());
-        }
-
-        @SuppressWarnings("unchecked")
-        private Entry<K,V> firstEntryOrNull() {
-            return (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, minDir());
-        }
-
-        @SuppressWarnings("unchecked")
-        private Entry<K,V> lastEntryOrNull() {
-            return (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, maxDir());
-        }
-
-        @Override
-        public Entry<K,V> lowerEntry(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooLow(key) : tooHigh(key)) {
-                return null;
-            }
-            return ((!descending ? tooHigh(key) : tooLow(key))
-                    ? this : subMapInRange(null, false, key, false)).lastEntryOrNull();
-        }
-
-        @Override
-        public K lowerKey(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooLow(key) : tooHigh(key)) {
-                return null;
-            }
-            return ((!descending ? tooHigh(key) : tooLow(key))
-                    ? this : subMapInRange(null, false, key, false)).lastKeyOrNull();
-        }
-
-        @Override
-        public Entry<K,V> floorEntry(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooLow(key) : tooHigh(key)) {
-                return null;
-            }
-            return ((!descending ? tooHigh(key) : tooLow(key))
-                    ? this : subMapInRange(null, false, key, true)).lastEntryOrNull();
-        }
-
-        @Override
-        public K floorKey(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooLow(key) : tooHigh(key)) {
-                return null;
-            }
-            return ((!descending ? tooHigh(key) : tooLow(key))
-                    ? this : subMapInRange(null, false, key, true)).lastKeyOrNull();
-        }
-
-        @Override
-        public Entry<K,V> ceilingEntry(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooHigh(key) : tooLow(key)) {
-                return null;
-            }
-            return ((!descending ? tooLow(key) : tooHigh(key))
-                    ? this : subMapInRange(key, true, null, false)).firstEntryOrNull();
-        }
-
-        @Override
-        public K ceilingKey(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooHigh(key) : tooLow(key)) {
-                return null;
-            }
-            return ((!descending ? tooLow(key) : tooHigh(key))
-                    ? this : subMapInRange(key, true, null, false)).firstKeyOrNull();
-        }
-
-        @Override
-        public Entry<K,V> higherEntry(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooHigh(key) : tooLow(key)) {
-                return null;
-            }
-            return ((!descending ? tooLow(key) : tooHigh(key))
-                    ? this : subMapInRange(key, false, null, false)).firstEntryOrNull();
-        }
-
-        @Override
-        public K higherKey(final K key) {
-            if (key == null) {
-                throw new NullPointerException();
-            }
-            if (!descending ? tooHigh(key) : tooLow(key)) {
-                return null;
-            }
-            return ((!descending ? tooLow(key) : tooHigh(key))
-                    ? this : subMapInRange(key, false, null, false)).firstKeyOrNull();
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public Entry<K,V> firstEntry() {
-            return (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, minDir());
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public Entry<K,V> lastEntry() {
-            return (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, maxDir());
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public Entry<K,V> pollFirstEntry() {
-            while (true) {
-                final Entry<K,V> snapshot = (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, minDir());
-                if (snapshot == null || m.remove(snapshot.getKey(), snapshot.getValue())) {
-                    return snapshot;
-                }
-            }
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public Entry<K,V> pollLastEntry() {
-            while (true) {
-                final Entry<K,V> snapshot = (Entry<K,V>) m.boundedExtreme(minCmp, minIncl, maxCmp, maxIncl, false, maxDir());
-                if (snapshot == null || m.remove(snapshot.getKey(), snapshot.getValue())) {
-                    return snapshot;
-                }
-            }
-        }
-
-        //////// ConcurrentMap
-
-        @Override
-        public V putIfAbsent(final K key, final V value) {
-            requireInRange(key);
-            return m.putIfAbsent(key, value);
-        }
-
-        @Override
-        @SuppressWarnings("unchecked")
-        public boolean remove(final Object key, final Object value) {
-            return inRange((K) key) && m.remove(key, value);
-        }
-
-        @Override
-        public boolean replace(final K key, final V oldValue, final V newValue) {
-            requireInRange(key);
-            return m.replace(key, oldValue, newValue);
-        }
-
-        @Override
-        public V replace(final K key, final V value) {
-            requireInRange(key);
-            return m.replace(key, value);
-        }
-
-        //////// ConcurrentNavigableMap
-
-        @Override
-        public SubMap<K,V> subMap(final K fromKey,
-                                  final boolean fromInclusive,
-                                  final K toKey,
-                                  final boolean toInclusive) {
-            if (fromKey == null || toKey == null) {
-                throw new NullPointerException();
-            }
-            return subMapImpl(fromKey, fromInclusive, toKey, toInclusive);
-        }
-
-        @Override
-        public SubMap<K,V> headMap(final K toKey, final boolean inclusive) {
-            if (toKey == null) {
-                throw new NullPointerException();
-            }
-            return subMapImpl(null, false, toKey, inclusive);
-        }
-
-        @Override
-        public SubMap<K,V> tailMap(final K fromKey, final boolean inclusive) {
-            if (fromKey == null) {
-                throw new NullPointerException();
-            }
-            return subMapImpl(fromKey, inclusive, null, false);
-        }
-
-        @Override
-        public SubMap<K,V> subMap(final K fromKey, final K toKey) {
-            return subMap(fromKey, true, toKey, false);
-        }
-
-        @Override
-        public SubMap<K,V> headMap(final K toKey) {
-            return headMap(toKey, false);
-        }
-
-        @Override
-        public SubMap<K,V> tailMap(final K fromKey) {
-            return tailMap(fromKey, true);
-        }
-
-        private SubMap<K,V> subMapImpl(final K fromKey,
-                                          final boolean fromIncl,
-                                          final K toKey,
-                                          final boolean toIncl) {
-            if (fromKey != null) {
-                requireInRange(fromKey);
-            }
-            if (toKey != null) {
-                requireInRange(toKey);
-            }
-            return subMapInRange(fromKey, fromIncl, toKey, toIncl);
-        }
-
-        private SubMap<K,V> subMapInRange(final K fromKey,
-                                          final boolean fromIncl,
-                                          final K toKey,
-                                          final boolean toIncl) {
-            final Comparable<? super K> fromCmp = fromKey == null ? null : m.comparable(fromKey);
-            final Comparable<? super K> toCmp = toKey == null ? null : m.comparable(toKey);
-
-            if (fromKey != null && toKey != null) {
-                final int c = fromCmp.compareTo(toKey);
-                if ((!descending ? c > 0 : c < 0)) {
-                    throw new IllegalArgumentException();
-                }
-            }
-
-            K minK = minKey;
-            Comparable<? super K> minC = minCmp;
-            boolean minI = minIncl;
-            K maxK = maxKey;
-            Comparable<? super K> maxC = maxCmp;
-            boolean maxI = maxIncl;
-
-            if (fromKey != null) {
-                if (!descending) {
-                    minK = fromKey;
-                    minC = fromCmp;
-                    minI = fromIncl;
-                } else {
-                    maxK = fromKey;
-                    maxC = fromCmp;
-                    maxI = fromIncl;
-                }
-            }
-            if (toKey != null) {
-                if (!descending) {
-                    maxK = toKey;
-                    maxC = toCmp;
-                    maxI = toIncl;
-                } else {
-                    minK = toKey;
-                    minC = toCmp;
-                    minI = toIncl;
-                }
-            }
-
-            return new SubMap(m, minK, minC, minI, maxK, maxC, maxI, descending);
-        }
-
-        @Override
-        public SubMap<K,V> descendingMap() {
-            return new SubMap<K,V>(m, minKey, minCmp, minIncl, maxKey, maxCmp, maxIncl, !descending);
-        }
-
-        @Override
-        public NavigableSet<K> keySet() {
-            return navigableKeySet();
-        }
-
-        @Override
-        public NavigableSet<K> navigableKeySet() {
-            return new KeySet<K>(SubMap.this) {
-                public Iterator<K> iterator() {
-                    return new KeyIter<K,V>(m, minCmp, minIncl, maxCmp, maxIncl, descending);
-                }
-            };
-        }
-
-        @Override
-        public NavigableSet<K> descendingKeySet() {
-            return descendingMap().navigableKeySet();
-        }
-
-        //////// Serialization
-
-        private void readObject(final ObjectInputStream xi) throws IOException, ClassNotFoundException {
-            xi.defaultReadObject();
-
-            minCmp = minKey == null ? null : m.comparable(minKey);
-            maxCmp = maxKey == null ? null : m.comparable(maxKey);
-        }
-    }
-
-    //////// Serialization
-
-    /** Saves the state of the <code>SnapTreeMap</code> to a stream. */
-    @SuppressWarnings("unchecked")
-    private void writeObject(final ObjectOutputStream xo) throws IOException {
-        // this handles the comparator, and any subclass stuff
-        xo.defaultWriteObject();
-
-        // by cloning the COWMgr, we get a frozen tree plus the size
-        final COWMgr<K,V> h = (COWMgr<K,V>) holderRef.clone();
-
-        xo.writeInt(h.size());
-        writeEntry(xo, h.frozen().right);
-    }
-
-    private void writeEntry(final ObjectOutputStream xo, final Node<K,V> node) throws IOException {
-        if (node != null) {
-            writeEntry(xo, node.left);
-            if (node.vOpt != null) {
-                xo.writeObject(node.key);
-                xo.writeObject(decodeNull(node.vOpt));
-            }
-            writeEntry(xo, node.right);
-        }
-    }
-
-    /** Reverses {@link #writeObject(ObjectOutputStream)}. */
-    private void readObject(final ObjectInputStream xi) throws IOException, ClassNotFoundException  {
-        xi.defaultReadObject();
-
-        final int size = xi.readInt();
-
-        // TODO: take advantage of the sort order
-        // for now we optimize only by bypassing the COWMgr
-        final RootHolder<K,V> holder = new RootHolder<K,V>();
-        for (int i = 0; i < size; ++i) {
-            final K k = (K) xi.readObject();
-            final V v = (V) xi.readObject();
-            updateUnderRoot(k, comparable(k), UpdateAlways, null, encodeNull(v), holder);
-        }
-
-        holderRef = new COWMgr<K,V>(holder, size);
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/package-info.java b/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/package-info.java
deleted file mode 100644
index 2d75a8c..0000000
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/snaptree/package-info.java
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * Snaptree and related classes.
- */
-package org.apache.ignite.internal.util.snaptree;
\ No newline at end of file
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java
index 86daf7c..329682f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/util/tostring/GridToStringBuilder.java
@@ -21,6 +21,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Serializable;
+import java.lang.reflect.Array;
 import java.lang.reflect.Field;
 import java.lang.reflect.Modifier;
 import java.util.ArrayList;
@@ -853,7 +854,7 @@
      */
     private static void addArray(SBLimitedLength buf, Class arrType, Object obj) {
         if (arrType.getComponentType().isPrimitive()) {
-            buf.a(arrayToString(arrType, obj));
+            buf.a(arrayToString(obj));
 
             return;
         }
@@ -985,6 +986,7 @@
             if (newStr)
                 return s;
 
+            // Called from another GTSB.toString(), so this string is already in the buffer and shouldn't be returned.
             return "";
         }
         finally {
@@ -1052,7 +1054,6 @@
         }
         // Specifically catching all exceptions.
         catch (Exception e) {
-
             // Remove entry from cache to avoid potential memory leak
             // in case new class loader got loaded under the same identity hash.
             classCache.remove(cls.getName() + System.identityHashCode(cls.getClassLoader()));
@@ -1075,95 +1076,40 @@
     }
 
     /**
-     * @param arrType Type of the array.
-     * @param arr Array object.
+     * Returns limited string representation of array.
+     *
+     * @param arr Array object. Each value is automatically wrapped if it has a primitive type.
      * @return String representation of an array.
      */
-    @SuppressWarnings({"ConstantConditions", "unchecked"})
-    public static <T> String arrayToString(Class arrType, Object arr) {
+    public static String arrayToString(Object arr) {
         if (arr == null)
             return "null";
 
         String res;
-        int more = 0;
 
-        if (arrType.equals(byte[].class)) {
-            byte[] byteArr = (byte[])arr;
-            if (byteArr.length > COLLECTION_LIMIT) {
-                more = byteArr.length - COLLECTION_LIMIT;
-                byteArr = Arrays.copyOf(byteArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(byteArr);
-        }
-        else if (arrType.equals(boolean[].class)) {
-            boolean[] boolArr = (boolean[])arr;
-            if (boolArr.length > COLLECTION_LIMIT) {
-                more = boolArr.length - COLLECTION_LIMIT;
-                boolArr = Arrays.copyOf(boolArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(boolArr);
-        }
-        else if (arrType.equals(short[].class)) {
-            short[] shortArr = (short[])arr;
-            if (shortArr.length > COLLECTION_LIMIT) {
-                more = shortArr.length - COLLECTION_LIMIT;
-                shortArr = Arrays.copyOf(shortArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(shortArr);
-        }
-        else if (arrType.equals(int[].class)) {
-            int[] intArr = (int[])arr;
-            if (intArr.length > COLLECTION_LIMIT) {
-                more = intArr.length - COLLECTION_LIMIT;
-                intArr = Arrays.copyOf(intArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(intArr);
-        }
-        else if (arrType.equals(long[].class)) {
-            long[] longArr = (long[])arr;
-            if (longArr.length > COLLECTION_LIMIT) {
-                more = longArr.length - COLLECTION_LIMIT;
-                longArr = Arrays.copyOf(longArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(longArr);
-        }
-        else if (arrType.equals(float[].class)) {
-            float[] floatArr = (float[])arr;
-            if (floatArr.length > COLLECTION_LIMIT) {
-                more = floatArr.length - COLLECTION_LIMIT;
-                floatArr = Arrays.copyOf(floatArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(floatArr);
-        }
-        else if (arrType.equals(double[].class)) {
-            double[] doubleArr = (double[])arr;
-            if (doubleArr.length > COLLECTION_LIMIT) {
-                more = doubleArr.length - COLLECTION_LIMIT;
-                doubleArr = Arrays.copyOf(doubleArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(doubleArr);
-        }
-        else if (arrType.equals(char[].class)) {
-            char[] charArr = (char[])arr;
-            if (charArr.length > COLLECTION_LIMIT) {
-                more = charArr.length - COLLECTION_LIMIT;
-                charArr = Arrays.copyOf(charArr, COLLECTION_LIMIT);
-            }
-            res = Arrays.toString(charArr);
-        }
-        else {
+        int arrLen;
+
+        if (arr instanceof Object[]) {
             Object[] objArr = (Object[])arr;
-            if (objArr.length > COLLECTION_LIMIT) {
-                more = objArr.length - COLLECTION_LIMIT;
+
+            arrLen = objArr.length;
+
+            if (arrLen > COLLECTION_LIMIT)
                 objArr = Arrays.copyOf(objArr, COLLECTION_LIMIT);
-            }
+
             res = Arrays.toString(objArr);
+        } else {
+            res = toStringWithLimit(arr, COLLECTION_LIMIT);
+
+            arrLen = Array.getLength(arr);
         }
-        if (more > 0) {
+
+        if (arrLen > COLLECTION_LIMIT) {
             StringBuilder resSB = new StringBuilder(res);
 
             resSB.deleteCharAt(resSB.length() - 1);
-            resSB.append("... and ").append(more).append(" more]");
+
+            resSB.append("... and ").append(arrLen - COLLECTION_LIMIT).append(" more]");
 
             res = resSB.toString();
         }
@@ -1172,6 +1118,37 @@
     }
 
     /**
+     * Returns limited string representation of array.
+     *
+     * @param arr Input array. Each value is automatically wrapped if it has a primitive type.
+     * @param limit max array items to string limit.
+     * @return String representation of an array.
+     */
+    private static String toStringWithLimit(Object arr, int limit) {
+        int arrIdxMax = Array.getLength(arr) - 1;
+
+        if (arrIdxMax == -1)
+            return "[]";
+
+        int idxMax = Math.min(arrIdxMax, limit);
+
+        StringBuilder b = new StringBuilder();
+
+        b.append('[');
+
+        for (int i = 0; i <= idxMax; ++i) {
+            b.append(Array.get(arr, i));
+            
+            if (i == idxMax)
+                return b.append(']').toString();
+
+            b.append(", ");
+        }
+
+        return b.toString();
+    }
+
+    /**
      * Produces uniformed output of string with context properties
      *
      * @param str Output prefix or {@code null} if empty.
@@ -1605,7 +1582,7 @@
     private static String toStringImpl(String str, SBLimitedLength buf, Object[] propNames, Object[] propVals,
         boolean[] propSens, int propCnt) {
 
-        buf.setLength(0);
+        boolean newStr = buf.length() == 0;
 
         if (str != null)
             buf.a(str).a(" ");
@@ -1616,7 +1593,11 @@
 
         buf.a(']');
 
-        return buf.toString();
+        if (newStr)
+            return buf.toString();
+
+        // Called from another GTSB.toString(), so this string is already in the buffer and shouldn't be returned.
+        return "";
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheAggregatedMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheAggregatedMetrics.java
index a0258cc..6dd91d8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheAggregatedMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheAggregatedMetrics.java
@@ -48,22 +48,28 @@
     /** Node IDs with cache metrics. */
     private Map<UUID, VisorCacheMetrics> metrics = new HashMap<>();
 
-    /** Minimum number of elements in heap. */
+    /** Total number of entries in heap. */
+    private transient Long totalHeapSize;
+
+    /** Minimum number of entries in heap. */
     private transient Long minHeapSize;
 
-    /** Average number of elements in heap. */
+    /** Average number of entries in heap. */
     private transient Double avgHeapSize;
 
-    /** Maximum number of elements in heap. */
+    /** Maximum number of entries in heap. */
     private transient Long maxHeapSize;
 
-    /** Minimum number of elements in off heap. */
+    /** Total number of entries in off heap. */
+    private transient Long totalOffHeapSize;
+
+    /** Minimum number of entries in off heap. */
     private transient Long minOffHeapSize;
 
-    /** Average number of elements in off heap. */
+    /** Average number of entries in off heap. */
     private transient Double avgOffHeapSize;
 
-    /** Maximum number of elements in off heap. */
+    /** Maximum number of entries in off heap. */
     private transient Long maxOffHeapSize;
 
     /** Minimum hits of the owning cache. */
@@ -163,7 +169,21 @@
     }
 
     /**
-     * @return Minimum number of elements in heap.
+     * @return Total number of entries in heap.
+     */
+    public long getTotalHeapSize() {
+        if (totalHeapSize == null) {
+            totalHeapSize = 0L;
+
+            for (VisorCacheMetrics metric : metrics.values())
+                totalHeapSize += metric.getHeapEntriesCount();
+        }
+
+        return totalHeapSize;
+    }
+
+    /**
+     * @return Minimum number of entries in heap.
      */
     public long getMinimumHeapSize() {
         if (minHeapSize == null) {
@@ -177,7 +197,7 @@
     }
 
     /**
-     * @return Average number of elements in heap.
+     * @return Average number of entries in heap.
      */
     public double getAverageHeapSize() {
         if (avgHeapSize == null) {
@@ -193,7 +213,7 @@
     }
 
     /**
-     * @return Maximum number of elements in heap.
+     * @return Maximum number of entries in heap.
      */
     public long getMaximumHeapSize() {
         if (maxHeapSize == null) {
@@ -215,7 +235,21 @@
     }
 
     /**
-     * @return Minimum number of primary elements in off heap.
+     * @return Total number of entries in off-heap.
+     */
+    public long getTotalOffHeapSize() {
+        if (totalOffHeapSize == null) {
+            totalOffHeapSize = 0L;
+
+            for (VisorCacheMetrics metric : metrics.values())
+                totalOffHeapSize += metric.getOffHeapPrimaryEntriesCount();
+        }
+
+        return totalOffHeapSize;
+    }
+
+    /**
+     * @return Minimum number of primary entries in off heap.
      */
     public long getMinimumOffHeapPrimarySize() {
         if (minOffHeapSize == null) {
@@ -229,7 +263,7 @@
     }
 
     /**
-     * @return Average number of primary elements in off heap.
+     * @return Average number of primary entries in off heap.
      */
     public double getAverageOffHeapPrimarySize() {
         if (avgOffHeapSize == null) {
@@ -245,7 +279,7 @@
     }
 
     /**
-     * @return Maximum number of primary elements in off heap.
+     * @return Maximum number of primary entries in off heap.
      */
     public long getMaximumOffHeapPrimarySize() {
         if (maxOffHeapSize == null) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfigurationCollectorTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfigurationCollectorTask.java
index 154e39e..fd224a8 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfigurationCollectorTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheConfigurationCollectorTask.java
@@ -20,7 +20,6 @@
 import java.util.Map;
 import org.apache.ignite.internal.processors.task.GridInternal;
 import org.apache.ignite.internal.visor.VisorOneNodeTask;
-import org.apache.ignite.lang.IgniteUuid;
 
 /**
  * Task that collect cache metrics from all nodes.
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheLostPartitionsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheLostPartitionsTask.java
index 24b4069..52db552 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheLostPartitionsTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheLostPartitionsTask.java
@@ -66,8 +66,9 @@
                 IgniteInternalCache cache = ignite.cachex(cacheName);
 
                 if (cache != null) {
-                    GridDhtPartitionTopology topology = cache.context().topology();
-                    List<Integer> lostPartitions = new ArrayList<>(topology.lostPartitions());
+                    GridDhtPartitionTopology top = cache.context().topology();
+
+                    List<Integer> lostPartitions = new ArrayList<>(top.lostPartitions());
 
                     if (!lostPartitions.isEmpty())
                         res.put(cacheName, lostPartitions);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java
index d6b1ff7..62e7ac6 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTask.java
@@ -41,7 +41,7 @@
     }
 
     /**
-     * Job that clear specified caches.
+     * Job that modify value in specified cache.
      */
     private static class VisorCacheModifyJob extends VisorJob<VisorCacheModifyTaskArg, VisorCacheModifyTaskResult> {
         /** */
@@ -88,18 +88,18 @@
                         VisorQueryUtils.convertValue(old));
 
                 case GET:
-                    Object value = cache.get(key);
+                    Object val = cache.get(key);
 
-                    return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(value),
-                        VisorQueryUtils.convertValue(value));
+                    return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(val),
+                        VisorQueryUtils.convertValue(val));
 
                 case REMOVE:
-                    Object removed = cache.get(key);
+                    Object rmv = cache.get(key);
 
                     cache.remove(key);
 
-                    return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(removed),
-                        VisorQueryUtils.convertValue(removed));
+                    return new VisorCacheModifyTaskResult(nid, VisorTaskUtils.compactClass(rmv),
+                        VisorQueryUtils.convertValue(rmv));
             }
 
             return new VisorCacheModifyTaskResult(nid, null, null);
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java
index 706aab7..bef73ed 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskArg.java
@@ -41,7 +41,7 @@
     private Object key;
 
     /** Specified value. */
-    private Object value;
+    private Object val;
 
     /**
      * Default constructor.
@@ -54,13 +54,13 @@
      * @param cacheName Cache name.
      * @param mode Modification mode.
      * @param key Specified key.
-     * @param value Specified value.
+     * @param val Specified value.
      */
-    public VisorCacheModifyTaskArg(String cacheName, VisorModifyCacheMode mode, Object key, Object value) {
+    public VisorCacheModifyTaskArg(String cacheName, VisorModifyCacheMode mode, Object key, Object val) {
         this.cacheName = cacheName;
         this.mode = mode;
         this.key = key;
-        this.value = value;
+        this.val = val;
     }
 
     /**
@@ -88,7 +88,7 @@
      * @return Specified value.
      */
     public Object getValue() {
-        return value;
+        return val;
     }
 
     /** {@inheritDoc} */
@@ -96,7 +96,7 @@
         U.writeString(out, cacheName);
         U.writeEnum(out, mode);
         out.writeObject(key);
-        out.writeObject(value);
+        out.writeObject(val);
     }
 
     /** {@inheritDoc} */
@@ -104,7 +104,7 @@
         cacheName = U.readString(in);
         mode = VisorModifyCacheMode.fromOrdinal(in.readByte());
         key = in.readObject();
-        value = in.readObject();
+        val = in.readObject();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java
index ce09bb2..8d0152a 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheModifyTaskResult.java
@@ -36,10 +36,10 @@
     private UUID affinityNode;
 
     /** Result type name. */
-    private String resultType;
+    private String resType;
 
     /** Value for specified key or number of modified rows. */
-    private Object result;
+    private Object res;
 
     /**
      * Default constructor.
@@ -50,13 +50,13 @@
 
     /**
      * @param affinityNode Node ID where modified data contained.
-     * @param resultType Result type name.
-     * @param result Value for specified key or number of modified rows.
+     * @param resType Result type name.
+     * @param res Value for specified key or number of modified rows.
      */
-    public VisorCacheModifyTaskResult(UUID affinityNode, String resultType, Object result) {
+    public VisorCacheModifyTaskResult(UUID affinityNode, String resType, Object res) {
         this.affinityNode = affinityNode;
-        this.resultType = resultType;
-        this.result = result;
+        this.resType = resType;
+        this.res = res;
     }
 
     /**
@@ -70,28 +70,28 @@
      * @return Result type name.
      */
     public String getResultType() {
-        return resultType;
+        return resType;
     }
 
     /**
-     * @return Value for specified key or number of modified rows..
+     * @return Value for specified key or number of modified rows.
      */
     public Object getResult() {
-        return result;
+        return res;
     }
 
     /** {@inheritDoc} */
     @Override protected void writeExternalData(ObjectOutput out) throws IOException {
         U.writeUuid(out, affinityNode);
-        U.writeString(out, resultType);
-        out.writeObject(result);
+        U.writeString(out, resType);
+        out.writeObject(res);
     }
 
     /** {@inheritDoc} */
     @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException {
         affinityNode = U.readUuid(in);
-        resultType = U.readString(in);
-        result = in.readObject();
+        resType = U.readString(in);
+        res = in.readObject();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTask.java
new file mode 100644
index 0000000..7d934d1
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTask.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.visor.cache;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
+import org.apache.ignite.internal.processors.cache.GridCacheProcessor;
+import org.apache.ignite.internal.processors.task.GridInternal;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.visor.VisorJob;
+import org.apache.ignite.internal.visor.VisorOneNodeTask;
+import org.apache.ignite.lang.IgniteUuid;
+
+/**
+ * Task that collect cache names and deployment IDs.
+ */
+@GridInternal
+public class VisorCacheNamesCollectorTask extends VisorOneNodeTask<Void, VisorCacheNamesCollectorTaskResult> {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** {@inheritDoc} */
+    @Override protected VisorCacheNamesCollectorJob job(Void arg) {
+        return new VisorCacheNamesCollectorJob(arg, debug);
+    }
+
+    /**
+     * Job that collect cache names and deployment IDs.
+     */
+    private static class VisorCacheNamesCollectorJob extends VisorJob<Void, VisorCacheNamesCollectorTaskResult> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /**
+         * Create job.
+         *
+         * @param arg Task argument.
+         * @param debug Debug flag.
+         */
+        private VisorCacheNamesCollectorJob(Void arg, boolean debug) {
+            super(arg, debug);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected VisorCacheNamesCollectorTaskResult run(Void arg) {
+            GridCacheProcessor cacheProc = ignite.context().cache();
+
+            Map<String, IgniteUuid> caches = new HashMap<>();
+            Set<String> groups = new HashSet<>();
+
+            for (Map.Entry<String, DynamicCacheDescriptor> item : cacheProc.cacheDescriptors().entrySet()) {
+                DynamicCacheDescriptor cd = item.getValue();
+
+                caches.put(item.getKey(), cd.deploymentId());
+
+                String grp = cd.groupDescriptor().groupName();
+
+                if (!F.isEmpty(grp))
+                    groups.add(grp);
+            }
+
+            return new VisorCacheNamesCollectorTaskResult(caches, groups);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(VisorCacheNamesCollectorJob.class, this);
+        }
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTaskResult.java
new file mode 100644
index 0000000..a2e0908
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheNamesCollectorTaskResult.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.visor.cache;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Map;
+import java.util.Set;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.visor.VisorDataTransferObject;
+import org.apache.ignite.lang.IgniteUuid;
+
+/**
+ * Result for {@link VisorCacheNamesCollectorTask}.
+ */
+public class VisorCacheNamesCollectorTaskResult extends VisorDataTransferObject {
+    /** */
+    private static final long serialVersionUID = 0L;
+
+    /** Cache names and deployment IDs. */
+    private Map<String, IgniteUuid> caches;
+
+    /** Cache groups. */
+    private Set<String> groups;
+
+    /**
+     * Default constructor.
+     */
+    public VisorCacheNamesCollectorTaskResult() {
+        // No-op.
+    }
+
+    /**
+     * @param caches Cache names and deployment IDs.
+     */
+    public VisorCacheNamesCollectorTaskResult(Map<String, IgniteUuid> caches, Set<String> groups) {
+        this.caches = caches;
+        this.groups = groups;
+    }
+
+    /**
+     * @return Value for specified key or number of modified rows..
+     */
+    public Map<String, IgniteUuid> getCaches() {
+        return caches;
+    }
+
+    /**
+     * @return Value for specified key or number of modified rows..
+     */
+    public Set<String> getGroups() {
+        return groups;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void writeExternalData(ObjectOutput out) throws IOException {
+        U.writeMap(out, caches);
+        U.writeCollection(out, groups);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException {
+        caches = U.readMap(in);
+        groups = U.readSet(in);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(VisorCacheNamesCollectorTaskResult.class, this);
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCachePartitionsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCachePartitionsTask.java
index af65de0..76ace17 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCachePartitionsTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCachePartitionsTask.java
@@ -63,7 +63,7 @@
             if (res.getException() != null)
                 throw res.getException();
 
-            parts.put(res.getNode().id(), (VisorCachePartitions)res.getData());
+            parts.put(res.getNode().id(), res.getData());
         }
 
         return parts;
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheResetLostPartitionsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheResetLostPartitionsTask.java
index eb48cd2..2ad88ee 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheResetLostPartitionsTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorCacheResetLostPartitionsTask.java
@@ -43,7 +43,7 @@
         private static final long serialVersionUID = 0L;
 
         /**
-         * @param arg Object with list cache names to reset lost partitons.
+         * @param arg Object with list cache names to reset lost partitions.
          * @param debug Debug flag.
          */
         private VisorCacheResetLostPartitionsJob(VisorCacheResetLostPartitionsTaskArg arg, boolean debug) {
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java
index 5b46220..fd94421 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/cache/VisorMemoryMetrics.java
@@ -66,13 +66,31 @@
     private long physicalMemSz;
 
     /** */
-    private long cpBufPages;
+    private int pageSize;
 
     /** */
     private long cpBufSz;
 
     /** */
-    private int pageSize;
+    private long cpUsedBufPages;
+
+    /** */
+    private long cpUsedBufSz;
+
+    /** */
+    private long pagesRead;
+
+    /** */
+    private long pagesWritten;
+
+    /** */
+    private long pagesReplaced;
+
+    /** */
+    private long offHeapSz;
+
+    /** */
+    private long offHeapUsedSz;
 
     /**
      * Default constructor.
@@ -96,9 +114,19 @@
         physicalMemoryPages = m.getPhysicalMemoryPages();
         totalAllocatedSz = m.getTotalAllocatedSize();
         physicalMemSz = m.getPhysicalMemorySize();
-        cpBufPages = m.getUsedCheckpointBufferPages();
-        cpBufSz = m.getUsedCheckpointBufferSize();
+
         pageSize = m.getPageSize();
+
+        cpBufSz = m.getCheckpointBufferSize();
+        cpUsedBufPages = m.getUsedCheckpointBufferPages();
+        cpUsedBufSz = m.getUsedCheckpointBufferSize();
+
+        pagesRead = m.getPagesRead();
+        pagesWritten = m.getPagesWritten();
+        pagesReplaced = m.getPagesReplaced();
+
+        offHeapSz = m.getOffHeapSize();
+        offHeapUsedSz = m.getOffheapUsedSize();
     }
 
     /**
@@ -180,10 +208,12 @@
     }
 
     /**
-     * @return Checkpoint buffer size in pages.
+     * This method needed for compatibility with V2.
+     *
+     * @return Used checkpoint buffer size in pages.
      */
     public long getCheckpointBufferPages() {
-        return cpBufPages;
+        return cpUsedBufPages;
     }
 
     /**
@@ -194,15 +224,64 @@
     }
 
     /**
+     * @return Used checkpoint buffer size in pages.
+     */
+    public long getUsedCheckpointBufferPages() {
+        return cpUsedBufPages;
+    }
+
+    /**
+     * @return Used checkpoint buffer size in bytes.
+     */
+    public long getUsedCheckpointBufferSize() {
+        return cpUsedBufSz;
+    }
+
+    /**
      * @return Page size in bytes.
      */
     public int getPageSize() {
         return pageSize;
     }
 
+    /**
+     * @return The number of read pages from last restart.
+     */
+    public long getPagesRead() {
+        return pagesRead;
+    }
+
+    /**
+     * @return The number of written pages from last restart.
+     */
+    public long getPagesWritten() {
+        return pagesWritten;
+    }
+
+    /**
+     * @return The number of replaced pages from last restart .
+     */
+    public long getPagesReplaced() {
+        return pagesReplaced;
+    }
+
+    /**
+     * @return Total offheap size in bytes.
+     */
+    public long getOffHeapSize() {
+        return offHeapSz;
+    }
+
+    /**
+     * @return Total used offheap size in bytes.
+     */
+    public long getOffheapUsedSize() {
+        return offHeapUsedSz;
+    }
+
     /** {@inheritDoc} */
     @Override public byte getProtocolVersion() {
-        return V2;
+        return V3;
     }
 
     /** {@inheritDoc} */
@@ -216,11 +295,23 @@
         out.writeLong(dirtyPages);
         out.writeFloat(pagesReplaceRate);
         out.writeLong(physicalMemoryPages);
+
+        // V2
         out.writeLong(totalAllocatedSz);
         out.writeLong(physicalMemSz);
-        out.writeLong(cpBufPages);
+        out.writeLong(cpUsedBufPages);
         out.writeLong(cpBufSz);
         out.writeInt(pageSize);
+
+        // V3
+        out.writeLong(cpUsedBufSz);
+
+        out.writeLong(pagesRead);
+        out.writeLong(pagesWritten);
+        out.writeLong(pagesReplaced);
+
+        out.writeLong(offHeapSz);
+        out.writeLong(offHeapUsedSz);
     }
 
     /** {@inheritDoc} */
@@ -238,10 +329,21 @@
         if (protoVer > V1) {
             totalAllocatedSz = in.readLong();
             physicalMemSz = in.readLong();
-            cpBufPages = in.readLong();
+            cpUsedBufPages = in.readLong();
             cpBufSz = in.readLong();
             pageSize = in.readInt();
         }
+
+        if (protoVer > V2) {
+            cpUsedBufSz = in.readLong();
+
+            pagesRead = in.readLong();
+            pagesWritten = in.readLong();
+            pagesReplaced = in.readLong();
+
+            offHeapSz = in.readLong();
+            offHeapUsedSz = in.readLong();
+        }
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/compute/VisorGatewayTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/compute/VisorGatewayTask.java
index d14463f..c41864f 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/compute/VisorGatewayTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/compute/VisorGatewayTask.java
@@ -246,7 +246,7 @@
          * @return Object constructed from string.
          */
         @Nullable private Object toObject(Class cls, String val) {
-            if (val == null  || "null".equals(val))
+            if (val == null  || "null".equals(val) || "nil".equals(val))
                 return null;
 
             if (String.class == cls)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/file/VisorFileBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/file/VisorFileBlock.java
index 23540b5..79d0e93 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/file/VisorFileBlock.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/file/VisorFileBlock.java
@@ -53,7 +53,7 @@
      * Default constructor.
      */
     public VisorFileBlock() {
-
+        // No-op.
     }
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java
index 14b9281..5fab8d1 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorJob.java
@@ -19,9 +19,11 @@
 
 import java.util.Collection;
 import java.util.List;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
-import org.apache.ignite.IgniteFileSystem;
+
 import org.apache.ignite.DataRegionMetrics;
+import org.apache.ignite.IgniteFileSystem;
 import org.apache.ignite.cache.CacheMetrics;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.configuration.FileSystemConfiguration;
@@ -32,6 +34,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheProcessor;
 import org.apache.ignite.internal.processors.igfs.IgfsProcessorAdapter;
 import org.apache.ignite.internal.util.ipc.IpcServerEndpoint;
+import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.visor.VisorJob;
@@ -159,7 +162,7 @@
             List<VisorMemoryMetrics> memoryMetrics = res.getMemoryMetrics();
 
             // TODO: Should be really fixed in IGNITE-7111.
-            if (ignite.active()) {
+            if (ignite.cluster().active()) {
                 for (DataRegionMetrics m : ignite.dataRegionMetrics())
                     memoryMetrics.add(new VisorMemoryMetrics(m));
             }
@@ -181,17 +184,23 @@
 
             GridCacheProcessor cacheProc = ignite.context().cache();
 
-            List<VisorCache> resCaches = res.getCaches();
+            Set<String> cacheGrps = arg.getCacheGroups();
+
+            boolean all = F.isEmpty(cacheGrps);
 
             int partitions = 0;
             double total = 0;
             double ready = 0;
 
+            List<VisorCache> resCaches = res.getCaches();
+
             for (String cacheName : cacheProc.cacheNames()) {
                 if (proxyCache(cacheName))
                     continue;
 
-                if (arg.getSystemCaches() || !(isSystemCache(cacheName) || isIgfsCache(cfg, cacheName))) {
+                boolean sysCache = isSystemCache(cacheName);
+
+                if (arg.getSystemCaches() || !(sysCache || isIgfsCache(cfg, cacheName))) {
                     long start0 = U.currentTimeMillis();
 
                     try {
@@ -213,7 +222,8 @@
                         total += partTotal;
                         ready += partReady;
 
-                        resCaches.add(new VisorCache(ignite, ca, arg.isCollectCacheMetrics()));
+                        if (all || cacheGrps.contains(ca.configuration().getGroupName()))
+                            resCaches.add(new VisorCache(ignite, ca, arg.isCollectCacheMetrics()));
                     }
                     catch(IllegalStateException | IllegalArgumentException e) {
                         if (debug && ignite.log() != null)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java
index fffc3bf..067c57b 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTask.java
@@ -72,7 +72,7 @@
             }
         }
 
-        taskRes.setActive(ignite.active());
+        taskRes.setActive(ignite.cluster().active());
 
         return taskRes;
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java
index 1876d06..b707991 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorNodeDataCollectorTaskArg.java
@@ -20,6 +20,8 @@
 import java.io.IOException;
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
+import java.util.Set;
+
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.visor.VisorDataTransferObject;
@@ -46,6 +48,9 @@
     /** If {@code false} then cache metrics will not be collected. */
     private boolean collectCacheMetrics;
 
+    /** Optional Set of cache groups, if provided, then caches only from that groups will be collected. */
+    private Set<String> cacheGrps;
+
     /**
      * Default constructor.
      */
@@ -61,6 +66,31 @@
      * @param evtThrottleCntrKey Event throttle counter key, unique for Visor instance.
      * @param sysCaches If {@code true} then collect information about system caches.
      * @param collectCacheMetrics If {@code false} then cache metrics will not be collected.
+     * @param cacheGrps Optional Set of cache groups, if provided, then caches only from that groups will be collected.
+     */
+    public VisorNodeDataCollectorTaskArg(
+        boolean taskMonitoringEnabled,
+        String evtOrderKey,
+        String evtThrottleCntrKey,
+        boolean sysCaches,
+        boolean collectCacheMetrics,
+        Set<String> cacheGrps
+    ) {
+        this.taskMonitoringEnabled = taskMonitoringEnabled;
+        this.evtOrderKey = evtOrderKey;
+        this.evtThrottleCntrKey = evtThrottleCntrKey;
+        this.sysCaches = sysCaches;
+        this.collectCacheMetrics = collectCacheMetrics;
+        this.cacheGrps = cacheGrps;
+    }
+    /**
+     * Create task arguments with given parameters.
+     *
+     * @param taskMonitoringEnabled If {@code true} then Visor should collect information about tasks.
+     * @param evtOrderKey Event order key, unique for Visor instance.
+     * @param evtThrottleCntrKey Event throttle counter key, unique for Visor instance.
+     * @param sysCaches If {@code true} then collect information about system caches.
+     * @param collectCacheMetrics If {@code false} then cache metrics will not be collected.
      */
     public VisorNodeDataCollectorTaskArg(
         boolean taskMonitoringEnabled,
@@ -69,11 +99,7 @@
         boolean sysCaches,
         boolean collectCacheMetrics
     ) {
-        this.taskMonitoringEnabled = taskMonitoringEnabled;
-        this.evtOrderKey = evtOrderKey;
-        this.evtThrottleCntrKey = evtThrottleCntrKey;
-        this.sysCaches = sysCaches;
-        this.collectCacheMetrics = collectCacheMetrics;
+        this(taskMonitoringEnabled, evtOrderKey, evtThrottleCntrKey, sysCaches, collectCacheMetrics, null);
     }
 
     /**
@@ -90,7 +116,7 @@
         String evtThrottleCntrKey,
         boolean sysCaches
     ) {
-        this(taskMonitoringEnabled, evtOrderKey, evtThrottleCntrKey, sysCaches, true);
+        this(taskMonitoringEnabled, evtOrderKey, evtThrottleCntrKey, sysCaches, true, null);
     }
 
     /**
@@ -163,9 +189,23 @@
         this.collectCacheMetrics = collectCacheMetrics;
     }
 
+    /**
+     * @return Optional cache group, if provided, then caches only from that group will be collected.
+     */
+    public Set<String> getCacheGroups() {
+        return cacheGrps;
+    }
+
+    /**
+     * @param cacheGrps Optional Set of cache groups, if provided, then caches only from that groups will be collected.
+     */
+    public void setCacheGroups(Set<String> cacheGrps) {
+        this.cacheGrps = cacheGrps;
+    }
+
     /** {@inheritDoc} */
     @Override public byte getProtocolVersion() {
-        return V2;
+        return V3;
     }
 
     /** {@inheritDoc} */
@@ -175,6 +215,7 @@
         U.writeString(out, evtThrottleCntrKey);
         out.writeBoolean(sysCaches);
         out.writeBoolean(collectCacheMetrics);
+        U.writeCollection(out, cacheGrps);
     }
 
     /** {@inheritDoc} */
@@ -185,6 +226,8 @@
         sysCaches = in.readBoolean();
 
         collectCacheMetrics = protoVer < V2 || in.readBoolean();
+
+        cacheGrps = protoVer < V3 ? null : U.readSet(in);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java
index d7aed5f..ebc1c02 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorPersistenceMetrics.java
@@ -43,6 +43,18 @@
     private float walFsyncTimeAvg;
 
     /** */
+    private long walBufPollSpinRate;
+
+    /** */
+    private long walSz;
+
+    /** */
+    private long walLastRollOverTm;
+
+    /** */
+    private long cpTotalTm;
+
+    /** */
     private long lastCpDuration;
 
     /** */
@@ -66,6 +78,36 @@
     /** */
     private long lastCpCowPages;
 
+    /** */
+    private long dirtyPages;
+
+    /** */
+    private long pagesRead;
+
+    /** */
+    private long pagesWritten;
+
+    /** */
+    private long pagesReplaced;
+
+    /** */
+    private long offHeapSz;
+
+    /** */
+    private long offheapUsedSz;
+
+    /** */
+    private long usedCpBufPages;
+
+    /** */
+    private long usedCpBufSz;
+
+    /** */
+    private long cpBufSz;
+
+    /** */
+    private long totalSz;
+
     /**
      * Default constructor.
      */
@@ -74,21 +116,41 @@
     }
 
     /**
-     * @param metrics Persistence metrics.
+     * @param m Persistence metrics.
      */
-    public VisorPersistenceMetrics(DataStorageMetrics metrics) {
-        walLoggingRate = metrics.getWalLoggingRate();
-        walWritingRate = metrics.getWalWritingRate();
-        walArchiveSegments = metrics.getWalArchiveSegments();
-        walFsyncTimeAvg = metrics.getWalFsyncTimeAverage();
-        lastCpDuration = metrics.getLastCheckpointDuration();
-        lastCpLockWaitDuration = metrics.getLastCheckpointLockWaitDuration();
-        lastCpMmarkDuration = metrics.getLastCheckpointMarkDuration();
-        lastCpPagesWriteDuration = metrics.getLastCheckpointPagesWriteDuration();
-        lastCpFsyncDuration = metrics.getLastCheckpointFsyncDuration();
-        lastCpTotalPages = metrics.getLastCheckpointTotalPagesNumber();
-        lastCpDataPages = metrics.getLastCheckpointDataPagesNumber();
-        lastCpCowPages = metrics.getLastCheckpointCopiedOnWritePagesNumber();
+    public VisorPersistenceMetrics(DataStorageMetrics m) {
+        walLoggingRate = m.getWalLoggingRate();
+        walWritingRate = m.getWalWritingRate();
+        walArchiveSegments = m.getWalArchiveSegments();
+        walFsyncTimeAvg = m.getWalFsyncTimeAverage();
+        walBufPollSpinRate = m.getWalBuffPollSpinsRate();
+        walSz = m.getWalTotalSize();
+        walLastRollOverTm = m.getWalLastRollOverTime();
+
+        cpTotalTm = m.getCheckpointTotalTime();
+
+        lastCpDuration = m.getLastCheckpointDuration();
+        lastCpLockWaitDuration = m.getLastCheckpointLockWaitDuration();
+        lastCpMmarkDuration = m.getLastCheckpointMarkDuration();
+        lastCpPagesWriteDuration = m.getLastCheckpointPagesWriteDuration();
+        lastCpFsyncDuration = m.getLastCheckpointFsyncDuration();
+        lastCpTotalPages = m.getLastCheckpointTotalPagesNumber();
+        lastCpDataPages = m.getLastCheckpointDataPagesNumber();
+        lastCpCowPages = m.getLastCheckpointCopiedOnWritePagesNumber();
+
+        dirtyPages = m.getDirtyPages();
+        pagesRead = m.getPagesRead();
+        pagesWritten = m.getPagesWritten();
+        pagesReplaced = m.getPagesReplaced();
+
+        offHeapSz = m.getOffHeapSize();
+        offheapUsedSz = m.getOffheapUsedSize();
+
+        usedCpBufPages = m.getUsedCheckpointBufferPages();
+        usedCpBufSz = m.getUsedCheckpointBufferSize();
+        cpBufSz = m.getCheckpointBufferSize();
+
+        totalSz = m.getTotalAllocatedSize();
     }
 
     /**
@@ -101,80 +163,183 @@
     /**
      * @return Average number of bytes per second written during the last time interval.
      */
-    public float getWalWritingRate(){
+    public float getWalWritingRate() {
         return walWritingRate;
     }
 
     /**
      * @return Current number of WAL segments in the WAL archive.
      */
-    public int getWalArchiveSegments(){
+    public int getWalArchiveSegments() {
         return walArchiveSegments;
     }
 
     /**
      * @return Average WAL fsync duration in microseconds over the last time interval.
      */
-    public float getWalFsyncTimeAverage(){
+    public float getWalFsyncTimeAverage() {
         return walFsyncTimeAvg;
     }
 
     /**
+     * @return WAL buffer poll spins number over the last time interval.
+     */
+    public long getWalBuffPollSpinsRate() {
+        return walBufPollSpinRate;
+    }
+
+    /**
+     * @return Total size in bytes for storage WAL files.
+     */
+    public long getWalTotalSize() {
+        return walSz;
+    }
+
+    /**
+     * @return Time of the last WAL segment rollover.
+     */
+    public long getWalLastRollOverTime() {
+        return walLastRollOverTm;
+    }
+
+    /**
+     * @return Total checkpoint time from last restart.
+     */
+    public long getCheckpointTotalTime() {
+        return cpTotalTm;
+    }
+
+    /**
      * @return Total checkpoint duration in milliseconds.
      */
-    public long getLastCheckpointingDuration(){
+    public long getLastCheckpointingDuration() {
         return lastCpDuration;
     }
 
     /**
      * @return Checkpoint lock wait time in milliseconds.
      */
-    public long getLastCheckpointLockWaitDuration(){
+    public long getLastCheckpointLockWaitDuration() {
         return lastCpLockWaitDuration;
     }
 
     /**
      * @return Checkpoint mark duration in milliseconds.
      */
-    public long getLastCheckpointMarkDuration(){
+    public long getLastCheckpointMarkDuration() {
         return lastCpMmarkDuration;
     }
 
     /**
      * @return Checkpoint pages write phase in milliseconds.
      */
-    public long getLastCheckpointPagesWriteDuration(){
+    public long getLastCheckpointPagesWriteDuration() {
         return lastCpPagesWriteDuration;
     }
 
     /**
      * @return Checkpoint fsync time in milliseconds.
      */
-    public long getLastCheckpointFsyncDuration(){
+    public long getLastCheckpointFsyncDuration() {
         return lastCpFsyncDuration;
     }
 
     /**
      * @return Total number of pages written during the last checkpoint.
      */
-    public long getLastCheckpointTotalPagesNumber(){
+    public long getLastCheckpointTotalPagesNumber() {
         return lastCpTotalPages;
     }
 
     /**
      * @return Total number of data pages written during the last checkpoint.
      */
-    public long getLastCheckpointDataPagesNumber(){
+    public long getLastCheckpointDataPagesNumber() {
         return lastCpDataPages;
     }
 
     /**
      * @return Total number of pages copied to a temporary checkpoint buffer during the last checkpoint.
      */
-    public long getLastCheckpointCopiedOnWritePagesNumber(){
+    public long getLastCheckpointCopiedOnWritePagesNumber() {
         return lastCpCowPages;
     }
 
+    /**
+     * @return Total dirty pages for the next checkpoint.
+     */
+    public long getDirtyPages() {
+        return dirtyPages;
+    }
+
+    /**
+     * @return The number of read pages from last restart.
+     */
+    public long getPagesRead() {
+        return pagesRead;
+    }
+
+    /**
+     * @return The number of written pages from last restart.
+     */
+    public long getPagesWritten() {
+        return pagesWritten;
+    }
+
+    /**
+     * @return The number of replaced pages from last restart.
+     */
+    public long getPagesReplaced() {
+        return pagesReplaced;
+    }
+
+    /**
+     * @return Total offheap size in bytes.
+     */
+    public long getOffHeapSize() {
+        return offHeapSz;
+    }
+
+    /**
+     * @return Total used offheap size in bytes.
+     */
+    public long getOffheapUsedSize() {
+        return offheapUsedSz;
+    }
+
+    /**
+     * @return Checkpoint buffer size in pages.
+     */
+    public long getUsedCheckpointBufferPages() {
+        return usedCpBufPages;
+    }
+
+    /**
+     * @return Checkpoint buffer size in bytes.
+     */
+    public long getUsedCheckpointBufferSize() {
+        return usedCpBufSz;
+    }
+
+    /**
+     * @return Checkpoint buffer size in bytes.
+     */
+    public long getCheckpointBufferSize() {
+        return cpBufSz;
+    }
+
+    /**
+     * @return Total size of memory allocated in bytes.
+     */
+    public long getTotalAllocatedSize() {
+        return totalSz;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte getProtocolVersion() {
+        return V2;
+    }
+
     /** {@inheritDoc} */
     @Override protected void writeExternalData(ObjectOutput out) throws IOException {
         out.writeFloat(walLoggingRate);
@@ -189,6 +354,22 @@
         out.writeLong(lastCpTotalPages);
         out.writeLong(lastCpDataPages);
         out.writeLong(lastCpCowPages);
+
+        // V2
+        out.writeLong(walBufPollSpinRate);
+        out.writeLong(walSz);
+        out.writeLong(walLastRollOverTm);
+        out.writeLong(cpTotalTm);
+        out.writeLong(dirtyPages);
+        out.writeLong(pagesRead);
+        out.writeLong(pagesWritten);
+        out.writeLong(pagesReplaced);
+        out.writeLong(offHeapSz);
+        out.writeLong(offheapUsedSz);
+        out.writeLong(usedCpBufPages);
+        out.writeLong(usedCpBufSz);
+        out.writeLong(cpBufSz);
+        out.writeLong(totalSz);
     }
 
     /** {@inheritDoc} */
@@ -205,6 +386,23 @@
         lastCpTotalPages = in.readLong();
         lastCpDataPages = in.readLong();
         lastCpCowPages = in.readLong();
+
+        if (protoVer > V1) {
+            walBufPollSpinRate = in.readLong();
+            walSz = in.readLong();
+            walLastRollOverTm = in.readLong();
+            cpTotalTm = in.readLong();
+            dirtyPages = in.readLong();
+            pagesRead = in.readLong();
+            pagesWritten = in.readLong();
+            pagesReplaced = in.readLong();
+            offHeapSz = in.readLong();
+            offheapUsedSz = in.readLong();
+            usedCpBufPages = in.readLong();
+            usedCpBufSz = in.readLong();
+            cpBufSz = in.readLong();
+            totalSz = in.readLong();
+        }
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSuppressedError.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSuppressedError.java
index 2ffaabd..e971341 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSuppressedError.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/node/VisorSuppressedError.java
@@ -141,7 +141,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(VisorSuppressedError.class, this);
     }
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/service/VisorServiceTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/service/VisorServiceTask.java
index f2489bc..e2a1fb7 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/service/VisorServiceTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/service/VisorServiceTask.java
@@ -59,7 +59,7 @@
         @Override protected Collection<VisorServiceDescriptor> run(final Void arg) {
             Collection<VisorServiceDescriptor> res = new ArrayList<>();
 
-            if (ignite.active()) {
+            if (ignite.cluster().active()) {
                 Collection<ServiceDescriptor> services = ignite.services().serviceDescriptors();
 
                 for (ServiceDescriptor srvc : services)
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxInfo.java
index 03de5b0..be60a5e 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxInfo.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxInfo.java
@@ -26,9 +26,12 @@
 import java.util.Collection;
 import java.util.TimeZone;
 import java.util.UUID;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.visor.VisorDataTransferObject;
+import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
@@ -82,6 +85,9 @@
     /** */
     private Collection<UUID> masterNodeIds;
 
+    /** */
+    private AffinityTopologyVersion topVer;
+
     /**
      * Default constructor.
      */
@@ -103,7 +109,7 @@
      */
     public VisorTxInfo(IgniteUuid xid, long startTime, long duration, TransactionIsolation isolation,
         TransactionConcurrency concurrency, long timeout, String lb, Collection<UUID> primaryNodes,
-        TransactionState state, int size, IgniteUuid nearXid, Collection<UUID> masterNodeIds) {
+        TransactionState state, int size, IgniteUuid nearXid, Collection<UUID> masterNodeIds, AffinityTopologyVersion topVer) {
         this.xid = xid;
         this.startTime = startTime;
         this.duration = duration;
@@ -116,11 +122,12 @@
         this.size = size;
         this.nearXid = nearXid;
         this.masterNodeIds = masterNodeIds;
+        this.topVer = topVer;
     }
 
     /** {@inheritDoc} */
     @Override public byte getProtocolVersion() {
-        return V2;
+        return V3;
     }
 
     /** */
@@ -154,6 +161,11 @@
     }
 
     /** */
+    public AffinityTopologyVersion getTopologyVersion() {
+        return topVer;
+    }
+
+    /** */
     public long getTimeout() {
         return timeout;
     }
@@ -202,6 +214,8 @@
         U.writeGridUuid(out, nearXid);
         U.writeCollection(out, masterNodeIds);
         out.writeLong(startTime);
+        out.writeLong(topVer == null ? -1 : topVer.topologyVersion());
+        out.writeInt(topVer == null ? -1 : topVer.minorTopologyVersion());
     }
 
     /** {@inheritDoc} */
@@ -217,12 +231,48 @@
         size = in.readInt();
         if (protoVer >= V2) {
             nearXid = U.readGridUuid(in);
-
             masterNodeIds = U.readCollection(in);
-
             startTime = in.readLong();
         }
+        if (protoVer >= V3) {
+            long topVer = in.readLong();
+            int minorTopVer = in.readInt();
 
+            if (topVer != -1)
+                this.topVer = new AffinityTopologyVersion(topVer, minorTopVer);
+        }
+    }
+
+    /**
+     * Get tx info as user string.
+     *
+     * @return User string.
+     */
+    public String toUserString() {
+        return "    Tx: [xid=" + getXid() +
+            ", label=" + getLabel() +
+            ", state=" + getState() +
+            ", startTime=" + getFormattedStartTime() +
+            ", duration=" + getDuration() / 1000 +
+            ", isolation=" + getIsolation() +
+            ", concurrency=" + getConcurrency() +
+            ", topVer=" + (getTopologyVersion() == null ? "N/A" : getTopologyVersion()) +
+            ", timeout=" + getTimeout() +
+            ", size=" + getSize() +
+            ", dhtNodes=" + (getPrimaryNodes() == null ? "N/A" :
+            F.transform(getPrimaryNodes(), new IgniteClosure<UUID, String>() {
+                @Override public String apply(UUID id) {
+                    return U.id8(id);
+                }
+            })) +
+            ", nearXid=" + getNearXid() +
+            ", parentNodeIds=" + (getMasterNodeIds() == null ? "N/A" :
+            F.transform(getMasterNodeIds(), new IgniteClosure<UUID, String>() {
+                @Override public String apply(UUID id) {
+                    return U.id8(id);
+                }
+            })) +
+            ']';
     }
 
     /** {@inheritDoc} */
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxTask.java b/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxTask.java
index 9919b7d..23d1663 100644
--- a/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxTask.java
+++ b/modules/core/src/main/java/org/apache/ignite/internal/visor/tx/VisorTxTask.java
@@ -183,7 +183,7 @@
         private static final TxKillClosure NEAR_KILL_CLOSURE = new NearKillClosure();
 
         /** */
-        private static final TxKillClosure LOCAL_KILL_CLOSURE = NEAR_KILL_CLOSURE;
+        private static final TxKillClosure LOCAL_KILL_CLOSURE = new LocalKillClosure();
 
         /** */
         private static final TxKillClosure REMOTE_KILL_CLOSURE = new RemoteKillClosure();
@@ -312,7 +312,7 @@
 
                 infos.add(new VisorTxInfo(locTx.xid(), locTx.startTime(), duration, locTx.isolation(), locTx.concurrency(),
                     locTx.timeout(), lb, mappings, locTx.state(),
-                    size, locTx.nearXidVersion().asGridUuid(), locTx.masterNodeIds()));
+                    size, locTx.nearXidVersion().asGridUuid(), locTx.masterNodeIds(), locTx.topologyVersionSnapshot()));
 
                 if (arg.getOperation() == VisorTxOperation.KILL)
                     killClo.apply(locTx, tm);
@@ -394,7 +394,7 @@
         IgniteBiClosure<IgniteInternalTx, IgniteTxManager, IgniteInternalFuture<IgniteInternalTx>> {
     }
 
-    /** Kills near or local tx. */
+    /** Kills near tx. */
     private static class NearKillClosure implements TxKillClosure {
         /** */
         private static final long serialVersionUID = 0L;
@@ -402,7 +402,19 @@
         /** {@inheritDoc} */
         @Override public IgniteInternalFuture<IgniteInternalTx> apply(IgniteInternalTx tx, IgniteTxManager tm) {
             return tx.isRollbackOnly() || tx.state() == COMMITTING || tx.state() == COMMITTED ?
-                new GridFinishedFuture<>() : tx.rollbackAsync();
+                new GridFinishedFuture<>() : ((GridNearTxLocal)tx).rollbackNearTxLocalAsync(false, false);
+        }
+    }
+
+    /** Kills local tx. */
+    private static class LocalKillClosure implements TxKillClosure {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** {@inheritDoc} */
+        @Override public IgniteInternalFuture<IgniteInternalTx> apply(IgniteInternalTx tx, IgniteTxManager tm) {
+            return tx.isRollbackOnly() || tx.state() == COMMITTING || tx.state() == COMMITTED ?
+                new GridFinishedFuture<>() : ((GridDhtTxLocal)tx).rollbackDhtLocalAsync();
         }
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/lang/IgniteProductVersion.java b/modules/core/src/main/java/org/apache/ignite/lang/IgniteProductVersion.java
index 457ec55..eef134a 100644
--- a/modules/core/src/main/java/org/apache/ignite/lang/IgniteProductVersion.java
+++ b/modules/core/src/main/java/org/apache/ignite/lang/IgniteProductVersion.java
@@ -258,7 +258,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         String revTsStr = new SimpleDateFormat("yyyyMMdd").format(new Date(revTs * 1000));
 
         String hash = U.byteArray2HexString(revHash).toLowerCase();
diff --git a/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerExclusions.java b/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerExclusions.java
index fba35e2..8302427 100644
--- a/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerExclusions.java
+++ b/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerExclusions.java
@@ -50,7 +50,7 @@
     };
 
     /** */
-    private static final Map<Class<?>, Boolean> cache = new GridBoundedConcurrentLinkedHashMap<>(
+    private static volatile Map<Class<?>, Boolean> cache = new GridBoundedConcurrentLinkedHashMap<>(
         512, 512, 0.75f, 16);
 
     /**
@@ -153,6 +153,6 @@
      * Intended for test purposes only.
      */
     public static void clearCache() {
-        cache.clear();
+        cache = new GridBoundedConcurrentLinkedHashMap<>(512, 512, 0.75f, 16);
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerUtils.java b/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerUtils.java
index f7fef52..d5cf386 100644
--- a/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerUtils.java
+++ b/modules/core/src/main/java/org/apache/ignite/marshaller/MarshallerUtils.java
@@ -45,6 +45,9 @@
     /** Job sender node version. */
     private static final ThreadLocal<IgniteProductVersion> JOB_SND_NODE_VER = new ThreadLocal<>();
 
+    /** Job sender node version. */
+    private static final ThreadLocal<IgniteProductVersion> JOB_RCV_NODE_VER = new ThreadLocal<>();
+
     /**
      * Set node name to marshaller context if possible.
      *
@@ -96,6 +99,24 @@
     }
 
     /**
+     * Sets thread local job receiver node version.
+     *
+     * @param ver Thread local job receiver node version.
+     */
+    public static void jobReceiverVersion(IgniteProductVersion ver) {
+        JOB_RCV_NODE_VER.set(ver);
+    }
+
+    /**
+     * Returns thread local job receiver node version.
+     *
+     * @return Thread local job receiver node version.
+     */
+    public static IgniteProductVersion jobReceiverVersion() {
+        return JOB_RCV_NODE_VER.get();
+    }
+
+    /**
      * Returns class name filter for marshaller.
      *
      * @return Class name filter for marshaller.
@@ -199,5 +220,4 @@
                 "[path=" + fileName + ']', e);
         }
     }
-
 }
diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/CacheMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/CacheMetricsMXBean.java
index 16bdede..74991bb 100644
--- a/modules/core/src/main/java/org/apache/ignite/mxbean/CacheMetricsMXBean.java
+++ b/modules/core/src/main/java/org/apache/ignite/mxbean/CacheMetricsMXBean.java
@@ -28,271 +28,271 @@
 @MXBeanDescription("MBean that provides access to cache descriptor.")
 public interface CacheMetricsMXBean extends CacheStatisticsMXBean, CacheMXBean, CacheMetrics {
     /** {@inheritDoc} */
-    @MXBeanDescription("Clear statistics.")
+    @Override @MXBeanDescription("Clear statistics.")
     public void clear();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of hits.")
+    @Override @MXBeanDescription("Number of hits.")
     public long getCacheHits();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of successful hits.")
+    @Override @MXBeanDescription("Percentage of successful hits.")
     public float getCacheHitPercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of misses.")
+    @Override @MXBeanDescription("Number of misses.")
     public long getCacheMisses();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of accesses that failed to find anything.")
+    @Override @MXBeanDescription("Percentage of accesses that failed to find anything.")
     public float getCacheMissPercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of gets.")
+    @Override @MXBeanDescription("Number of gets.")
     public long getCacheGets();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of puts.")
+    @Override @MXBeanDescription("Number of puts.")
     public long getCachePuts();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of removals.")
+    @Override @MXBeanDescription("Number of removals.")
     public long getCacheRemovals();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of eviction entries.")
+    @Override @MXBeanDescription("Number of eviction entries.")
     public long getCacheEvictions();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time to execute get.")
+    @Override @MXBeanDescription("Average time to execute get.")
     public float getAverageGetTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time to execute put.")
+    @Override @MXBeanDescription("Average time to execute put.")
     public float getAveragePutTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time to execute remove.")
+    @Override @MXBeanDescription("Average time to execute remove.")
     public float getAverageRemoveTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time to commit transaction.")
+    @Override @MXBeanDescription("Average time to commit transaction.")
     public float getAverageTxCommitTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time to rollback transaction.")
+    @Override @MXBeanDescription("Average time to rollback transaction.")
     public float getAverageTxRollbackTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of transaction commits.")
+    @Override @MXBeanDescription("Number of transaction commits.")
     public long getCacheTxCommits();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of transaction rollback.")
+    @Override @MXBeanDescription("Number of transaction rollback.")
     public long getCacheTxRollbacks();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Cache name.")
+    @Override @MXBeanDescription("Cache name.")
     public String name();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of gets from off-heap memory.")
+    @Override @MXBeanDescription("Number of gets from off-heap memory.")
     public long getOffHeapGets();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of puts to off-heap memory.")
+    @Override @MXBeanDescription("Number of puts to off-heap memory.")
     public long getOffHeapPuts();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of removed entries from off-heap memory.")
+    @Override @MXBeanDescription("Number of removed entries from off-heap memory.")
     public long getOffHeapRemovals();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of evictions from off-heap memory.")
+    @Override @MXBeanDescription("Number of evictions from off-heap memory.")
     public long getOffHeapEvictions();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of hits on off-heap memory.")
+    @Override @MXBeanDescription("Number of hits on off-heap memory.")
     public long getOffHeapHits();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of hits on off-heap memory.")
+    @Override @MXBeanDescription("Percentage of hits on off-heap memory.")
     public float getOffHeapHitPercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of misses on off-heap memory.")
+    @Override @MXBeanDescription("Number of misses on off-heap memory.")
     public long getOffHeapMisses();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of misses on off-heap memory.")
+    @Override @MXBeanDescription("Percentage of misses on off-heap memory.")
     public float getOffHeapMissPercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of entries stored in off-heap memory.")
+    @Override @MXBeanDescription("Number of entries stored in off-heap memory.")
     public long getOffHeapEntriesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of entries in heap memory.")
+    @Override @MXBeanDescription("Number of entries in heap memory.")
     public long getHeapEntriesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of primary entries stored in off-heap memory.")
+    @Override @MXBeanDescription("Number of primary entries stored in off-heap memory.")
     public long getOffHeapPrimaryEntriesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of backup stored in off-heap memory.")
+    @Override @MXBeanDescription("Number of backup stored in off-heap memory.")
     public long getOffHeapBackupEntriesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Memory size allocated in off-heap.")
+    @Override @MXBeanDescription("Memory size allocated in off-heap.")
     public long getOffHeapAllocatedSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of non-null values in the cache.")
+    @Override @MXBeanDescription("Number of non-null values in the cache.")
     public int getSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of non-null values in the cache as a long value.")
+    @Override @MXBeanDescription("Number of non-null values in the cache as a long value.")
     public long getCacheSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of keys in the cache (possibly with null values).")
+    @Override @MXBeanDescription("Number of keys in the cache (possibly with null values).")
     public int getKeySize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True if cache is empty.")
+    @Override @MXBeanDescription("True if cache is empty.")
     public boolean isEmpty();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current size of evict queue.")
+    @Override @MXBeanDescription("Current size of evict queue.")
     public int getDhtEvictQueueCurrentSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction per-thread map size.")
+    @Override @MXBeanDescription("Transaction per-thread map size.")
     public int getTxThreadMapSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction per-Xid map size.")
+    @Override @MXBeanDescription("Transaction per-Xid map size.")
     public int getTxXidMapSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction committed queue size.")
+    @Override @MXBeanDescription("Transaction committed queue size.")
     public int getTxCommitQueueSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction prepared queue size.")
+    @Override @MXBeanDescription("Transaction prepared queue size.")
     public int getTxPrepareQueueSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction start version counts map size.")
+    @Override @MXBeanDescription("Transaction start version counts map size.")
     public int getTxStartVersionCountsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction committed ID map size.")
+    @Override @MXBeanDescription("Transaction committed ID map size.")
     public int getTxCommittedVersionsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction rolled back ID map size.")
+    @Override @MXBeanDescription("Transaction rolled back ID map size.")
     public int getTxRolledbackVersionsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT per-thread map size.")
+    @Override @MXBeanDescription("Transaction DHT per-thread map size.")
     public int getTxDhtThreadMapSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT per-Xid map size.")
+    @Override @MXBeanDescription("Transaction DHT per-Xid map size.")
     public int getTxDhtXidMapSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT committed queue size.")
+    @Override @MXBeanDescription("Transaction DHT committed queue size.")
     public int getTxDhtCommitQueueSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT prepared queue size.")
+    @Override @MXBeanDescription("Transaction DHT prepared queue size.")
     public int getTxDhtPrepareQueueSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT start version counts map size.")
+    @Override @MXBeanDescription("Transaction DHT start version counts map size.")
     public int getTxDhtStartVersionCountsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT committed ID map size.")
+    @Override @MXBeanDescription("Transaction DHT committed ID map size.")
     public int getTxDhtCommittedVersionsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Transaction DHT rolled back ID map size.")
+    @Override @MXBeanDescription("Transaction DHT rolled back ID map size.")
     public int getTxDhtRolledbackVersionsSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True if write-behind is enabled for this cache.")
+    @Override @MXBeanDescription("True if write-behind is enabled for this cache.")
     public boolean isWriteBehindEnabled();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Size of internal buffer that triggers flush procedure.")
+    @Override @MXBeanDescription("Size of internal buffer that triggers flush procedure.")
     public int getWriteBehindFlushSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Count of flush threads.")
+    @Override @MXBeanDescription("Count of flush threads.")
     public int getWriteBehindFlushThreadCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Flush frequency interval in milliseconds.")
+    @Override @MXBeanDescription("Flush frequency interval in milliseconds.")
     public long getWriteBehindFlushFrequency();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum size of batch for similar operations.")
+    @Override @MXBeanDescription("Maximum size of batch for similar operations.")
     public int getWriteBehindStoreBatchSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Count of cache overflow events since write-behind cache has started.")
+    @Override @MXBeanDescription("Count of cache overflow events since write-behind cache has started.")
     public int getWriteBehindTotalCriticalOverflowCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Count of cache overflow events since write-behind cache has started.")
+    @Override @MXBeanDescription("Count of cache overflow events since write-behind cache has started.")
     public int getWriteBehindCriticalOverflowCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Count of cache cache entries that are currently in retry state.")
+    @Override @MXBeanDescription("Count of cache cache entries that are currently in retry state.")
     public int getWriteBehindErrorRetryCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Count of cache entries that are waiting to be flushed.")
+    @Override @MXBeanDescription("Count of cache entries that are waiting to be flushed.")
     public int getWriteBehindBufferSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Key type.")
+    @Override @MXBeanDescription("Key type.")
     public String getKeyType();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Value type.")
+    @Override @MXBeanDescription("Value type.")
     public String getValueType();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True if the cache is store by value.")
+    @Override @MXBeanDescription("True if the cache is store by value.")
     public boolean isStoreByValue();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True if statistics collection is enabled.")
+    @Override @MXBeanDescription("True if statistics collection is enabled.")
     public boolean isStatisticsEnabled();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True if management is enabled.")
+    @Override @MXBeanDescription("True if management is enabled.")
     public boolean isManagementEnabled();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True when a cache is in read-through mode.")
+    @Override @MXBeanDescription("True when a cache is in read-through mode.")
     public boolean isReadThrough();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True when a cache is in write-through mode.")
+    @Override @MXBeanDescription("True when a cache is in write-through mode.")
     public boolean isWriteThrough();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True when a cache topology is valid for read operations.")
+    @Override @MXBeanDescription("True when a cache topology is valid for read operations.")
     public boolean isValidForReading();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("True when a cache topology is valid for write operations.")
+    @Override @MXBeanDescription("True when a cache topology is valid for write operations.")
     public boolean isValidForWriting();
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/mxbean/ClusterMetricsMXBean.java b/modules/core/src/main/java/org/apache/ignite/mxbean/ClusterMetricsMXBean.java
index 537cef7..ffb4080 100644
--- a/modules/core/src/main/java/org/apache/ignite/mxbean/ClusterMetricsMXBean.java
+++ b/modules/core/src/main/java/org/apache/ignite/mxbean/ClusterMetricsMXBean.java
@@ -27,234 +27,234 @@
 @MXBeanDescription("MBean that provides access to aggregated cluster metrics.")
 public interface ClusterMetricsMXBean extends ClusterMetrics {
     /** {@inheritDoc} */
-    @MXBeanDescription("Last update time of this node metrics.")
+    @Override @MXBeanDescription("Last update time of this node metrics.")
     public long getLastUpdateTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum number of jobs that ever ran concurrently on this node.")
+    @Override @MXBeanDescription("Maximum number of jobs that ever ran concurrently on this node.")
     public int getMaximumActiveJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of currently active jobs concurrently executing on the node.")
+    @Override @MXBeanDescription("Number of currently active jobs concurrently executing on the node.")
     public int getCurrentActiveJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average number of active jobs concurrently executing on the node.")
+    @Override @MXBeanDescription("Average number of active jobs concurrently executing on the node.")
     public float getAverageActiveJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum number of waiting jobs this node had.")
+    @Override @MXBeanDescription("Maximum number of waiting jobs this node had.")
     public int getMaximumWaitingJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of queued jobs currently waiting to be executed.")
+    @Override @MXBeanDescription("Number of queued jobs currently waiting to be executed.")
     public int getCurrentWaitingJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average number of waiting jobs this node had queued.")
+    @Override @MXBeanDescription("Average number of waiting jobs this node had queued.")
     public float getAverageWaitingJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum number of jobs rejected at once during a single collision resolution operation.")
+    @Override @MXBeanDescription("Maximum number of jobs rejected at once during a single collision resolution operation.")
     public int getMaximumRejectedJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of jobs rejected after more recent collision resolution operation.")
+    @Override @MXBeanDescription("Number of jobs rejected after more recent collision resolution operation.")
     public int getCurrentRejectedJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average number of jobs this node rejects during collision resolution operations.")
+    @Override @MXBeanDescription("Average number of jobs this node rejects during collision resolution operations.")
     public float getAverageRejectedJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription(
+    @Override @MXBeanDescription(
         "Total number of jobs this node rejects during collision resolution operations since node startup.")
     public int getTotalRejectedJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum number of cancelled jobs this node ever had running concurrently.")
+    @Override @MXBeanDescription("Maximum number of cancelled jobs this node ever had running concurrently.")
     public int getMaximumCancelledJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Number of cancelled jobs that are still running.")
+    @Override @MXBeanDescription("Number of cancelled jobs that are still running.")
     public int getCurrentCancelledJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average number of cancelled jobs this node ever had running concurrently.")
+    @Override @MXBeanDescription("Average number of cancelled jobs this node ever had running concurrently.")
     public float getAverageCancelledJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total number of cancelled jobs since node startup.")
+    @Override @MXBeanDescription("Total number of cancelled jobs since node startup.")
     public int getTotalCancelledJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total number of jobs handled by the node.")
+    @Override @MXBeanDescription("Total number of jobs handled by the node.")
     public int getTotalExecutedJobs();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total time all finished jobs takes to execute on the node.")
+    @Override @MXBeanDescription("Total time all finished jobs takes to execute on the node.")
     public long getTotalJobsExecutionTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum time a job ever spent waiting in a queue to be executed.")
+    @Override @MXBeanDescription("Maximum time a job ever spent waiting in a queue to be executed.")
     public long getMaximumJobWaitTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current wait time of oldest job.")
+    @Override @MXBeanDescription("Current wait time of oldest job.")
     public long getCurrentJobWaitTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time jobs spend waiting in the queue to be executed.")
+    @Override @MXBeanDescription("Average time jobs spend waiting in the queue to be executed.")
     public double getAverageJobWaitTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Time it took to execute the longest job on the node.")
+    @Override @MXBeanDescription("Time it took to execute the longest job on the node.")
     public long getMaximumJobExecuteTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Longest time a current job has been executing for.")
+    @Override @MXBeanDescription("Longest time a current job has been executing for.")
     public long getCurrentJobExecuteTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time a job takes to execute on the node.")
+    @Override @MXBeanDescription("Average time a job takes to execute on the node.")
     public double getAverageJobExecuteTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total number of tasks handled by the node.")
+    @Override @MXBeanDescription("Total number of tasks handled by the node.")
     public int getTotalExecutedTasks();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total time this node spent executing jobs.")
+    @Override @MXBeanDescription("Total time this node spent executing jobs.")
     public long getTotalBusyTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total time this node spent idling (not executing any jobs).")
+    @Override @MXBeanDescription("Total time this node spent idling (not executing any jobs).")
     public long getTotalIdleTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Time this node spend idling since executing last job.")
+    @Override @MXBeanDescription("Time this node spend idling since executing last job.")
     public long getCurrentIdleTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of time this node is busy executing jobs vs. idling.")
+    @Override @MXBeanDescription("Percentage of time this node is busy executing jobs vs. idling.")
     public float getBusyTimePercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Percentage of time this node is idling vs. executing jobs.")
+    @Override @MXBeanDescription("Percentage of time this node is idling vs. executing jobs.")
     public float getIdleTimePercentage();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The number of CPUs available to the Java Virtual Machine.")
+    @Override @MXBeanDescription("The number of CPUs available to the Java Virtual Machine.")
     public int getTotalCpus();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The system load average; or a negative value if not available.")
+    @Override @MXBeanDescription("The system load average; or a negative value if not available.")
     public double getCurrentCpuLoad();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average of CPU load values over all metrics kept in the history.")
+    @Override @MXBeanDescription("Average of CPU load values over all metrics kept in the history.")
     public double getAverageCpuLoad();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Average time spent in GC since the last update.")
+    @Override @MXBeanDescription("Average time spent in GC since the last update.")
     public double getCurrentGcCpuLoad();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The initial size of memory in bytes; -1 if undefined.")
+    @Override @MXBeanDescription("The initial size of memory in bytes; -1 if undefined.")
     public long getHeapMemoryInitialized();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current heap size that is used for object allocation.")
+    @Override @MXBeanDescription("Current heap size that is used for object allocation.")
     public long getHeapMemoryUsed();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The amount of committed memory in bytes.")
+    @Override @MXBeanDescription("The amount of committed memory in bytes.")
     public long getHeapMemoryCommitted();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The maximum amount of memory in bytes; -1 if undefined.")
+    @Override @MXBeanDescription("The maximum amount of memory in bytes; -1 if undefined.")
     public long getHeapMemoryMaximum();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The total amount of memory in bytes; -1 if undefined.")
+    @Override @MXBeanDescription("The total amount of memory in bytes; -1 if undefined.")
     public long getHeapMemoryTotal();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The initial size of memory in bytes; -1 if undefined.")
+    @Override @MXBeanDescription("The initial size of memory in bytes; -1 if undefined.")
     public long getNonHeapMemoryInitialized();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current non-heap memory size that is used by Java VM.")
+    @Override @MXBeanDescription("Current non-heap memory size that is used by Java VM.")
     public long getNonHeapMemoryUsed();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Amount of non-heap memory in bytes that is committed for the JVM to use.")
+    @Override @MXBeanDescription("Amount of non-heap memory in bytes that is committed for the JVM to use.")
     public long getNonHeapMemoryCommitted();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Maximum amount of non-heap memory in bytes that can " +
+    @Override @MXBeanDescription("Maximum amount of non-heap memory in bytes that can " +
         "be used for memory management. -1 if undefined.")
     public long getNonHeapMemoryMaximum();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total amount of non-heap memory in bytes that can " +
+    @Override @MXBeanDescription("Total amount of non-heap memory in bytes that can " +
         "be used for memory management. -1 if undefined.")
     public long getNonHeapMemoryTotal();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Uptime of the JVM in milliseconds.")
+    @Override @MXBeanDescription("Uptime of the JVM in milliseconds.")
     public long getUpTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Start time of the JVM in milliseconds.")
+    @Override @MXBeanDescription("Start time of the JVM in milliseconds.")
     public long getStartTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current number of live threads.")
+    @Override @MXBeanDescription("Current number of live threads.")
     public int getCurrentThreadCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The peak live thread count.")
+    @Override @MXBeanDescription("The peak live thread count.")
     public int getMaximumThreadCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("The total number of threads started.")
+    @Override @MXBeanDescription("The total number of threads started.")
     public long getTotalStartedThreadCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Current number of live daemon threads.")
+    @Override @MXBeanDescription("Current number of live daemon threads.")
     public int getCurrentDaemonThreadCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Last data version.")
+    @Override @MXBeanDescription("Last data version.")
     public long getLastDataVersion();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Sent messages count.")
+    @Override @MXBeanDescription("Sent messages count.")
     public int getSentMessagesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Sent bytes count.")
+    @Override @MXBeanDescription("Sent bytes count.")
     public long getSentBytesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Received messages count.")
+    @Override @MXBeanDescription("Received messages count.")
     public int getReceivedMessagesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Received bytes count.")
+    @Override @MXBeanDescription("Received bytes count.")
     public long getReceivedBytesCount();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Outbound messages queue size.")
+    @Override @MXBeanDescription("Outbound messages queue size.")
     public int getOutboundMessagesQueueSize();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Start time of the grid node in milliseconds.")
+    @Override @MXBeanDescription("Start time of the grid node in milliseconds.")
     public long getNodeStartTime();
 
     /** {@inheritDoc} */
-    @MXBeanDescription("Total number of nodes.")
+    @Override @MXBeanDescription("Total number of nodes.")
     public int getTotalNodes();
 
     /**
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/IgniteSpiAdapter.java b/modules/core/src/main/java/org/apache/ignite/spi/IgniteSpiAdapter.java
index e8c27d2..a7e6e8c 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/IgniteSpiAdapter.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/IgniteSpiAdapter.java
@@ -519,7 +519,7 @@
         if (!enabled)
             return;
 
-        if (!checkClient && (CU.clientNode(getLocalNode()) || CU.clientNode(node)))
+        if (!checkClient && (getLocalNode().isClient() || node.isClient()))
             return;
 
         String clsAttr = createSpiAttributeName(IgniteNodeAttributes.ATTR_SPI_CLASS);
@@ -547,7 +547,8 @@
         if (rmtCls == null) {
             if (!optional && starting)
                 throw new IgniteSpiException("Remote SPI with the same name is not configured" + tipStr +
-                    " [name=" + name + ", loc=" + locCls + ']');
+                    " [name=" + name + ", loc=" + locCls + ", locNode=" + spiCtx.localNode() + ", rmt=" + rmtCls +
+                    ", rmtNode=" + node + ']');
 
             sb.a(format(">>> Remote SPI with the same name is not configured: " + name, locCls));
         }
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
index 6617568..4ab1dd4 100755
--- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpi.java
@@ -204,16 +204,22 @@
  * <h2 class="header">Optional</h2>
  * The following configuration parameters are optional:
  * <ul>
+ * <li>Address resolver (see {@link #setAddressResolver(AddressResolver)}</li>
  * <li>Node local IP address (see {@link #setLocalAddress(String)})</li>
  * <li>Node local port number (see {@link #setLocalPort(int)})</li>
  * <li>Local port range (see {@link #setLocalPortRange(int)}</li>
+ * <li>Use paired connections (see {@link #setUsePairedConnections(boolean)}</li>
  * <li>Connections per node (see {@link #setConnectionsPerNode(int)})</li>
+ * <li>Shared memory port (see {@link #setSharedMemoryPort(int)}</li>
  * <li>Idle connection timeout (see {@link #setIdleConnectionTimeout(long)})</li>
  * <li>Direct or heap buffer allocation (see {@link #setDirectBuffer(boolean)})</li>
  * <li>Direct or heap buffer allocation for sending (see {@link #setDirectSendBuffer(boolean)})</li>
  * <li>Count of selectors and selector threads for NIO server (see {@link #setSelectorsCount(int)})</li>
+ * <li>Selector thread busy-loop iterations (see {@link #setSelectorSpins(long)}</li>
  * <li>{@code TCP_NODELAY} socket option for sockets (see {@link #setTcpNoDelay(boolean)})</li>
+ * <li>Filter reachable addresses (see {@link #setFilterReachableAddresses(boolean)} </li>
  * <li>Message queue limit (see {@link #setMessageQueueLimit(int)})</li>
+ * <li>Slow client queue limit (see {@link #setSlowClientQueueLimit(int)})</li>
  * <li>Connect timeout (see {@link #setConnectTimeout(long)})</li>
  * <li>Maximum connect timeout (see {@link #setMaxConnectTimeout(long)})</li>
  * <li>Reconnect attempts count (see {@link #setReconnectCount(int)})</li>
@@ -382,7 +388,7 @@
     private final GridNioServerListener<Message> srvLsnr =
         new GridNioServerListenerAdapter<Message>() {
             @Override public void onSessionWriteTimeout(GridNioSession ses) {
-                LT.warn(log,"Communication SPI session write timed out (consider increasing " +
+                LT.warn(log, "Communication SPI session write timed out (consider increasing " +
                     "'socketWriteTimeout' " + "configuration property) [remoteAddr=" + ses.remoteAddress() +
                     ", writeTimeout=" + sockWriteTimeout + ']');
 
@@ -499,7 +505,7 @@
                     boolean unknownNode = true;
 
                     if (discoverySpi instanceof TcpDiscoverySpi) {
-                        TcpDiscoverySpi tcpDiscoverySpi = (TcpDiscoverySpi) discoverySpi;
+                        TcpDiscoverySpi tcpDiscoverySpi = (TcpDiscoverySpi)discoverySpi;
 
                         ClusterNode node0 = tcpDiscoverySpi.getNode0(sndId);
 
@@ -511,7 +517,7 @@
                         }
                     }
                     else if (discoverySpi instanceof IgniteDiscoverySpi)
-                        unknownNode = !((IgniteDiscoverySpi) discoverySpi).knownNode(sndId);
+                        unknownNode = !((IgniteDiscoverySpi)discoverySpi).knownNode(sndId);
 
                     if (unknownNode) {
                         U.warn(log, "Close incoming connection, unknown node [nodeId=" + sndId + ", ses=" + ses + ']');
@@ -636,7 +642,7 @@
 
                         if (log.isDebugEnabled())
                             log.debug("Received incoming connection from remote node " +
-                            "[rmtNode=" + rmtNode.id() + ", reserved=" + reserved +
+                                "[rmtNode=" + rmtNode.id() + ", reserved=" + reserved +
                                 ", recovery=" + recoveryDesc + ']');
 
                         if (reserved) {
@@ -655,9 +661,9 @@
                         if (oldFut instanceof ConnectFuture && locNode.order() < rmtNode.order()) {
                             if (log.isInfoEnabled()) {
                                 log.info("Received incoming connection from remote node while " +
-                                "connecting to this node, rejecting [locNode=" + locNode.id() +
-                                ", locNodeOrder=" + locNode.order() + ", rmtNode=" + rmtNode.id() +
-                                ", rmtNodeOrder=" + rmtNode.order() + ']');
+                                    "connecting to this node, rejecting [locNode=" + locNode.id() +
+                                    ", locNodeOrder=" + locNode.order() + ", rmtNode=" + rmtNode.id() +
+                                    ", rmtNodeOrder=" + rmtNode.order() + ']');
                             }
 
                             ses.send(new RecoveryLastReceivedMessage(ALREADY_CONNECTED));
@@ -928,7 +934,7 @@
                                     catch (IgniteCheckedException e) {
                                         if (log.isDebugEnabled())
                                             log.debug("Failed to send recovery handshake " +
-                                                    "[rmtNode=" + rmtNode.id() + ", err=" + e + ']');
+                                                "[rmtNode=" + rmtNode.id() + ", err=" + e + ']');
 
                                         recoveryDesc.release();
                                     }
@@ -1010,14 +1016,14 @@
                                         msgFut.get();
 
                                         GridTcpNioCommunicationClient client =
-                                                connected(recoveryDesc, ses, rmtNode, msg.received(), false, createClient);
+                                            connected(recoveryDesc, ses, rmtNode, msg.received(), false, createClient);
 
                                         fut.onDone(client);
                                     }
                                     catch (IgniteCheckedException e) {
                                         if (log.isDebugEnabled())
                                             log.debug("Failed to send recovery handshake " +
-                                                    "[rmtNode=" + rmtNode.id() + ", err=" + e + ']');
+                                                "[rmtNode=" + rmtNode.id() + ", err=" + e + ']');
 
                                         recoveryDesc.release();
 
@@ -1210,6 +1216,15 @@
     }
 
     /**
+     * See {@link #setAddressResolver(AddressResolver)}.
+     *
+     * @return Address resolver.
+     */
+    public AddressResolver getAddressResolver() {
+        return addrRslvr;
+    }
+
+    /**
      * Injects resources.
      *
      * @param ignite Ignite.
@@ -1330,9 +1345,10 @@
      * Default is {@code false}.
      *
      * @param usePairedConnections {@code true} to use paired connections and {@code false} otherwise.
-     * @see #getConnectionsPerNode()
      * @return {@code this} for chaining.
+     * @see #getConnectionsPerNode()
      */
+    @IgniteSpiConfiguration(optional = true)
     public TcpCommunicationSpi setUsePairedConnections(boolean usePairedConnections) {
         this.usePairedConnections = usePairedConnections;
 
@@ -1345,9 +1361,10 @@
      * half for outgoing messages.
      *
      * @param maxConnectionsPerNode Number of connections per node.
-     * @see #isUsePairedConnections()
      * @return {@code this} for chaining.
+     * @see #isUsePairedConnections()
      */
+    @IgniteSpiConfiguration(optional = true)
     public TcpCommunicationSpi setConnectionsPerNode(int maxConnectionsPerNode) {
         this.connectionsPerNode = maxConnectionsPerNode;
 
@@ -1357,7 +1374,7 @@
     /**
      * See {@link #setConnectionsPerNode(int)}.
      *
-     *  @return Number of connections per node.
+     * @return Number of connections per node.
      */
     public int getConnectionsPerNode() {
         return connectionsPerNode;
@@ -1513,7 +1530,8 @@
      * See {@link #setConnectTimeout(long)}.
      *
      * @return Connect timeout.
-     */public long getConnectTimeout() {
+     */
+    public long getConnectTimeout() {
         return connTimeout;
     }
 
@@ -1671,6 +1689,7 @@
      * @param selectorSpins Selector thread busy-loop iterations.
      * @return {@code this} for chaining.
      */
+    @IgniteSpiConfiguration(optional = true)
     public TcpCommunicationSpi setSelectorSpins(long selectorSpins) {
         this.selectorSpins = selectorSpins;
 
@@ -1832,6 +1851,7 @@
      * @param slowClientQueueLimit Slow client queue limit.
      * @return {@code this} for chaining.
      */
+    @IgniteSpiConfiguration(optional = true)
     public TcpCommunicationSpi setSlowClientQueueLimit(int slowClientQueueLimit) {
         this.slowClientQueueLimit = slowClientQueueLimit;
 
@@ -2223,7 +2243,7 @@
             log.debug(startInfo());
     }
 
-    /** {@inheritDoc} }*/
+    /** {@inheritDoc} } */
     @Override public void onContextInitialized0(IgniteSpiContext spiCtx) throws IgniteSpiException {
         spiCtx.registerPort(boundTcpPort, IgnitePortProtocol.TCP);
 
@@ -2331,12 +2351,12 @@
 
                 IgniteBiInClosure<GridNioSession, Integer> queueSizeMonitor =
                     !clientMode && slowClientQueueLimit > 0 ?
-                    new CI2<GridNioSession, Integer>() {
-                        @Override public void apply(GridNioSession ses, Integer qSize) {
-                            checkClientQueueSize(ses, qSize);
-                        }
-                    } :
-                    null;
+                        new CI2<GridNioSession, Integer>() {
+                            @Override public void apply(GridNioSession ses, Integer qSize) {
+                                checkClientQueueSize(ses, qSize);
+                            }
+                        } :
+                        null;
 
                 GridNioFilter[] filters;
 
@@ -3262,7 +3282,7 @@
                         GridNioSession ses = recoveryDesc.session();
 
                         if (ses != null) {
-                            while(ses.closeTime() == 0)
+                            while (ses.closeTime() == 0)
                                 ses.close();
                         }
 
@@ -3485,7 +3505,6 @@
      * @param node Remote node.
      * @param addrs Remote node addresses.
      * @param errs TCP client creation errors.
-     *
      * @throws IgniteCheckedException If failed.
      */
     protected void processClientCreationError(
@@ -3513,7 +3532,7 @@
 
         if (!commErrResolve && enableForcibleNodeKill) {
             if (ctx.node(node.id()) != null
-                && (CU.clientNode(node) ||  !CU.clientNode(getLocalNode())) &&
+                && (node.isClient() || !getLocalNode().isClient()) &&
                 connectionError(errs)) {
                 String msg = "TcpCommunicationSpi failed to establish connection to node, node will be dropped from " +
                     "cluster [" + "rmtNode=" + node + ']';
@@ -3592,8 +3611,8 @@
      * @param timeout Timeout for handshake.
      * @param sslMeta Session meta.
      * @param handshakeConnIdx Non null connection index if need send it in handshake.
-     * @throws IgniteCheckedException If handshake failed or wasn't completed withing timeout.
      * @return Handshake response.
+     * @throws IgniteCheckedException If handshake failed or wasn't completed withing timeout.
      */
     @SuppressWarnings("ThrowFromFinallyBlock")
     private long safeTcpHandshake(
@@ -4113,7 +4132,7 @@
         /** {@inheritDoc} */
         @Override public void onEvent(Event evt) {
             assert evt instanceof DiscoveryEvent : evt;
-            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED ;
+            assert evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED;
 
             onNodeLeft(((DiscoveryEvent)evt).eventNode().id());
         }
@@ -4194,7 +4213,7 @@
                     srvLsnr,
                     writerFactory,
                     new GridNioCodecFilter(
-                        new GridDirectParser(log.getLogger(GridDirectParser.class),msgFactory, readerFactory),
+                        new GridDirectParser(log.getLogger(GridDirectParser.class), msgFactory, readerFactory),
                         log,
                         true),
                     new GridConnectionBytesVerifyFilter(log)
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/internal/TcpCommunicationConnectionCheckFuture.java b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/internal/TcpCommunicationConnectionCheckFuture.java
index c42fa57..46fdb0b 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/internal/TcpCommunicationConnectionCheckFuture.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/communication/tcp/internal/TcpCommunicationConnectionCheckFuture.java
@@ -339,12 +339,12 @@
         }
 
         /** {@inheritDoc} */
-        public void onTimeout() {
+        @Override public void onTimeout() {
             cancel();
         }
 
         /** {@inheritDoc} */
-        public void onConnected(UUID rmtNodeId) {
+        @Override public void onConnected(UUID rmtNodeId) {
             finish(nodeId(nodeIdx).equals(rmtNodeId));
         }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/DiscoverySpiListener.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/DiscoverySpiListener.java
index 2b2ac94..519a235 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/DiscoverySpiListener.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/DiscoverySpiListener.java
@@ -21,6 +21,7 @@
 import java.util.Map;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.events.DiscoveryEvent;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.jetbrains.annotations.Nullable;
 
 /**
@@ -48,8 +49,10 @@
      *      {@code EVT_NODE_JOINED}, then joined node will be in snapshot).
      * @param topHist Topology snapshots history.
      * @param data Data for custom event.
+     *
+     * @return A future that will be completed when notification process has finished.
      */
-    public void onDiscovery(
+    public IgniteInternalFuture onDiscovery(
         int type,
         long topVer,
         ClusterNode node,
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
index afdffaf..673290e 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ClientImpl.java
@@ -48,6 +48,7 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicReference;
 import javax.net.ssl.SSLException;
 import org.apache.ignite.Ignite;
@@ -60,6 +61,7 @@
 import org.apache.ignite.cache.CacheMetrics;
 import org.apache.ignite.cluster.ClusterMetrics;
 import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException;
 import org.apache.ignite.internal.IgniteEx;
@@ -79,6 +81,7 @@
 import org.apache.ignite.internal.worker.WorkersRegistry;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.spi.IgniteSpiAdapter;
 import org.apache.ignite.spi.IgniteSpiContext;
 import org.apache.ignite.spi.IgniteSpiException;
 import org.apache.ignite.spi.IgniteSpiOperationTimeoutHelper;
@@ -147,6 +150,12 @@
     /** */
     private static final Object SPI_RECONNECT = "SPI_RECONNECT";
 
+    /** */
+    private static final long CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT = IgniteSystemProperties.getLong(
+        IgniteSystemProperties.CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL,
+        2 * 60_000
+    );
+
     /** Remote nodes. */
     private final ConcurrentMap<UUID, TcpDiscoveryNode> rmtNodes = new ConcurrentHashMap<>();
 
@@ -470,7 +479,12 @@
 
                 Collection<ClusterNode> top = updateTopologyHistory(topVer + 1, null);
 
-                lsnr.onDiscovery(EVT_NODE_FAILED, topVer, n, top, new TreeMap<>(topHist), null);
+                try {
+                    lsnr.onDiscovery(EVT_NODE_FAILED, topVer, n, top, new TreeMap<>(topHist), null).get();
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IgniteException("Failed to wait for discovery listener notification", e);
+                }
             }
         }
 
@@ -1610,6 +1624,12 @@
         /** */
         private boolean nodeAdded;
 
+        /** */
+        private long lastReconnectTimestamp = -1;
+
+        /** */
+        private long currentReconnectDelay = -1;
+
         /**
          * @param log Logger.
          */
@@ -1692,6 +1712,8 @@
 
                             locNode.onClientDisconnected(newId);
 
+                            throttleClientReconnect();
+
                             tryJoin();
                         }
                     }
@@ -1881,6 +1903,36 @@
         }
 
         /**
+         * Wait random delay before trying to reconnect. Delay will grow exponentially every time client is forced to
+         * reconnect, but only if all these reconnections happened in small period of time (2 minutes). Maximum delay
+         * could be configured with {@link IgniteSpiAdapter#clientFailureDetectionTimeout()}, default value is
+         * {@link IgniteConfiguration#DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT}.
+         *
+         * @throws InterruptedException If thread is interrupted.
+         */
+        private void throttleClientReconnect() throws InterruptedException {
+            if (U.currentTimeMillis() - lastReconnectTimestamp > CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT)
+                currentReconnectDelay = 0; // Skip pause on first reconnect.
+            else if (currentReconnectDelay == 0)
+                currentReconnectDelay = 200;
+            else {
+                long maxDelay = spi.failureDetectionTimeoutEnabled()
+                    ? spi.clientFailureDetectionTimeout()
+                    : IgniteConfiguration.DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT;
+
+                currentReconnectDelay = Math.min(maxDelay, (int)(currentReconnectDelay * 1.5));
+            }
+
+            if (currentReconnectDelay != 0) {
+                ThreadLocalRandom random = ThreadLocalRandom.current();
+
+                Thread.sleep(random.nextLong(currentReconnectDelay / 2, currentReconnectDelay));
+            }
+
+            lastReconnectTimestamp = U.currentTimeMillis();
+        }
+
+        /**
          *
          */
         private void onDisconnected() {
@@ -2483,21 +2535,31 @@
          * @param top Topology snapshot.
          * @param data Optional custom message data.
          */
-        private void notifyDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> top,
-            @Nullable DiscoverySpiCustomMessage data) {
+        private void notifyDiscovery(
+            int type,
+            long topVer,
+            ClusterNode node,
+            Collection<ClusterNode> top,
+            @Nullable DiscoverySpiCustomMessage data
+        ) {
             DiscoverySpiListener lsnr = spi.lsnr;
 
-            DebugLogger log = type == EVT_NODE_METRICS_UPDATED ? traceLog : debugLog;
+            DebugLogger debugLog = type == EVT_NODE_METRICS_UPDATED ? traceLog : ClientImpl.this.debugLog;
 
             if (lsnr != null) {
-                if (log.isDebugEnabled())
-                    log.debug("Discovery notification [node=" + node + ", type=" + U.gridEventName(type) +
+                if (debugLog.isDebugEnabled())
+                    debugLog.debug("Discovery notification [node=" + node + ", type=" + U.gridEventName(type) +
                         ", topVer=" + topVer + ']');
 
-                lsnr.onDiscovery(type, topVer, node, top, new TreeMap<>(topHist), data);
+                try {
+                    lsnr.onDiscovery(type, topVer, node, top, new TreeMap<>(topHist), data).get();
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IgniteException("Failed to wait for discovery listener notification", e);
+                }
             }
-            else if (log.isDebugEnabled())
-                log.debug("Skipped discovery notification [node=" + node + ", type=" + U.gridEventName(type) +
+            else if (debugLog.isDebugEnabled())
+                debugLog.debug("Skipped discovery notification [node=" + node + ", type=" + U.gridEventName(type) +
                     ", topVer=" + topVer + ']');
         }
 
@@ -2569,7 +2631,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return sock.toString();
         }
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
index 4ba071c..f82af61 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/ServerImpl.java
@@ -70,6 +70,7 @@
 import org.apache.ignite.failure.FailureContext;
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteNodeAttributes;
 import org.apache.ignite.internal.IgnitionEx;
@@ -577,7 +578,7 @@
         if (log.isInfoEnabled())
             log.info("Finished node ping [nodeId=" + nodeId + ", res=" + res + ", time=" + (end - start) + "ms]");
 
-        if (!res && !node.isClient() && nodeAlive(nodeId)) {
+        if (!res && node.clientRouterNodeId() == null && nodeAlive(nodeId)) {
             LT.warn(log, "Failed to ping node (status check will be initiated): " + nodeId);
 
             msgWorker.addMessage(new TcpDiscoveryStatusCheckMessage(locNode, node.id()));
@@ -600,7 +601,7 @@
 
         UUID clientNodeId = null;
 
-        if (node.isClient()) {
+        if (node.clientRouterNodeId() != null) {
             clientNodeId = node.id();
 
             node = ring.node(node.clientRouterNodeId());
@@ -1985,7 +1986,7 @@
                         ring.allNodes(),
                         new C1<TcpDiscoveryNode, Collection<InetSocketAddress>>() {
                             @Override public Collection<InetSocketAddress> apply(TcpDiscoveryNode node) {
-                                return !node.isClient() ? spi.getNodeAddresses(node) :
+                                return node.clientRouterNodeId() == null ? spi.getNodeAddresses(node) :
                                     Collections.<InetSocketAddress>emptyList();
                             }
                         }
@@ -2174,7 +2175,7 @@
 
                 TcpDiscoveryNode node = addedMsg.node();
 
-                if (node.isClient() && !msgs.contains(msg)) {
+                if (node.clientRouterNodeId() != null && !msgs.contains(msg)) {
                     Collection<TcpDiscoveryNode> allNodes = ring.allNodes();
 
                     Collection<TcpDiscoveryNode> top = new ArrayList<>(allNodes.size());
@@ -2263,7 +2264,7 @@
         @Nullable Collection<TcpDiscoveryAbstractMessage> messages(@Nullable IgniteUuid lastMsgId,
             TcpDiscoveryNode node)
         {
-            assert node != null && node.isClient() : node;
+            assert node != null && node.clientRouterNodeId() != null : node;
 
             if (lastMsgId == null) {
                 // Client connection failed before it received TcpDiscoveryNodeAddedMessage.
@@ -2511,7 +2512,7 @@
          *
          * @return Non-discarded messages iterator.
          */
-        public Iterator<TcpDiscoveryAbstractMessage> iterator() {
+        @Override public Iterator<TcpDiscoveryAbstractMessage> iterator() {
             return new SkipIterator();
         }
 
@@ -2822,7 +2823,7 @@
                 processDiscardMessage((TcpDiscoveryDiscardMessage)msg);
 
             else if (msg instanceof TcpDiscoveryCustomEventMessage)
-                processCustomMessage((TcpDiscoveryCustomEventMessage)msg);
+                processCustomMessage((TcpDiscoveryCustomEventMessage)msg, false);
 
             else if (msg instanceof TcpDiscoveryClientPingRequest)
                 processClientPingRequest((TcpDiscoveryClientPingRequest)msg);
@@ -3681,9 +3682,7 @@
                         if (subj == null) {
                             // Node has not pass authentication.
                             LT.warn(log, "Authentication failed [nodeId=" + node.id() +
-                                    ", addrs=" + U.addressesAsString(node) + ']',
-                                "Authentication failed [nodeId=" + U.id8(node.id()) + ", addrs=" +
-                                    U.addressesAsString(node) + ']');
+                                ", addrs=" + U.addressesAsString(node) + ']');
 
                             // Always output in debug.
                             if (log.isDebugEnabled())
@@ -3712,14 +3711,11 @@
                             if (!(subj instanceof Serializable)) {
                                 // Node has not pass authentication.
                                 LT.warn(log, "Authentication subject is not Serializable [nodeId=" + node.id() +
-                                        ", addrs=" + U.addressesAsString(node) + ']',
-                                    "Authentication subject is not Serializable [nodeId=" + U.id8(node.id()) +
-                                        ", addrs=" +
-                                        U.addressesAsString(node) + ']');
+                                    ", addrs=" + U.addressesAsString(node) + ']');
 
                                 authFailedMsg = "Authentication subject is not serializable";
                             }
-                            else if (!node.isClient() &&
+                            else if (node.clientRouterNodeId() == null &&
                                 !subj.systemOperationAllowed(SecurityPermission.JOIN_AS_SERVER))
                                 authFailedMsg = "Node is not authorised to join as a server node";
 
@@ -4128,13 +4124,13 @@
          */
         private void trySendMessageDirectly(TcpDiscoveryNode node, TcpDiscoveryAbstractMessage msg)
             throws IgniteSpiException {
-            if (node.isClient()) {
+            if (node.clientRouterNodeId() != null) {
                 TcpDiscoveryNode routerNode = ring.node(node.clientRouterNodeId());
 
                 if (routerNode == null)
                     throw new IgniteSpiException("Router node for client does not exist: " + node);
 
-                if (routerNode.isClient())
+                if (routerNode.clientRouterNodeId() != null)
                     throw new IgniteSpiException("Router node is a client node: " + node);
 
                 if (routerNode.id().equals(getLocalNodeId())) {
@@ -4216,7 +4212,7 @@
                     TcpDiscoveryNodeAddFinishedMessage addFinishMsg = new TcpDiscoveryNodeAddFinishedMessage(locNodeId,
                         node.id());
 
-                    if (node.isClient()) {
+                    if (node.clientRouterNodeId() != null) {
                         addFinishMsg.clientDiscoData(msg.gridDiscoveryData());
 
                         addFinishMsg.clientNodeAttributes(node.attributes());
@@ -4311,9 +4307,7 @@
                             if (!permissionsEqual(coordSubj.subject().permissions(), subj.subject().permissions())) {
                                 // Node has not pass authentication.
                                 LT.warn(log, "Authentication failed [nodeId=" + node.id() +
-                                        ", addrs=" + U.addressesAsString(node) + ']',
-                                    "Authentication failed [nodeId=" + U.id8(node.id()) + ", addrs=" +
-                                        U.addressesAsString(node) + ']');
+                                    ", addrs=" + U.addressesAsString(node) + ']');
 
                                 // Always output in debug.
                                 if (log.isDebugEnabled())
@@ -4417,9 +4411,7 @@
                                         LT.warn(log,
                                             "Failed to authenticate local node " +
                                                 "(local authentication result is different from rest of topology) " +
-                                                "[nodeId=" + node.id() + ", addrs=" + U.addressesAsString(node) + ']',
-                                            "Authentication failed [nodeId=" + U.id8(node.id()) +
-                                                ", addrs=" + U.addressesAsString(node) + ']');
+                                                "[nodeId=" + node.id() + ", addrs=" + U.addressesAsString(node) + ']');
 
                                         joinRes.set(authFail);
 
@@ -4609,7 +4601,7 @@
                     notifyDiscovery(EVT_NODE_JOINED, topVer, node);
 
                 try {
-                    if (spi.ipFinder.isShared() && locNodeCoord && !node.isClient())
+                    if (spi.ipFinder.isShared() && locNodeCoord && node.clientRouterNodeId() == null)
                         spi.ipFinder.registerAddresses(node.socketAddresses());
                 }
                 catch (IgniteSpiException e) {
@@ -5415,8 +5407,9 @@
 
         /**
          * @param msg Message.
+         * @param waitForNotification If {@code true} then thread will wait when discovery event notification has finished.
          */
-        private void processCustomMessage(TcpDiscoveryCustomEventMessage msg) {
+        private void processCustomMessage(TcpDiscoveryCustomEventMessage msg, boolean waitForNotification) {
             if (isLocalNodeCoordinator()) {
                 boolean delayMsg;
 
@@ -5450,14 +5443,14 @@
                     msg.topologyVersion(ring.topologyVersion());
 
                     if (pendingMsgs.procCustomMsgs.add(msg.id())) {
-                        notifyDiscoveryListener(msg);
+                        notifyDiscoveryListener(msg, waitForNotification);
 
                         if (sendMessageToRemotes(msg))
                             sendMessageAcrossRing(msg);
                         else {
                             registerPendingMessage(msg);
 
-                            processCustomMessage(msg);
+                            processCustomMessage(msg, waitForNotification);
                         }
                     }
 
@@ -5487,7 +5480,7 @@
 
                                 ackMsg.topologyVersion(msg.topologyVersion());
 
-                                processCustomMessage(ackMsg);
+                                processCustomMessage(ackMsg, waitForNotification);
                             }
                             catch (IgniteCheckedException e) {
                                 U.error(log, "Failed to marshal discovery custom message.", e);
@@ -5514,7 +5507,7 @@
                     assert msg.topologyVersion() == ring.topologyVersion() :
                         "msg: " + msg + ", topVer=" + ring.topologyVersion();
 
-                    notifyDiscoveryListener(msg);
+                    notifyDiscoveryListener(msg, waitForNotification);
                 }
 
                 if (msg.verified())
@@ -5590,7 +5583,7 @@
                 TcpDiscoveryCustomEventMessage msg;
 
                 while ((msg = pollPendingCustomeMessage()) != null)
-                    processCustomMessage(msg);
+                    processCustomMessage(msg, true);
             }
         }
 
@@ -5605,8 +5598,9 @@
 
         /**
          * @param msg Custom message.
+         * @param waitForNotification If {@code true} thread will wait when discovery event notification has finished.
          */
-        private void notifyDiscoveryListener(TcpDiscoveryCustomEventMessage msg) {
+        private void notifyDiscoveryListener(TcpDiscoveryCustomEventMessage msg, boolean waitForNotification) {
             DiscoverySpiListener lsnr = spi.lsnr;
 
             TcpDiscoverySpiState spiState = spiStateCopy();
@@ -5622,23 +5616,40 @@
             if (lsnr != null && (spiState == CONNECTED || spiState == DISCONNECTING)) {
                 TcpDiscoveryNode node = ring.node(msg.creatorNodeId());
 
-                if (node != null) {
+                if (node == null)
+                    return;
+
+                DiscoverySpiCustomMessage msgObj;
+
+                try {
+                    msgObj = msg.message(spi.marshaller(), U.resolveClassLoader(spi.ignite().configuration()));
+                }
+                catch (Throwable t) {
+                    throw new IgniteException("Failed to unmarshal discovery custom message: " + msg, t);
+                }
+
+                IgniteInternalFuture fut = lsnr.onDiscovery(DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT,
+                    msg.topologyVersion(),
+                    node,
+                    snapshot,
+                    hist,
+                    msgObj);
+
+                if (waitForNotification || msgObj.isMutable()) {
                     try {
-                        DiscoverySpiCustomMessage msgObj = msg.message(spi.marshaller(),
-                            U.resolveClassLoader(spi.ignite().configuration()));
-
-                        lsnr.onDiscovery(DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT,
-                            msg.topologyVersion(),
-                            node,
-                            snapshot,
-                            hist,
-                            msgObj);
-
-                        if (msgObj.isMutable())
-                            msg.message(msgObj, U.marshal(spi.marshaller(), msgObj));
+                        fut.get();
                     }
-                    catch (Throwable e) {
-                        U.error(log, "Failed to unmarshal discovery custom message.", e);
+                    catch (IgniteCheckedException e) {
+                        throw new IgniteException("Failed to wait for discovery listener notification", e);
+                    }
+                }
+
+                if (msgObj.isMutable()) {
+                    try {
+                        msg.message(msgObj, U.marshal(spi.marshaller(), msgObj));
+                    }
+                    catch (Throwable t) {
+                        throw new IgniteException("Failed to marshal mutable discovery message: " + msgObj, t);
                     }
                 }
             }
@@ -6555,7 +6566,7 @@
 
             TcpDiscoveryNode node = ring.node(nodeId);
 
-            assert node == null || node.isClient();
+            assert node == null || node.clientRouterNodeId() != null;
 
             if (node != null) {
                 node.clientRouterNodeId(msg.routerNodeId());
@@ -6787,6 +6798,9 @@
         /** Current client metrics. */
         private volatile ClusterMetrics metrics;
 
+        /** Last metrics update message receive time. */
+        private volatile long lastMetricsUpdateMsgTime;
+
         /** */
         private final AtomicReference<GridFutureAdapter<Boolean>> pingFut = new AtomicReference<>();
 
@@ -6799,10 +6813,12 @@
          * @param log Logger.
          */
         private ClientMessageWorker(Socket sock, UUID clientNodeId, IgniteLogger log) {
-            super("tcp-disco-client-message-worker", log, 2000, null);
+            super("tcp-disco-client-message-worker", log, Math.max(spi.metricsUpdateFreq, 10), null);
 
             this.sock = sock;
             this.clientNodeId = clientNodeId;
+
+            lastMetricsUpdateMsgTime = U.currentTimeMillis();
         }
 
         /**
@@ -6823,6 +6839,8 @@
          * @param metrics New current client metrics.
          */
         void metrics(ClusterMetrics metrics) {
+            lastMetricsUpdateMsgTime = U.currentTimeMillis();
+
             this.metrics = metrics;
         }
 
@@ -7003,6 +7021,37 @@
 
             U.closeQuiet(sock);
         }
+
+        /** {@inheritDoc} */
+        @Override protected void noMessageLoop() {
+            if (U.currentTimeMillis() - lastMetricsUpdateMsgTime > spi.clientFailureDetectionTimeout()) {
+                TcpDiscoveryNode clientNode = ring.node(clientNodeId);
+
+                if (clientNode != null) {
+                    boolean failedNode;
+
+                    synchronized (mux) {
+                        failedNode = failedNodes.containsKey(clientNode);
+                    }
+
+                    if (!failedNode) {
+                        String msg = "Client node considered as unreachable " +
+                            "and will be dropped from cluster, " +
+                            "because no metrics update messages received in interval: " +
+                            "TcpDiscoverySpi.clientFailureDetectionTimeout() ms. " +
+                            "It may be caused by network problems or long GC pause on client node, try to increase this " +
+                            "parameter. " +
+                            "[nodeId=" + clientNodeId +
+                            ", clientFailureDetectionTimeout=" + spi.clientFailureDetectionTimeout() +
+                            ']';
+
+                        failNode(clientNodeId, msg);
+
+                        U.warn(log, msg);
+                    }
+                }
+            }
+        }
     }
 
     /** */
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
index c81c482..d8dc8f8 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpi.java
@@ -522,7 +522,9 @@
      * of {@link IgniteConfiguration#isClientMode()}
      *
      * @return forceServerMode flag.
+     * @deprecated Will be removed at 3.0.
      */
+    @Deprecated
     public boolean isForceServerMode() {
         return forceSrvMode;
     }
@@ -535,8 +537,10 @@
      *
      * @param forceSrvMode forceServerMode flag.
      * @return {@code this} for chaining.
+     * @deprecated Will be removed at 3.0.
      */
     @IgniteSpiConfiguration(optional = true)
+    @Deprecated
     public TcpDiscoverySpi setForceServerMode(boolean forceSrvMode) {
         this.forceSrvMode = forceSrvMode;
 
@@ -1985,9 +1989,9 @@
         DiscoveryDataBag dataBag;
 
         if (dataPacket.joiningNodeId().equals(locNode.id()))
-            dataBag = dataPacket.unmarshalGridData(marshaller(), clsLdr, locNode.isClient(), log);
+            dataBag = dataPacket.unmarshalGridData(marshaller(), clsLdr, locNode.clientRouterNodeId() != null, log);
         else
-            dataBag = dataPacket.unmarshalJoiningNodeData(marshaller(), clsLdr, locNode.isClient(), log);
+            dataBag = dataPacket.unmarshalJoiningNodeData(marshaller(), clsLdr, locNode.clientRouterNodeId() != null, log);
 
 
         exchange.onExchange(dataBag);
@@ -2191,7 +2195,7 @@
     }
 
     /** {@inheritDoc} */
-    public void clientReconnect() throws IgniteSpiException {
+    @Override public void clientReconnect() throws IgniteSpiException {
         impl.reconnect();
     }
 
@@ -2278,7 +2282,7 @@
      * <p>
      * This method is intended for test purposes only.
      */
-    public void simulateNodeFailure() {
+    @Override public void simulateNodeFailure() {
         impl.simulateNodeFailure();
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java
index 55fe4e6..13d1006 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNode.java
@@ -40,7 +40,6 @@
 import org.apache.ignite.internal.util.tostring.GridToStringExclude;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgnitePredicate;
@@ -230,7 +229,7 @@
      *
      * @param consistentId Consistent globally unique node ID.
      */
-    public void setConsistentId(Serializable consistentId) {
+    @Override public void setConsistentId(Serializable consistentId) {
         this.consistentId = consistentId;
 
         final Map<String, Object> map = new HashMap<>(attrs);
@@ -292,14 +291,14 @@
     }
 
     /** {@inheritDoc} */
-    public void setMetrics(ClusterMetrics metrics) {
+    @Override public void setMetrics(ClusterMetrics metrics) {
         assert metrics != null;
 
         this.metrics = metrics;
     }
 
     /** {@inheritDoc} */
-    public Map<Integer, CacheMetrics> cacheMetrics() {
+    @Override public Map<Integer, CacheMetrics> cacheMetrics() {
         if (metricsProvider != null) {
             Map<Integer, CacheMetrics> cacheMetrics0 = metricsProvider.cacheMetrics();
 
@@ -312,7 +311,7 @@
     }
 
     /** {@inheritDoc} */
-    public void setCacheMetrics(Map<Integer, CacheMetrics> cacheMetrics) {
+    @Override public void setCacheMetrics(Map<Integer, CacheMetrics> cacheMetrics) {
         this.cacheMetrics = cacheMetrics != null ? cacheMetrics : Collections.<Integer, CacheMetrics>emptyMap();
     }
 
@@ -467,7 +466,15 @@
 
     /** {@inheritDoc} */
     @Override public boolean isClient() {
-        return clientRouterNodeId != null;
+        if (!cacheCliInit) {
+            Boolean clientModeAttr = ((ClusterNode) this).attribute(IgniteNodeAttributes.ATTR_CLIENT_MODE);
+
+            cacheCli = clientModeAttr != null && clientModeAttr;
+
+            cacheCliInit = true;
+        }
+
+        return cacheCli;
     }
 
     /**
@@ -529,17 +536,6 @@
     }
 
     /** {@inheritDoc} */
-    public boolean isCacheClient() {
-        if (!cacheCliInit) {
-            cacheCli = CU.clientNodeDirect(this);
-
-            cacheCliInit = true;
-        }
-
-        return cacheCli;
-    }
-
-    /** {@inheritDoc} */
     @Override public int compareTo(@Nullable TcpDiscoveryNode node) {
         if (node == null)
             return 1;
@@ -627,7 +623,7 @@
         ver = (IgniteProductVersion)in.readObject();
         clientRouterNodeId = U.readUuid(in);
 
-        if (isClient())
+        if (clientRouterNodeId() != null)
             consistentId = consistentIdAttr != null ? consistentIdAttr : id;
         else
             consistentId = consistentIdAttr != null ? consistentIdAttr : U.consistentId(addrs, discPort);
diff --git a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNodesRing.java b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNodesRing.java
index 7fc394b..3a8ded7 100644
--- a/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNodesRing.java
+++ b/modules/core/src/main/java/org/apache/ignite/spi/discovery/tcp/internal/TcpDiscoveryNodesRing.java
@@ -60,7 +60,9 @@
     /** Client nodes filter. */
     private static final PN CLIENT_NODES = new PN() {
         @Override public boolean apply(ClusterNode node) {
-            return node.isClient();
+            assert node instanceof TcpDiscoveryNode : node;
+
+            return ((TcpDiscoveryNode) node).clientRouterNodeId() != null;
         }
     };
 
@@ -199,7 +201,7 @@
                 return false;
 
             for (TcpDiscoveryNode node : nodes)
-                if (!node.isClient() && !node.id().equals(locNode.id()))
+                if (node.clientRouterNodeId() == null && !node.id().equals(locNode.id()))
                     return true;
 
             return false;
@@ -642,7 +644,7 @@
 
         return F.view(nodes, new P1<TcpDiscoveryNode>() {
             @Override public boolean apply(TcpDiscoveryNode node) {
-                return !node.isClient() && (excludedEmpty || !excluded.contains(node));
+                return node.clientRouterNodeId() == null && (excludedEmpty || !excluded.contains(node));
             }
         });
     }
diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/DelegatingSSLContextSpi.java b/modules/core/src/main/java/org/apache/ignite/ssl/DelegatingSSLContextSpi.java
new file mode 100644
index 0000000..d8621f2
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/ssl/DelegatingSSLContextSpi.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ssl;
+
+import java.security.KeyManagementException;
+import java.security.SecureRandom;
+import javax.net.ssl.KeyManager;
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLContextSpi;
+import javax.net.ssl.SSLEngine;
+import javax.net.ssl.SSLParameters;
+import javax.net.ssl.SSLServerSocketFactory;
+import javax.net.ssl.SSLSessionContext;
+import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManager;
+
+/** */
+class DelegatingSSLContextSpi extends SSLContextSpi {
+
+    /** */
+    private final SSLContext delegate;
+
+    /** */
+    private final SSLParameters parameters;
+
+    /** */
+    DelegatingSSLContextSpi(SSLContext delegate, SSLParameters parameters) {
+        this.delegate = delegate;
+        this.parameters = parameters;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void engineInit(KeyManager[] keyManagers, TrustManager[] trustManagers,
+        SecureRandom secureRandom) throws KeyManagementException {
+        delegate.init(keyManagers, trustManagers, secureRandom);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLSocketFactory engineGetSocketFactory() {
+        return new SSLSocketFactoryWrapper(delegate.getSocketFactory(), parameters);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLServerSocketFactory engineGetServerSocketFactory() {
+        return new SSLServerSocketFactoryWrapper(delegate.getServerSocketFactory(),
+            parameters);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLEngine engineCreateSSLEngine() {
+        final SSLEngine engine = delegate.createSSLEngine();
+
+        if (parameters != null)
+            engine.setSSLParameters(parameters);
+
+        return engine;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLEngine engineCreateSSLEngine(String s, int i) {
+        final SSLEngine engine = delegate.createSSLEngine();
+
+        if (parameters != null)
+            engine.setSSLParameters(parameters);
+
+        return engine;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLSessionContext engineGetServerSessionContext() {
+        return delegate.getServerSessionContext();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLSessionContext engineGetClientSessionContext() {
+        return delegate.getClientSessionContext();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLParameters engineGetDefaultSSLParameters() {
+        return delegate.getDefaultSSLParameters();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected SSLParameters engineGetSupportedSSLParameters() {
+        return delegate.getSupportedSSLParameters();
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SSLContextWrapper.java b/modules/core/src/main/java/org/apache/ignite/ssl/SSLContextWrapper.java
new file mode 100644
index 0000000..901d42b
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/ssl/SSLContextWrapper.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ssl;
+
+import javax.net.ssl.SSLContext;
+import javax.net.ssl.SSLParameters;
+
+/** */
+class SSLContextWrapper extends SSLContext {
+    /** */
+    SSLContextWrapper(SSLContext delegate, SSLParameters sslParameters) {
+        super(new DelegatingSSLContextSpi(delegate, sslParameters),
+            delegate.getProvider(),
+            delegate.getProtocol());
+    }
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SSLServerSocketFactoryWrapper.java b/modules/core/src/main/java/org/apache/ignite/ssl/SSLServerSocketFactoryWrapper.java
new file mode 100644
index 0000000..ad80f3c
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/ssl/SSLServerSocketFactoryWrapper.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ssl;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import javax.net.ssl.SSLParameters;
+import javax.net.ssl.SSLServerSocket;
+import javax.net.ssl.SSLServerSocketFactory;
+
+/** */
+class SSLServerSocketFactoryWrapper extends SSLServerSocketFactory {
+
+    /** */
+    private final SSLServerSocketFactory delegate;
+    /** */
+    private final SSLParameters parameters;
+
+    /** */
+    SSLServerSocketFactoryWrapper(SSLServerSocketFactory delegate, SSLParameters parameters) {
+        this.delegate = delegate;
+        this.parameters = parameters;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String[] getDefaultCipherSuites() {
+        return delegate.getDefaultCipherSuites();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String[] getSupportedCipherSuites() {
+        return delegate.getSupportedCipherSuites();
+    }
+
+    /** {@inheritDoc} */
+    @Override public ServerSocket createServerSocket(int port) throws IOException {
+        SSLServerSocket srvSock = (SSLServerSocket)delegate.createServerSocket(port);
+
+        if (parameters != null)
+            srvSock.setSSLParameters(parameters);
+
+        return srvSock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ServerSocket createServerSocket(int port, int backlog) throws IOException {
+        SSLServerSocket srvSock = (SSLServerSocket)delegate.createServerSocket(port, backlog);
+
+        srvSock.setSSLParameters(parameters);
+
+        return srvSock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ServerSocket createServerSocket(int port, int backlog, InetAddress locAddr) throws IOException {
+        SSLServerSocket srvSock = (SSLServerSocket)delegate.createServerSocket(port, backlog, locAddr);
+
+        if (parameters != null)
+            srvSock.setSSLParameters(parameters);
+
+        return srvSock;
+    }
+
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SSLSocketFactoryWrapper.java b/modules/core/src/main/java/org/apache/ignite/ssl/SSLSocketFactoryWrapper.java
new file mode 100644
index 0000000..bfe6d0d
--- /dev/null
+++ b/modules/core/src/main/java/org/apache/ignite/ssl/SSLSocketFactoryWrapper.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ssl;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.Socket;
+import javax.net.ssl.SSLParameters;
+import javax.net.ssl.SSLSocket;
+import javax.net.ssl.SSLSocketFactory;
+
+/** */
+class SSLSocketFactoryWrapper extends SSLSocketFactory {
+
+    /** */
+    private final SSLSocketFactory delegate;
+
+    /** */
+    private final SSLParameters parameters;
+
+    /** */
+    SSLSocketFactoryWrapper(SSLSocketFactory delegate, SSLParameters parameters) {
+        this.delegate = delegate;
+        this.parameters = parameters;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String[] getDefaultCipherSuites() {
+        return delegate.getDefaultCipherSuites();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String[] getSupportedCipherSuites() {
+        return delegate.getSupportedCipherSuites();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket() throws IOException {
+        SSLSocket sock = (SSLSocket)delegate.createSocket();
+
+        if (parameters != null)
+            sock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket(Socket sock, String host, int port, boolean autoClose) throws IOException {
+        SSLSocket sslSock = (SSLSocket)delegate.createSocket(sock, host, port, autoClose);
+
+        if (parameters != null)
+            sslSock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket(String host, int port) throws IOException {
+        SSLSocket sock = (SSLSocket)delegate.createSocket(host, port);
+
+        if (parameters != null)
+            sock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket(String host, int port, InetAddress locAddr, int locPort) throws IOException {
+        SSLSocket sock = (SSLSocket)delegate.createSocket(host, port, locAddr, locPort);
+
+        if (parameters != null)
+            sock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket(InetAddress addr, int port) throws IOException {
+        SSLSocket sock = (SSLSocket)delegate.createSocket(addr, port);
+
+        if (parameters != null)
+            sock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Socket createSocket(InetAddress addr, int port, InetAddress locAddr,
+        int locPort) throws IOException {
+        SSLSocket sock = (SSLSocket)delegate.createSocket(addr, port, locAddr, locPort);
+
+        if (parameters != null)
+            sock.setSSLParameters(parameters);
+
+        return sock;
+    }
+
+}
diff --git a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java
index 06edd70..edff5c9 100644
--- a/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java
+++ b/modules/core/src/main/java/org/apache/ignite/ssl/SslContextFactory.java
@@ -30,6 +30,7 @@
 import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLException;
+import javax.net.ssl.SSLParameters;
 import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
@@ -89,6 +90,12 @@
     /** Trust managers. */
     private TrustManager[] trustMgrs;
 
+    /** Enabled cipher suites. */
+    private String[] cipherSuites;
+
+    /** Enabled cipher suites. */
+    private String[] protocols;
+
     /**
      * Gets key store type used for context creation.
      *
@@ -281,6 +288,38 @@
     }
 
     /**
+     * Sets enabled cipher suites.
+     * @param cipherSuites enabled cipher suites.
+     */
+    public void setCipherSuites(String... cipherSuites) {
+        this.cipherSuites = cipherSuites;
+    }
+
+    /**
+     * Gets enabled cipher suites
+     * @return enabled cipher suites
+     */
+    public String[] getCipherSuites() {
+        return cipherSuites;
+    }
+
+    /**
+     * Gets enabled cipher suites
+     * @return enabled cipher suites
+     */
+    public String[] getProtocols() {
+        return protocols;
+    }
+
+    /**
+     * Sets enabled protocols.
+     * @param protocols enabled protocols.
+     */
+    public void setProtocols(String... protocols) {
+        this.protocols = protocols;
+    }
+
+    /**
      * Creates SSL context based on factory settings.
      *
      * @return Initialized SSL context.
@@ -310,6 +349,18 @@
 
             SSLContext ctx = SSLContext.getInstance(proto);
 
+            if (cipherSuites != null || protocols != null) {
+                SSLParameters sslParameters = new SSLParameters();
+
+                if (cipherSuites != null)
+                    sslParameters.setCipherSuites(cipherSuites);
+
+                if (protocols != null)
+                    sslParameters.setProtocols(protocols);
+
+                ctx = new SSLContextWrapper(ctx, sslParameters);
+            }
+
             ctx.init(keyMgrFactory.getKeyManagers(), mgrs, null);
 
             return ctx;
@@ -426,7 +477,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return getClass().getSimpleName() + parameters();
     }
 
diff --git a/modules/core/src/main/java/org/apache/ignite/thread/IgniteThread.java b/modules/core/src/main/java/org/apache/ignite/thread/IgniteThread.java
index 70b75e3..6f65e0e 100644
--- a/modules/core/src/main/java/org/apache/ignite/thread/IgniteThread.java
+++ b/modules/core/src/main/java/org/apache/ignite/thread/IgniteThread.java
@@ -56,6 +56,12 @@
     /** */
     private final byte plc;
 
+    /** */
+    private boolean executingEntryProcessor;
+
+    /** */
+    private boolean holdsTopLock;
+
     /**
      * Creates thread with given worker.
      *
@@ -158,9 +164,46 @@
     }
 
     /**
+     * @return {@code True} if thread is currently executing entry processor.
+     */
+    public boolean executingEntryProcessor() {
+        return executingEntryProcessor;
+    }
+
+    /**
+     * @return {@code True} if thread is currently holds topology lock.
+     */
+    public boolean holdsTopLock() {
+        return holdsTopLock;
+    }
+
+    /**
+     * Callback before entry processor execution is started.
+     */
+    public static void onEntryProcessorEntered(boolean holdsTopLock) {
+        Thread curThread = Thread.currentThread();
+
+        if (curThread instanceof IgniteThread) {
+            ((IgniteThread)curThread).executingEntryProcessor = true;
+
+            ((IgniteThread)curThread).holdsTopLock = holdsTopLock;
+        }
+    }
+
+    /**
+     * Callback after entry processor execution is finished.
+     */
+    public static void onEntryProcessorLeft() {
+        Thread curThread = Thread.currentThread();
+
+        if (curThread instanceof IgniteThread)
+            ((IgniteThread)curThread).executingEntryProcessor = false;
+    }
+
+    /**
      * @return IgniteThread or {@code null} if current thread is not an instance of IgniteThread.
      */
-    public static IgniteThread current(){
+    public static IgniteThread current() {
         Thread thread = Thread.currentThread();
 
         return thread.getClass() == IgniteThread.class || thread instanceof IgniteThread ?
diff --git a/modules/core/src/main/java/org/jsr166/ConcurrentLinkedHashMap.java b/modules/core/src/main/java/org/jsr166/ConcurrentLinkedHashMap.java
index 68469b3..5c9b4ac 100644
--- a/modules/core/src/main/java/org/jsr166/ConcurrentLinkedHashMap.java
+++ b/modules/core/src/main/java/org/jsr166/ConcurrentLinkedHashMap.java
@@ -988,29 +988,6 @@
 
             return oldVal;
         }
-
-        /**
-         *
-         */
-        void clear() {
-            if (cnt != 0) {
-                writeLock().lock();
-
-                try {
-                    HashEntry<K, V>[] tab = tbl;
-
-                    for (int i = 0; i < tab.length ; i++)
-                        tab[i] = null;
-
-                    ++modCnt;
-
-                    cnt = 0; // write-volatile
-                }
-                finally {
-                    writeLock().unlock();
-                }
-            }
-        }
     }
 
     /* ---------------- Public operations -------------- */
@@ -1571,8 +1548,7 @@
      * Removes all of the mappings from this map.
      */
     @Override public void clear() {
-        for (Segment<K, V> segment : segments)
-            segment.clear();
+        throw new UnsupportedOperationException();
     }
 
     /**
diff --git a/modules/core/src/main/resources/META-INF/classnames.properties b/modules/core/src/main/resources/META-INF/classnames.properties
index d4bbf0b..c105421 100644
--- a/modules/core/src/main/resources/META-INF/classnames.properties
+++ b/modules/core/src/main/resources/META-INF/classnames.properties
@@ -24,6 +24,7 @@
 org.apache.ignite.IgniteException
 org.apache.ignite.IgniteIllegalStateException
 org.apache.ignite.IgniteInterruptedException
+org.apache.ignite.IgniteJdbcThinDataSource
 org.apache.ignite.IgniteState
 org.apache.ignite.binary.BinaryInvalidTypeException
 org.apache.ignite.binary.BinaryObject
@@ -172,7 +173,9 @@
 org.apache.ignite.events.IgfsEvent
 org.apache.ignite.events.JobEvent
 org.apache.ignite.events.TaskEvent
+org.apache.ignite.events.TransactionStateChangedEvent
 org.apache.ignite.events.WalSegmentArchivedEvent
+org.apache.ignite.events.WalSegmentCompactedEvent
 org.apache.ignite.failure.FailureType
 org.apache.ignite.hadoop.HadoopInputSplit
 org.apache.ignite.hadoop.HadoopMapReducePlan
@@ -252,13 +255,19 @@
 org.apache.ignite.internal.IgniteKernal
 org.apache.ignite.internal.IgniteKernal$1
 org.apache.ignite.internal.IgniteKernal$5
-org.apache.ignite.internal.IgniteKernal$6
 org.apache.ignite.internal.IgniteMessagingImpl
 org.apache.ignite.internal.IgniteNeedReconnectException
 org.apache.ignite.internal.IgniteSchedulerImpl
 org.apache.ignite.internal.IgniteServicesImpl
+org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance$2
+org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance$3
 org.apache.ignite.internal.IgnitionEx$IgniteNamedInstance$4
 org.apache.ignite.internal.NodeStoppingException
+org.apache.ignite.internal.SecurityCredentialsAttrFilterPredicate
+org.apache.ignite.internal.TransactionMetricsMxBeanImpl
+org.apache.ignite.internal.TransactionsMXBeanImpl$1
+org.apache.ignite.internal.UnregisteredBinaryTypeException
+org.apache.ignite.internal.UnregisteredClassException
 org.apache.ignite.internal.binary.BinaryEnumObjectImpl
 org.apache.ignite.internal.binary.BinaryFieldMetadata
 org.apache.ignite.internal.binary.BinaryMetadata
@@ -321,6 +330,8 @@
 org.apache.ignite.internal.cluster.NodeOrderComparator
 org.apache.ignite.internal.cluster.NodeOrderLegacyComparator
 org.apache.ignite.internal.commandline.Command
+org.apache.ignite.internal.commandline.CommandHandler$1
+org.apache.ignite.internal.commandline.CommandHandler$2
 org.apache.ignite.internal.commandline.cache.CacheCommand
 org.apache.ignite.internal.compute.ComputeTaskCancelledCheckedException
 org.apache.ignite.internal.compute.ComputeTaskTimeoutCheckedException
@@ -379,7 +390,7 @@
 org.apache.ignite.internal.marshaller.optimized.OptimizedFieldType
 org.apache.ignite.internal.mem.IgniteOutOfMemoryException
 org.apache.ignite.internal.pagemem.impl.PageMemoryNoStoreImpl$Segment
-org.apache.ignite.internal.pagemem.wal.StorageException
+org.apache.ignite.internal.processors.cache.persistence.StorageException
 org.apache.ignite.internal.pagemem.wal.WALIterator
 org.apache.ignite.internal.pagemem.wal.WALPointer
 org.apache.ignite.internal.pagemem.wal.record.ExchangeRecord$Type
@@ -472,6 +483,7 @@
 org.apache.ignite.internal.processors.cache.ClientCacheChangeDummyDiscoveryMessage
 org.apache.ignite.internal.processors.cache.ClusterCachesInfo$1$1
 org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch
+org.apache.ignite.internal.processors.cache.DynamicCacheChangeFailureMessage
 org.apache.ignite.internal.processors.cache.DynamicCacheChangeRequest
 org.apache.ignite.internal.processors.cache.EntryProcessorResourceInjectorProxy
 org.apache.ignite.internal.processors.cache.ExchangeActions$1
@@ -501,7 +513,6 @@
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$51
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$53
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$54
-org.apache.ignite.internal.processors.cache.GridCacheAdapter$54$1
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$55
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$56
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$6
@@ -515,6 +526,7 @@
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$GlobalClearAllNearJob
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$GlobalClearKeySetJob
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$GlobalClearKeySetNearJob
+org.apache.ignite.internal.processors.cache.GridCacheAdapter$InvokeAllTimeStatClosure
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$LoadCacheClosure
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$LoadCacheJob
 org.apache.ignite.internal.processors.cache.GridCacheAdapter$LoadCacheJobV2
@@ -643,6 +655,7 @@
 org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl
 org.apache.ignite.internal.processors.cache.QueryCursorImpl$State
 org.apache.ignite.internal.processors.cache.StoredCacheData
+org.apache.ignite.internal.processors.cache.TxTimeoutOnPartitionMapExchangeChangeMessage
 org.apache.ignite.internal.processors.cache.WalStateAbstractMessage
 org.apache.ignite.internal.processors.cache.WalStateAckMessage
 org.apache.ignite.internal.processors.cache.WalStateFinishMessage
@@ -709,6 +722,7 @@
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockFuture$1
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockFuture$2
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockFuture$3
+org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockFuture$4
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockRequest
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockResponse
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState
@@ -734,6 +748,7 @@
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal$1
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal$2
+org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocal$3
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter$1
 org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter$2
@@ -826,7 +841,6 @@
 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$2
 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$3
 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$4
-org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$5
 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$LockTimeoutObject$1
 org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedLockFuture$MiniFuture$1
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CacheGroupAffinityMessage
@@ -853,7 +867,8 @@
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$6
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$7
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$8
-org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$8$1$1
+org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$9
+org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$9$1$1
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$ExchangeLocalState
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture$ExchangeType
 org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage
@@ -889,7 +904,6 @@
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$2
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$3
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$4
-org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$5
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$LockTimeoutObject$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockFuture$MiniFuture$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest
@@ -916,13 +930,13 @@
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache$2
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture$2
+org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture$3
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishFuture$FinishMiniFuture$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishResponse
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$10
-org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$11
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$12
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$13
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$14
@@ -934,15 +948,19 @@
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$2
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$20
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$21
+org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$21$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$22
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$23
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$24
+org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$25
+org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$26
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$3
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$4
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$5
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$6
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$7
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$8
+org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$9
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal$FinishClosure
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareFutureAdapter$1
 org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest
@@ -960,17 +978,18 @@
 org.apache.ignite.internal.processors.cache.local.atomic.GridLocalAtomicCache$8
 org.apache.ignite.internal.processors.cache.local.atomic.GridLocalAtomicCache$9
 org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter$RowData
-org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$12
-org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$6
-org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$9
-org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$CheckpointEntryType
+org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$2
+org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$4
+org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager$7
 org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager$WALHistoricalIterator
 org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager$1
+org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointEntryType
 org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory
 org.apache.ignite.internal.processors.cache.persistence.file.FileDownloader$1
 org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory
 org.apache.ignite.internal.processors.cache.persistence.file.PersistentStorageIOException
 org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory
+org.apache.ignite.internal.processors.cache.persistence.migration.UpgradePendingTreeToPerPartitionTask
 org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl$Segment
 org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl$ThrottlingPolicy
 org.apache.ignite.internal.processors.cache.persistence.pagemem.PagesWriteSpeedBasedThrottle$ThrottleMode
@@ -978,16 +997,18 @@
 org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotOperation
 org.apache.ignite.internal.processors.cache.persistence.snapshot.TrackingPageIsCorruptedException
 org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree$Bool
-org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree$DestroyBag
 org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree$Result
 org.apache.ignite.internal.processors.cache.persistence.tree.io.DataPageIO$EntryPart
+org.apache.ignite.internal.processors.cache.persistence.tree.reuse.LongListReuseBag
 org.apache.ignite.internal.processors.cache.persistence.wal.AbstractWalRecordsIterator
 org.apache.ignite.internal.processors.cache.persistence.wal.AbstractWalRecordsIterator$StartSeekingFilter
 org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer
+org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$7
 org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$FileArchiver$1
 org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$FileArchiver$2
 org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$FileCompressor$1
 org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager$RecordsIterator
+org.apache.ignite.internal.processors.cache.persistence.wal.FsyncModeFileWriteAheadLogManager$7
 org.apache.ignite.internal.processors.cache.persistence.wal.FsyncModeFileWriteAheadLogManager$FileArchiver$1
 org.apache.ignite.internal.processors.cache.persistence.wal.FsyncModeFileWriteAheadLogManager$FileArchiver$2
 org.apache.ignite.internal.processors.cache.persistence.wal.FsyncModeFileWriteAheadLogManager$FileCompressor$1
@@ -1023,6 +1044,7 @@
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$CacheSqlIndexMetadata
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$CacheSqlMetadata
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$CachedResult$QueueIterator
+org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$InternalScanFilter
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$MetadataJob
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$MetadataJob$1
 org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager$MetadataJob$2
@@ -1080,6 +1102,8 @@
 org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore$StoreOperation
 org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore$ValueStatus
 org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx$FinalizationStatus
+org.apache.ignite.internal.processors.cache.transactions.IgniteTransactionsImpl$1
+org.apache.ignite.internal.processors.cache.transactions.IgniteTransactionsImpl$2
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxAdapter
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxHandler$1
@@ -1121,9 +1145,14 @@
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxMap
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxMap$1
 org.apache.ignite.internal.processors.cache.transactions.IgniteTxMap$1$1
+org.apache.ignite.internal.processors.cache.transactions.TransactionEventProxyImpl
 org.apache.ignite.internal.processors.cache.transactions.TransactionMetricsAdapter
+org.apache.ignite.internal.processors.cache.transactions.TransactionMetricsAdapter$1
+org.apache.ignite.internal.processors.cache.transactions.TransactionMetricsAdapter$2
+org.apache.ignite.internal.processors.cache.transactions.TransactionMetricsAdapter$3
 org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl
 org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl$1
+org.apache.ignite.internal.processors.cache.transactions.TransactionProxyRollbackOnlyImpl
 org.apache.ignite.internal.processors.cache.transactions.TxDeadlockDetection$UniqueDeque
 org.apache.ignite.internal.processors.cache.transactions.TxEntryValueHolder
 org.apache.ignite.internal.processors.cache.transactions.TxLock
@@ -1131,23 +1160,25 @@
 org.apache.ignite.internal.processors.cache.transactions.TxLocksRequest
 org.apache.ignite.internal.processors.cache.transactions.TxLocksResponse
 org.apache.ignite.internal.processors.cache.verify.CacheInfo
-org.apache.ignite.internal.processors.cache.verify.CacheInfo$1
-org.apache.ignite.internal.processors.cache.verify.CacheInfo$2
 org.apache.ignite.internal.processors.cache.verify.CollectConflictPartitionKeysTask
 org.apache.ignite.internal.processors.cache.verify.CollectConflictPartitionKeysTask$CollectPartitionEntryHashesJob
 org.apache.ignite.internal.processors.cache.verify.ContentionClosure
 org.apache.ignite.internal.processors.cache.verify.ContentionInfo
-org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2
 org.apache.ignite.internal.processors.cache.verify.IdleVerifyDumpResult
+org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2
 org.apache.ignite.internal.processors.cache.verify.PartitionEntryHashRecord
 org.apache.ignite.internal.processors.cache.verify.PartitionHashRecord
+org.apache.ignite.internal.processors.cache.verify.PartitionHashRecordV2
 org.apache.ignite.internal.processors.cache.verify.PartitionKey
+org.apache.ignite.internal.processors.cache.verify.PartitionKeyV2
 org.apache.ignite.internal.processors.cache.verify.RetrieveConflictPartitionValuesTask
 org.apache.ignite.internal.processors.cache.verify.RetrieveConflictPartitionValuesTask$RetrieveConflictValuesJob
+org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsDumpTask
 org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTask
 org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTask$VerifyBackupPartitionsJob
+org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTaskV2
+org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTaskV2$VerifyBackupPartitionsJobV2
 org.apache.ignite.internal.processors.cache.verify.ViewCacheClosure
-org.apache.ignite.internal.processors.cache.verify.ViewCacheClosure$1
 org.apache.ignite.internal.processors.cache.version.GridCacheRawVersionedEntry
 org.apache.ignite.internal.processors.cache.version.GridCacheVersion
 org.apache.ignite.internal.processors.cache.version.GridCacheVersionConflictContext$State
@@ -1290,6 +1321,7 @@
 org.apache.ignite.internal.processors.datastructures.GridCacheSemaphoreState
 org.apache.ignite.internal.processors.datastructures.GridCacheSetHeader
 org.apache.ignite.internal.processors.datastructures.GridCacheSetHeaderKey
+org.apache.ignite.internal.processors.datastructures.GridCacheSetImpl$5
 org.apache.ignite.internal.processors.datastructures.GridCacheSetImpl$SumReducer
 org.apache.ignite.internal.processors.datastructures.GridCacheSetItemKey
 org.apache.ignite.internal.processors.datastructures.GridCacheSetProxy
@@ -1397,6 +1429,7 @@
 org.apache.ignite.internal.processors.marshaller.MarshallerMappingItem
 org.apache.ignite.internal.processors.marshaller.MissingMappingRequestMessage
 org.apache.ignite.internal.processors.marshaller.MissingMappingResponseMessage
+org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequestHandler$1
 org.apache.ignite.internal.processors.odbc.jdbc.JdbcStatementType
 org.apache.ignite.internal.processors.odbc.odbc.escape.OdbcEscapeType
 org.apache.ignite.internal.processors.platform.PlatformAbstractConfigurationClosure
@@ -1606,6 +1639,7 @@
 org.apache.ignite.internal.processors.task.GridTaskWorker$State
 org.apache.ignite.internal.sql.SqlLexerTokenType
 org.apache.ignite.internal.sql.SqlParseException
+org.apache.ignite.internal.sql.SqlStrictParseException
 org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException
 org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException
 org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException
@@ -1887,6 +1921,8 @@
 org.apache.ignite.internal.visor.baseline.VisorBaselineTask$VisorBaselineJob
 org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg
 org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult
+org.apache.ignite.internal.visor.baseline.VisorBaselineViewTask
+org.apache.ignite.internal.visor.baseline.VisorBaselineViewTask$VisorBaselineViewJob
 org.apache.ignite.internal.visor.binary.VisorBinaryMetadata
 org.apache.ignite.internal.visor.binary.VisorBinaryMetadataCollectorTask
 org.apache.ignite.internal.visor.binary.VisorBinaryMetadataCollectorTask$VisorBinaryCollectMetadataJob
@@ -1929,6 +1965,9 @@
 org.apache.ignite.internal.visor.cache.VisorCacheModifyTask$VisorCacheModifyJob
 org.apache.ignite.internal.visor.cache.VisorCacheModifyTaskArg
 org.apache.ignite.internal.visor.cache.VisorCacheModifyTaskResult
+org.apache.ignite.internal.visor.cache.VisorCacheNamesCollectorTask
+org.apache.ignite.internal.visor.cache.VisorCacheNamesCollectorTask$VisorCacheNamesCollectorJob
+org.apache.ignite.internal.visor.cache.VisorCacheNamesCollectorTaskResult
 org.apache.ignite.internal.visor.cache.VisorCacheNearConfiguration
 org.apache.ignite.internal.visor.cache.VisorCacheNodesTask
 org.apache.ignite.internal.visor.cache.VisorCacheNodesTask$VisorCacheNodesJob
@@ -2023,13 +2062,13 @@
 org.apache.ignite.internal.visor.misc.VisorChangeGridActiveStateTask
 org.apache.ignite.internal.visor.misc.VisorChangeGridActiveStateTask$VisorChangeGridActiveStateJob
 org.apache.ignite.internal.visor.misc.VisorChangeGridActiveStateTaskArg
+org.apache.ignite.internal.visor.misc.VisorClusterNode
 org.apache.ignite.internal.visor.misc.VisorLatestVersionTask
 org.apache.ignite.internal.visor.misc.VisorLatestVersionTask$VisorLatestVersionJob
 org.apache.ignite.internal.visor.misc.VisorNopTask
 org.apache.ignite.internal.visor.misc.VisorNopTask$VisorNopJob
 org.apache.ignite.internal.visor.misc.VisorResolveHostNameTask
 org.apache.ignite.internal.visor.misc.VisorResolveHostNameTask$VisorResolveHostNameJob
-org.apache.ignite.internal.visor.misc.VisorClusterNode
 org.apache.ignite.internal.visor.misc.VisorWalTask
 org.apache.ignite.internal.visor.misc.VisorWalTask$VisorWalJob
 org.apache.ignite.internal.visor.misc.VisorWalTaskArg
@@ -2132,18 +2171,27 @@
 org.apache.ignite.internal.visor.service.VisorServiceDescriptor
 org.apache.ignite.internal.visor.service.VisorServiceTask
 org.apache.ignite.internal.visor.service.VisorServiceTask$VisorServiceJob
-org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException
-org.apache.ignite.internal.visor.util.VisorEventMapper
-org.apache.ignite.internal.visor.util.VisorExceptionWrapper
-org.apache.ignite.internal.visor.util.VisorTaskUtils$4
 org.apache.ignite.internal.visor.tx.VisorTxInfo
 org.apache.ignite.internal.visor.tx.VisorTxOperation
 org.apache.ignite.internal.visor.tx.VisorTxProjection
 org.apache.ignite.internal.visor.tx.VisorTxSortOrder
 org.apache.ignite.internal.visor.tx.VisorTxTask
+org.apache.ignite.internal.visor.tx.VisorTxTask$1
+org.apache.ignite.internal.visor.tx.VisorTxTask$2
+org.apache.ignite.internal.visor.tx.VisorTxTask$3
+org.apache.ignite.internal.visor.tx.VisorTxTask$4
+org.apache.ignite.internal.visor.tx.VisorTxTask$5
+org.apache.ignite.internal.visor.tx.VisorTxTask$NearKillClosure
+org.apache.ignite.internal.visor.tx.VisorTxTask$RemoteKillClosure
+org.apache.ignite.internal.visor.tx.VisorTxTask$TxKillClosure
+org.apache.ignite.internal.visor.tx.VisorTxTask$VisorTxJob
 org.apache.ignite.internal.visor.tx.VisorTxTaskArg
 org.apache.ignite.internal.visor.tx.VisorTxTaskResult
-org.apache.ignite.internal.visor.verify.VisorViewCacheCmd
+org.apache.ignite.internal.visor.util.VisorClusterGroupEmptyException
+org.apache.ignite.internal.visor.util.VisorEventMapper
+org.apache.ignite.internal.visor.util.VisorExceptionWrapper
+org.apache.ignite.internal.visor.util.VisorTaskUtils$4
+org.apache.ignite.internal.visor.verify.IndexValidationIssue
 org.apache.ignite.internal.visor.verify.ValidateIndexesPartitionResult
 org.apache.ignite.internal.visor.verify.VisorContentionJobResult
 org.apache.ignite.internal.visor.verify.VisorContentionTask
@@ -2156,15 +2204,22 @@
 org.apache.ignite.internal.visor.verify.VisorIdleAnalyzeTask$VisorIdleVerifyJob$2
 org.apache.ignite.internal.visor.verify.VisorIdleAnalyzeTaskArg
 org.apache.ignite.internal.visor.verify.VisorIdleAnalyzeTaskResult
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTask
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTaskArg
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyJob
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyJob$1
 org.apache.ignite.internal.visor.verify.VisorIdleVerifyTask
 org.apache.ignite.internal.visor.verify.VisorIdleVerifyTask$VisorIdleVerifyJob
 org.apache.ignite.internal.visor.verify.VisorIdleVerifyTask$VisorIdleVerifyJob$1
 org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskArg
-org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTaskArg
 org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskResult
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskV2
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskV2$VisorIdleVerifyJobV2
+org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskV2$VisorIdleVerifyJobV2$1
 org.apache.ignite.internal.visor.verify.VisorValidateIndexesJobResult
 org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskArg
 org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskResult
+org.apache.ignite.internal.visor.verify.VisorViewCacheCmd
 org.apache.ignite.internal.visor.verify.VisorViewCacheTask
 org.apache.ignite.internal.visor.verify.VisorViewCacheTask$VisorViewCacheJob
 org.apache.ignite.internal.visor.verify.VisorViewCacheTaskArg
@@ -2187,8 +2242,10 @@
 org.apache.ignite.lang.IgniteRunnable
 org.apache.ignite.lang.IgniteUuid
 org.apache.ignite.lifecycle.LifecycleEventType
+org.apache.ignite.marshaller.MarshallerUtils$1
 org.apache.ignite.marshaller.jdk.JdkMarshallerDummySerializable
 org.apache.ignite.messaging.MessagingListenActor
+org.apache.ignite.mxbean.TransactionMetricsMxBean
 org.apache.ignite.platform.dotnet.PlatformDotNetAffinityFunction
 org.apache.ignite.platform.dotnet.PlatformDotNetCacheStoreFactory
 org.apache.ignite.platform.dotnet.PlatformDotNetCacheStoreFactoryNative
@@ -2224,7 +2281,6 @@
 org.apache.ignite.spi.collision.priorityqueue.PriorityQueueCollisionSpi$PriorityGridCollisionJobContextComparator
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$1
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$10
-org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$11
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$2$1
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$2$2
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$2$ConnectClosure
@@ -2233,6 +2289,7 @@
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$2$ConnectClosureNew$1
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$3
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$4
+org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$9
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$HandshakeClosure
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$HandshakeException
 org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$HandshakeTimeoutException
@@ -2245,6 +2302,7 @@
 org.apache.ignite.spi.discovery.tcp.ClientImpl$State
 org.apache.ignite.spi.discovery.tcp.ServerImpl$IpFinderCleaner$1
 org.apache.ignite.spi.discovery.tcp.ServerImpl$IpFinderCleaner$2
+org.apache.ignite.spi.discovery.tcp.ServerImpl$RingMessageSendState
 org.apache.ignite.spi.discovery.tcp.internal.DiscoveryDataPacket
 org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode
 org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode$1
diff --git a/modules/core/src/test/config/loaders/grid-cfg.xml b/modules/core/src/test/config/loaders/grid-cfg.xml
index 3966baf..818b050 100644
--- a/modules/core/src/test/config/loaders/grid-cfg.xml
+++ b/modules/core/src/test/config/loaders/grid-cfg.xml
@@ -28,7 +28,7 @@
     <!--
         Grid configuration.
     -->
-    <bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration" singleton="true">
+    <bean id="grid.cfg" class="org.apache.ignite.configuration.IgniteConfiguration">
         <!-- Grid with default name. -->
 
         <property name="connectorConfiguration"><null/></property>
@@ -43,9 +43,15 @@
                 </property>
             </bean>
         </property>
+
+        <property name="lifecycleBeans">
+            <list>
+                <bean class="org.apache.ignite.startup.cmdline.GridCommandLineLoaderTest.KillerLifecycleBean" />
+            </list>
+        </property>
     </bean>
 
-    <bean id="grid.cfg.2" class="org.apache.ignite.configuration.IgniteConfiguration" singleton="true">
+    <bean id="grid.cfg.2" class="org.apache.ignite.configuration.IgniteConfiguration">
         <property name="igniteInstanceName" value="gridName2"/>
 
         <property name="connectorConfiguration"><null/></property>
@@ -60,5 +66,12 @@
                 </property>
             </bean>
         </property>
+
+        <property name="lifecycleBeans">
+            <list>
+                <bean class="org.apache.ignite.startup.cmdline.GridCommandLineLoaderTest.KillerLifecycleBean" />
+            </list>
+        </property>
     </bean>
+
 </beans>
diff --git a/modules/core/src/test/config/tests.properties b/modules/core/src/test/config/tests.properties
index 718d661..89ea55e 100644
--- a/modules/core/src/test/config/tests.properties
+++ b/modules/core/src/test/config/tests.properties
@@ -45,7 +45,7 @@
 # URI string.
 deploy.uri.file2=file://freq=200@localhost/${java.io.tmpdir}/gg/verification/
 # File scanner URI for local file deployment.
-deploy.uri.file=file://localhost/@{IGNITE_HOME}/work/deployment/file/
+deploy.uri.file=file://localhost/@{IGNITE_HOME}/modules/extdata/uri/target/file/
 # FTP scanner URI for FTP deployment.
 deploy.uri.ftp=ftp://ftptest:iddqd@94.72.60.102:21/test/deployment
 # Classes scanner URI for classes deployment. Must be overridden for every user.
@@ -146,8 +146,3 @@
 
 # Hadoop home directory.
 hadoop.home=@{HADOOP_HOME}
-
-# Sharded mongo properties
-mongos.host=192.168.2.10
-mongos.port=27017
-sharded.db.name=gg-test-db
diff --git a/modules/core/src/test/java/org/apache/ignite/cache/IgniteCacheEntryProcessorSequentialCallTest.java b/modules/core/src/test/java/org/apache/ignite/cache/IgniteCacheEntryProcessorSequentialCallTest.java
index 592449d..165bca7 100644
--- a/modules/core/src/test/java/org/apache/ignite/cache/IgniteCacheEntryProcessorSequentialCallTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/cache/IgniteCacheEntryProcessorSequentialCallTest.java
@@ -252,7 +252,7 @@
      */
     public static class NotNullCacheEntryProcessor implements CacheEntryProcessor<TestKey, TestValue, Object> {
         /** {@inheritDoc} */
-        public Object process(MutableEntry entry, Object... arguments) throws EntryProcessorException {
+        @Override public Object process(MutableEntry entry, Object... arguments) throws EntryProcessorException {
             assertNotNull(entry.getValue());
 
             return null;
diff --git a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerLifecycleSelfTest.java b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerLifecycleSelfTest.java
index 5327e19..ff176c5 100644
--- a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerLifecycleSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerLifecycleSelfTest.java
@@ -252,6 +252,9 @@
 
                 tx.commit();
             }
+
+            // Force cache shutdown order
+            ignite.cache("cache-0").destroy();
         }
         finally {
             stopGrid();
@@ -396,4 +399,4 @@
             // No-op.
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerReadWriteThroughDisabledAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerReadWriteThroughDisabledAbstractTest.java
index ab4477d..774d4f7 100644
--- a/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerReadWriteThroughDisabledAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/cache/store/CacheStoreSessionListenerReadWriteThroughDisabledAbstractTest.java
@@ -46,7 +46,7 @@
  */
 public abstract class CacheStoreSessionListenerReadWriteThroughDisabledAbstractTest extends GridCacheAbstractSelfTest {
     /** {@inheritDoc} */
-    protected int gridCount() {
+    @Override protected int gridCount() {
         return 2;
     }
 
@@ -54,7 +54,7 @@
     protected final int CNT = 100;
 
     /** {@inheritDoc} */
-    protected CacheConfiguration cacheConfiguration(String igniteInstanceName) throws Exception {
+    @Override protected CacheConfiguration cacheConfiguration(String igniteInstanceName) throws Exception {
         CacheConfiguration cacheCfg = super.cacheConfiguration(igniteInstanceName);
 
         cacheCfg.setCacheStoreFactory(FactoryBuilder.factoryOf(EmptyCacheStore.class));
@@ -70,7 +70,7 @@
     }
 
     /** {@inheritDoc} */
-    protected NearCacheConfiguration nearConfiguration() {
+    @Override protected NearCacheConfiguration nearConfiguration() {
         return null;
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/client/ClientCacheConfigurationTest.java b/modules/core/src/test/java/org/apache/ignite/client/ClientCacheConfigurationTest.java
index 0c82b26..350c0dc 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/ClientCacheConfigurationTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/ClientCacheConfigurationTest.java
@@ -38,7 +38,10 @@
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.QueryIndex;
 import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertTrue;
 
@@ -46,6 +49,10 @@
  * {@link ClientConfiguration} unit tests.
  */
 public class ClientCacheConfigurationTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** Serialization/deserialization. */
     @Test
     public void testSerialization() throws IOException, ClassNotFoundException {
diff --git a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
index e6ab4d7..bcc212a 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/ClientConfigurationTest.java
@@ -27,7 +27,10 @@
 import java.util.Collections;
 import org.apache.ignite.configuration.BinaryConfiguration;
 import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertTrue;
 
@@ -35,6 +38,10 @@
  * {@link ClientConfiguration} unit tests.
  */
 public class ClientConfigurationTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** Serialization/deserialization. */
     @Test
     public void testSerialization() throws IOException, ClassNotFoundException {
diff --git a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
index b49f7e3..1272287 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/FunctionalTest.java
@@ -40,7 +40,10 @@
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.QueryIndex;
 import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -54,6 +57,10 @@
  * Thin client functional tests.
  */
 public class FunctionalTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Tested API:
      * <ul>
diff --git a/modules/core/src/test/java/org/apache/ignite/client/IgniteBinaryTest.java b/modules/core/src/test/java/org/apache/ignite/client/IgniteBinaryTest.java
index b9402cc..0f84c75 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/IgniteBinaryTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/IgniteBinaryTest.java
@@ -26,12 +26,11 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.binary.BinaryObject;
 import org.apache.ignite.binary.BinaryType;
-import org.apache.ignite.client.ClientCache;
-import org.apache.ignite.client.Config;
-import org.apache.ignite.client.IgniteClient;
-import org.apache.ignite.client.Person;
 import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -40,6 +39,10 @@
  * Ignite {@link BinaryObject} API system tests.
  */
 public class IgniteBinaryTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Unmarshalling schema-less Ignite binary objects into Java static types.
      */
diff --git a/modules/core/src/test/java/org/apache/ignite/client/LoadTest.java b/modules/core/src/test/java/org/apache/ignite/client/LoadTest.java
index 63bcf57..f97e7b7 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/LoadTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/LoadTest.java
@@ -39,10 +39,10 @@
 import org.apache.ignite.configuration.BinaryConfiguration;
 import org.apache.ignite.configuration.ClientConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.client.ClientCache;
-import org.apache.ignite.client.Config;
-import org.apache.ignite.client.IgniteClient;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -52,6 +52,10 @@
  * Load, capacity and performance tests.
  */
 public class LoadTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Test thin client in multi-thread environment.
      */
diff --git a/modules/core/src/test/java/org/apache/ignite/client/ReliabilityTest.java b/modules/core/src/test/java/org/apache/ignite/client/ReliabilityTest.java
index 147f371..f019fd9 100644
--- a/modules/core/src/test/java/org/apache/ignite/client/ReliabilityTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/client/ReliabilityTest.java
@@ -34,14 +34,12 @@
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.ScanQuery;
 import org.apache.ignite.internal.processors.platform.client.ClientStatus;
-import org.apache.ignite.client.ClientCache;
-import org.apache.ignite.client.ClientCacheConfiguration;
-import org.apache.ignite.client.IgniteClient;
 import org.apache.ignite.configuration.ClientConfiguration;
 import org.apache.ignite.internal.client.thin.ClientServerError;
-import org.apache.ignite.client.ClientConnectionException;
-import org.apache.ignite.client.LocalIgniteCluster;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
@@ -51,6 +49,10 @@
  * High Availability tests.
  */
 public class ReliabilityTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Thin clint failover.
      */
diff --git a/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java
new file mode 100644
index 0000000..7ac6108
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/client/SslParametersTest.java
@@ -0,0 +1,329 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.client;
+
+import java.util.concurrent.Callable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.configuration.ClientConnectorConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.ssl.SslContextFactory;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Tests cases when node connects to cluster with different set of cipher suites.
+ */
+public class SslParametersTest extends GridCommonAbstractTest {
+
+    public static final String TEST_CACHE_NAME = "TEST";
+    /** */
+    private volatile String[] cipherSuites;
+
+    /** */
+    private volatile String[] protocols;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setClientConnectorConfiguration(new ClientConnectorConfiguration()
+            .setSslEnabled(true)
+            .setUseIgniteSslContextFactory(true));
+
+        cfg.setSslContextFactory(createSslFactory());
+
+        CacheConfiguration ccfg = new CacheConfiguration(TEST_CACHE_NAME);
+
+        cfg.setCacheConfiguration(ccfg);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    protected ClientConfiguration getClientConfiguration() throws Exception {
+        ClientConfiguration cfg = new ClientConfiguration();
+
+        cfg.setAddresses("127.0.0.1:10800");
+
+        cfg.setSslMode(SslMode.REQUIRED);
+
+        cfg.setSslContextFactory(createSslFactory());
+
+        return cfg;
+    }
+
+    @NotNull private SslContextFactory createSslFactory() {
+        SslContextFactory factory = (SslContextFactory)GridTestUtils.sslTrustedFactory(
+            "node01", "trustone");
+
+        factory.setCipherSuites(cipherSuites);
+        factory.setProtocols(protocols);
+
+        return factory;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        protocols = null;
+        cipherSuites = null;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSameCipherSuite() throws Exception {
+        cipherSuites = new String[] {
+            "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+            "TLS_RSA_WITH_AES_128_GCM_SHA256",
+            "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+        };
+
+        startGrid();
+
+        checkSuccessfulClientStart(
+            new String[][] {
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOneCommonCipherSuite() throws Exception {
+        cipherSuites = new String[] {
+            "TLS_RSA_WITH_AES_128_GCM_SHA256",
+            "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+        };
+
+        startGrid();
+        
+        checkSuccessfulClientStart(
+            new String[][] {
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoCommonCipherSuite() throws Exception {
+        cipherSuites = new String[] {
+            "TLS_RSA_WITH_AES_128_GCM_SHA256"
+        };
+
+        startGrid();
+        
+        checkClientStartFailure(
+            new String[][] {
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNonExistentCipherSuite() throws Exception {
+        cipherSuites = new String[] {
+            "TLS_RSA_WITH_AES_128_GCM_SHA256"
+        };
+
+        startGrid();
+        
+        checkClientStartFailure(
+            new String[][] {
+                new String[] {
+                    "TLC_FAKE_CIPHER",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null,
+            IllegalArgumentException.class,
+            "Unsupported ciphersuite"
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoCommonProtocols() throws Exception {
+        protocols = new String[] {
+            "TLSv1.1",
+            "SSLv3"
+        };
+
+        startGrid();
+
+        checkClientStartFailure(
+            null,
+            new String[][] {
+                new String[] {
+                    "TLSv1",
+                    "TLSv1.2",
+                }
+            }
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNonExistentProtocol() throws Exception {
+        protocols = new String[] {
+            "SSLv3"
+        };
+
+        startGrid();
+
+        checkClientStartFailure(
+            null,
+            new String[][] {
+                new String[] {
+                    "SSLv3",
+                    "SSLvDoesNotExist"
+                }
+            },
+            IllegalArgumentException.class,
+            "SSLvDoesNotExist"
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSameProtocols() throws Exception {
+        protocols = new String[] {
+            "TLSv1.1",
+            "TLSv1.2",
+        };
+
+        startGrid();
+
+        checkSuccessfulClientStart(null,
+            new String[][] {
+                new String[] {
+                    "TLSv1.1",
+                    "TLSv1.2",
+                }
+            }
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOneCommonProtocol() throws Exception {
+        protocols = new String[] {
+            "TLSv1",
+            "TLSv1.1",
+            "TLSv1.2"
+        };
+
+        startGrid();
+
+        checkSuccessfulClientStart(null,
+            new String[][] {
+                new String[] {
+                    "TLSv1.1",
+                    "SSLv3"
+                }
+            }
+        );
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @throws Exception If failed.
+     */
+    private void checkSuccessfulClientStart(String[][] cipherSuites, String[][] protocols) throws Exception {
+        int n = Math.max(
+            cipherSuites != null ? cipherSuites.length : 0,
+            protocols != null ? protocols.length : 0);
+
+        for (int i = 0; i < n; i++) {
+            this.cipherSuites = cipherSuites != null && i < cipherSuites.length ? cipherSuites[i] : null;
+            this.protocols = protocols != null && i < protocols.length ? protocols[i] : null;
+
+            IgniteClient client = Ignition.startClient(getClientConfiguration());
+
+            client.getOrCreateCache(TEST_CACHE_NAME);
+
+            client.close();
+        }
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @throws Exception If failed.
+     */
+    private void checkClientStartFailure(String[][] cipherSuites, String[][] protocols) throws Exception {
+        checkClientStartFailure(cipherSuites, protocols, ClientConnectionException.class, "Ignite cluster is unavailable");
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @param ex expected exception class
+     * @param msg exception message
+     * @throws Exception If failed.
+     */
+    private void checkClientStartFailure(String[][] cipherSuites, String[][] protocols, Class<? extends Throwable> ex, String msg) throws Exception {
+        int n = Math.max(
+            cipherSuites != null ? cipherSuites.length : 0,
+            protocols != null ? protocols.length : 0);
+
+        for (int i = 0; i < n; i++) {
+            this.cipherSuites = cipherSuites != null && i < cipherSuites.length ? cipherSuites[i] : null;
+            this.protocols = protocols != null && i < protocols.length ? protocols[i] : null;
+
+            int finalI = i;
+
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    Ignition.startClient(getClientConfiguration());
+
+                    return null;
+                }
+            }, ex, msg);
+        }
+    }
+
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/custom/DummyEventFilterFactory.java b/modules/core/src/test/java/org/apache/ignite/custom/DummyEventFilterFactory.java
index e0688bc..103e6a8 100644
--- a/modules/core/src/test/java/org/apache/ignite/custom/DummyEventFilterFactory.java
+++ b/modules/core/src/test/java/org/apache/ignite/custom/DummyEventFilterFactory.java
@@ -25,22 +25,22 @@
 /**
  * Must be not in org.apache.ignite.internal
  */
-public class DummyEventFilterFactory implements Factory<CacheEntryEventFilter<Integer, String>> {
+public class DummyEventFilterFactory<T> implements Factory<CacheEntryEventFilter<Integer, T>> {
     /** */
     private static final long serialVersionUID = 0L;
 
     /** {@inheritDoc} */
-    @Override public CacheEntryEventFilter<Integer, String> create() {
-        return new DummyEventFilter();
+    @Override public CacheEntryEventFilter<Integer, T> create() {
+        return new DummyEventFilter<T>();
     }
 
     /**
      *
      */
-    private static class DummyEventFilter implements CacheEntryEventFilter<Integer, String> {
+    private static class DummyEventFilter<T> implements CacheEntryEventFilter<Integer, T> {
         /** {@inheritDoc} */
         @Override public boolean evaluate(
-            final CacheEntryEvent<? extends Integer, ? extends String> evt) throws CacheEntryListenerException {
+            final CacheEntryEvent<? extends Integer, ? extends T> evt) throws CacheEntryListenerException {
             return true;
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/failure/AccountTransferTransactionTest.java b/modules/core/src/test/java/org/apache/ignite/failure/AccountTransferTransactionTest.java
deleted file mode 100644
index 8d7cf15..0000000
--- a/modules/core/src/test/java/org/apache/ignite/failure/AccountTransferTransactionTest.java
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.failure;
-
-import javax.management.MBeanServer;
-import javax.management.MBeanServerInvocationHandler;
-import javax.management.ObjectName;
-import java.lang.management.ManagementFactory;
-import java.util.ArrayList;
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.CacheMode;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.cache.eviction.fifo.FifoEvictionPolicy;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.DataRegionConfiguration;
-import org.apache.ignite.configuration.DataStorageConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.TestRecordingCommunicationSpi;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.internal.worker.WorkersControlMXBeanImpl;
-import org.apache.ignite.mxbean.WorkersControlMXBean;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.apache.ignite.transactions.Transaction;
-import org.jetbrains.annotations.NotNull;
-
-import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
-import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
-import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
-import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
-
-/**
- * Test transfer amount between accounts with enabled {@link StopNodeFailureHandler}.
- */
-public class AccountTransferTransactionTest extends GridCommonAbstractTest {
-    /** */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-    /** Count of accounts in one thread. */
-    private static final int ACCOUNTS_CNT = 20;
-    /** Count of threads and caches. */
-    private static final int THREADS_CNT = 20;
-    /** Count of nodes to start. */
-    private static final int NODES_CNT = 3;
-    /** Count of transaction on cache. */
-    private static final int TRANSACTION_CNT = 10;
-
-    /** {@inheritDoc} */
-    @Override protected FailureHandler getFailureHandler(String igniteInstanceName) {
-        return new StopNodeFailureHandler();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String name) throws Exception {
-        final IgniteConfiguration cfg = super.getConfiguration(name);
-
-        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
-        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
-        cfg.setLocalHost("127.0.0.1");
-        cfg.setDataStorageConfiguration(new DataStorageConfiguration()
-            .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
-                .setMaxSize(50 * 1024 * 1024)
-                .setPersistenceEnabled(true))
-        );
-
-        CacheConfiguration[] cacheConfigurations = new CacheConfiguration[THREADS_CNT];
-        for (int i = 0; i < THREADS_CNT; i++) {
-            cacheConfigurations[i] = new CacheConfiguration()
-                .setName(cacheName(i))
-                .setAffinity(new RendezvousAffinityFunction(false, 32))
-                .setBackups(1)
-                .setAtomicityMode(TRANSACTIONAL)
-                .setCacheMode(CacheMode.PARTITIONED)
-                .setWriteSynchronizationMode(FULL_SYNC)
-                .setEvictionPolicy(new FifoEvictionPolicy(1000))
-                .setOnheapCacheEnabled(true);
-        }
-
-        cfg.setCacheConfiguration(cacheConfigurations);
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        super.beforeTest();
-
-        stopAllGrids();
-
-        cleanPersistenceDir();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        super.afterTest();
-
-        stopAllGrids();
-
-        cleanPersistenceDir();
-    }
-
-    /**
-     * Test transfer amount.
-     */
-    public void testTransferAmount() throws Exception {
-        //given: started some nodes with client.
-        startGrids(NODES_CNT);
-
-        IgniteEx igniteClient = startGrid(getClientConfiguration(NODES_CNT));
-
-        igniteClient.cluster().active(true);
-
-        Random random = new Random();
-
-        long[] initAmount = new long[THREADS_CNT];
-
-        //and: fill all accounts on all caches and calculate total amount for every cache.
-        for (int cachePrefixIdx = 0; cachePrefixIdx < THREADS_CNT; cachePrefixIdx++) {
-            IgniteCache<Object, Object> cache = igniteClient.getOrCreateCache(cacheName(cachePrefixIdx));
-
-            try (Transaction tx = igniteClient.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
-                for (int accountId = 0; accountId < ACCOUNTS_CNT; accountId++) {
-                    Long amount = (long)random.nextInt(1000);
-
-                    cache.put(accountId, amount);
-
-                    initAmount[cachePrefixIdx] += amount;
-                }
-
-                tx.commit();
-            }
-        }
-
-        //when: start transfer amount from account to account in different threads.
-        CountDownLatch firstTransactionDone = new CountDownLatch(THREADS_CNT);
-
-        ArrayList<Thread> transferThreads = new ArrayList<>();
-
-        for (int i = 0; i < THREADS_CNT; i++) {
-            transferThreads.add(new TransferAmountTxThread(firstTransactionDone, igniteClient, cacheName(i)));
-
-            transferThreads.get(i).start();
-        }
-
-        firstTransactionDone.await(10, TimeUnit.SECONDS);
-
-        //and: terminate disco-event-worker thread on one node.
-        WorkersControlMXBean bean = workersMXBean(1);
-
-        bean.terminateWorker(
-            bean.getWorkerNames().stream()
-                .filter(name -> name.startsWith("disco-event-worker"))
-                .findFirst()
-                .orElse(null)
-        );
-
-        for (Thread thread : transferThreads) {
-            thread.join();
-        }
-
-        long[] resultAmount = new long[THREADS_CNT];
-
-        //then: calculate total amount for every thread.
-        for (int j = 0; j < THREADS_CNT; j++) {
-            String cacheName = cacheName(j);
-
-            IgniteCache<Object, Object> cache = igniteClient.getOrCreateCache(cacheName);
-
-            try (Transaction tx = igniteClient.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
-
-                for (int i = 0; i < ACCOUNTS_CNT; i++)
-                    resultAmount[j] += getNotNullValue(cache, i);
-                tx.commit();
-            }
-
-            long diffAmount = initAmount[j] - resultAmount[j];
-
-            //and: check that result amount equal to init amount.
-            assertTrue(
-                String.format("Total amount before and after transfer is not same: diff=%s, cache=%s",
-                    diffAmount, cacheName),
-                diffAmount == 0
-            );
-        }
-    }
-
-    /**
-     * Make test cache name by prefix.
-     */
-    @NotNull private String cacheName(int cachePrefixIdx) {
-        return "cache" + cachePrefixIdx;
-    }
-
-    /**
-     * Ignite configuration for client.
-     */
-    @NotNull private IgniteConfiguration getClientConfiguration(int nodesPrefix) throws Exception {
-        IgniteConfiguration clientConf = getConfiguration(getTestIgniteInstanceName(nodesPrefix));
-
-        clientConf.setClientMode(true);
-
-        return clientConf;
-    }
-
-    /**
-     * Extract not null value from cache.
-     */
-    private long getNotNullValue(IgniteCache<Object, Object> cache, int i) {
-        Object value = cache.get(i);
-
-        return value == null ? 0 : ((Long)value);
-    }
-
-    /**
-     * Configure workers mx bean.
-     */
-    private WorkersControlMXBean workersMXBean(int igniteInt) throws Exception {
-        ObjectName mbeanName = U.makeMBeanName(
-            getTestIgniteInstanceName(igniteInt),
-            "Kernal",
-            WorkersControlMXBeanImpl.class.getSimpleName()
-        );
-
-        MBeanServer mbeanSrv = ManagementFactory.getPlatformMBeanServer();
-
-        if (!mbeanSrv.isRegistered(mbeanName))
-            fail("MBean is not registered: " + mbeanName.getCanonicalName());
-
-        return MBeanServerInvocationHandler.newProxyInstance(mbeanSrv, mbeanName, WorkersControlMXBean.class, true);
-    }
-
-    /**
-     *
-     */
-    private static class TransferAmountTxThread extends Thread {
-        /** */
-        private CountDownLatch firstTransactionLatch;
-        /** */
-        private Ignite ignite;
-        /** */
-        private String cacheName;
-        /** */
-        private Random random = new Random();
-
-        /**
-         * @param ignite Ignite.
-         */
-        private TransferAmountTxThread(CountDownLatch firstTransactionLatch, final Ignite ignite, String cacheName) {
-            this.firstTransactionLatch = firstTransactionLatch;
-            this.ignite = ignite;
-            this.cacheName = cacheName;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void run() {
-            for (int i = 0; i < TRANSACTION_CNT; i++) {
-                try {
-                    updateInTransaction(ignite.cache(cacheName));
-                }
-                finally {
-                    if (i == 0)
-                        firstTransactionLatch.countDown();
-                }
-            }
-        }
-
-        /**
-         * @throws IgniteException if fails
-         */
-        @SuppressWarnings("unchecked")
-        private void updateInTransaction(IgniteCache cache) throws IgniteException {
-            int accIdFrom = random.nextInt(ACCOUNTS_CNT);
-            int accIdTo = random.nextInt(ACCOUNTS_CNT);
-
-            if (accIdFrom == accIdTo)
-                accIdTo = (int)getNextAccountId(accIdFrom);
-
-            Long acctFrom;
-            Long acctTo;
-
-            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
-                acctFrom = (Long)cache.get(accIdFrom);
-                acctTo = (Long)cache.get(accIdTo);
-
-                long transactionAmount = (long)(random.nextDouble() * acctFrom);
-
-                cache.put(accIdFrom, acctFrom - transactionAmount);
-                cache.put(accIdTo, acctTo + transactionAmount);
-
-                tx.commit();
-            }
-        }
-
-        /**
-         * @param curr current
-         * @return random value
-         */
-        private long getNextAccountId(long curr) {
-            long randomVal;
-
-            do {
-                randomVal = random.nextInt(ACCOUNTS_CNT);
-            }
-            while (curr == randomVal);
-
-            return randomVal;
-        }
-    }
-}
diff --git a/modules/core/src/test/java/org/apache/ignite/failure/FailureHandlerTriggeredTest.java b/modules/core/src/test/java/org/apache/ignite/failure/FailureHandlerTriggeredTest.java
index 8d56ced..23c7e08 100644
--- a/modules/core/src/test/java/org/apache/ignite/failure/FailureHandlerTriggeredTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/failure/FailureHandlerTriggeredTest.java
@@ -93,7 +93,7 @@
         }
 
         /** {@inheritDoc} */
-        public SchemaAbstractDiscoveryMessage message() {
+        @Override public SchemaAbstractDiscoveryMessage message() {
             throw new Error("Exchange worker termination");
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/ClusterMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/ClusterMetricsSelfTest.java
index addecea..7168d3a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/ClusterMetricsSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/ClusterMetricsSelfTest.java
@@ -47,7 +47,7 @@
     private static final int NODES_CNT = 4;
 
     /** */
-    private static final int ITER_CNT = 30;
+    private static final int ITER_CNT = 10;
 
     /** {@inheritDoc} */
     @Override protected void beforeTestsStarted() throws Exception {
@@ -61,7 +61,7 @@
 
         cfg.setCacheConfiguration();
         cfg.setIncludeProperties();
-        cfg.setMetricsUpdateFrequency(0);
+        //cfg.setMetricsUpdateFrequency(0);
 
         return cfg;
     }
@@ -141,8 +141,8 @@
     private void checkMetrics(ClusterMetrics m) {
         assert m.getTotalNodes() == NODES_CNT;
 
-        assert m.getMaximumActiveJobs() == 0;
-        assert m.getAverageActiveJobs() == 0;
+        assert m.getMaximumActiveJobs() >= 0;
+        assert m.getAverageActiveJobs() >= 0;
 
         assert m.getMaximumCancelledJobs() == 0;
         assert m.getAverageCancelledJobs() == 0;
@@ -165,7 +165,7 @@
 
         assert m.getMaximumThreadCount() > 0;
         assert m.getIdleTimePercentage() >= 0;
-        assert m.getIdleTimePercentage() <= 1;
+        assert m.getIdleTimePercentage() <= 100;
 
         assert m.getAverageCpuLoad() >= 0 || m.getAverageCpuLoad() == -1.0;
 
@@ -235,4 +235,4 @@
             latch.await();
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java
index b573ca3..b77e463 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/ClusterNodeMetricsSelfTest.java
@@ -37,6 +37,7 @@
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.events.Event;
+import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl;
 import org.apache.ignite.internal.processors.task.GridInternal;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
@@ -124,25 +125,26 @@
 
         final IgniteCache cache = ignite.getOrCreateCache(CACHE_NAME);
 
-        DataRegionMetricsImpl memMetrics = getDefaultMemoryPolicyMetrics(ignite);
+        DataRegion dataRegion = getDefaultDataRegion(ignite);
+
+        DataRegionMetricsImpl memMetrics = dataRegion.memoryMetrics();
 
         memMetrics.enableMetrics();
 
         int pageSize = getPageSize(ignite);
 
-        assertEquals(0, memMetrics.getTotalAllocatedPages());
+        assertEquals(dataRegion.pageMemory().loadedPages(), memMetrics.getTotalAllocatedPages());
 
         fillCache(cache);
 
-        assertTrue(memMetrics.getTotalAllocatedPages() * pageSize > MAX_VALS_AMOUNT
-            * VAL_SIZE);
+        assertTrue(memMetrics.getTotalAllocatedPages() * pageSize > MAX_VALS_AMOUNT * VAL_SIZE);
     }
 
     /**
      * @param ignite Ignite instance.
      */
-    private DataRegionMetricsImpl getDefaultMemoryPolicyMetrics(IgniteEx ignite) throws IgniteCheckedException {
-        return ignite.context().cache().context().database().dataRegion(null).memoryMetrics();
+    private DataRegion getDefaultDataRegion(IgniteEx ignite) throws IgniteCheckedException {
+        return ignite.context().cache().context().database().dataRegion(null);
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityMappedTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityMappedTest.java
index 0622453..79c9031 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityMappedTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityMappedTest.java
@@ -59,8 +59,6 @@
 
         cfg.setDiscoverySpi(disco);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         if (igniteInstanceName.endsWith("1"))
             cfg.setCacheConfiguration(); // Empty cache configuration.
         else {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityP2PSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityP2PSelfTest.java
index 35ebb0d6..7061e75 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityP2PSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinityP2PSelfTest.java
@@ -89,8 +89,6 @@
 
         c.setDiscoverySpi(disco);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         c.setDeploymentMode(depMode);
 
         if (igniteInstanceName.endsWith("1"))
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinitySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinitySelfTest.java
index 1d9a587..db49729 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridAffinitySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridAffinitySelfTest.java
@@ -53,8 +53,6 @@
 
         cfg.setDiscoverySpi(disco);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         if (igniteInstanceName.endsWith("1"))
             cfg.setClientMode(true);
         else {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridStartupMain.java b/modules/core/src/test/java/org/apache/ignite/internal/GridStartupMain.java
deleted file mode 100644
index 0cfd80e..0000000
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridStartupMain.java
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal;
-
-import javax.swing.JComponent;
-import javax.swing.JLabel;
-import javax.swing.JOptionPane;
-import org.apache.ignite.internal.util.typedef.G;
-
-/**
- * Ignite startup.
- */
-public class GridStartupMain {
-    /**
-     * @param args Arguments.
-     * @throws Exception If failed.
-     */
-    public static void main(String[] args) throws Exception {
-        //resetLog4j("org.apache.ignite.internal.processors.cache.distributed.dht.preloader", Level.DEBUG, false, 0);
-
-        //G.start("modules/tests/config/spring-multicache.xml");
-        //G.start("examples/config/example-cache.xml");
-
-        G.start();
-
-        // Wait until Ok is pressed.
-        JOptionPane.showMessageDialog(
-            null,
-            new JComponent[] {
-                new JLabel("Ignite started."),
-                new JLabel(
-                    "<html>" +
-                        "You can use JMX console at <u>http://localhost:1234</u>" +
-                    "</html>"),
-                new JLabel("Press OK to stop Ignite.")
-            },
-            "Ignite Startup JUnit",
-            JOptionPane.INFORMATION_MESSAGE
-        );
-
-        G.stop(true);
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridStartupTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridStartupTest.java
deleted file mode 100644
index a58362b..0000000
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridStartupTest.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal;
-
-import javax.swing.JComponent;
-import javax.swing.JLabel;
-import javax.swing.JOptionPane;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.apache.ignite.testframework.junits.common.GridCommonTest;
-
-/**
- * Ignite startup.
- */
-@SuppressWarnings({"ProhibitedExceptionDeclared"})
-@GridCommonTest(group = "Kernal")
-public class GridStartupTest extends GridCommonAbstractTest {
-    /** */
-    public GridStartupTest() {
-        super(false);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return Long.MAX_VALUE;
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testStartup() throws Exception {
-        //resetLog4j("org.apache.ignite.internal.processors.cache.distributed.dht.preloader", Level.DEBUG, false, 0);
-
-        //G.start("modules/tests/config/spring-multicache.xml");
-        //G.start("examples/config/example-cache.xml");
-
-        G.start();
-
-        // Wait until Ok is pressed.
-        JOptionPane.showMessageDialog(
-            null,
-            new JComponent[] {
-                new JLabel("Ignite started."),
-                new JLabel(
-                    "<html>" +
-                        "You can use JMX console at <u>http://localhost:1234</u>" +
-                    "</html>"),
-                new JLabel("Press OK to stop Ignite.")
-            },
-            "Ignite Startup JUnit",
-            JOptionPane.INFORMATION_MESSAGE
-        );
-
-        G.stop(true);
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/GridTaskResultCacheSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/GridTaskResultCacheSelfTest.java
index fb66652..c6bb43f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/GridTaskResultCacheSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/GridTaskResultCacheSelfTest.java
@@ -47,10 +47,21 @@
     /**
      * @throws Exception If failed.
      */
-    public void testNoCacheResults() throws Exception {
+    public void testNoCacheResultAnnotationUsage() throws Exception {
         Ignite ignite = G.ignite(getTestIgniteInstanceName());
 
-        ignite.compute().execute(GridResultNoCacheTestTask.class, "Grid Result No Cache Test Argument");
+        ignite.compute()
+            .execute(GridResultNoCacheResultAnnotationTestTask.class, "Grid Result No Cache Annotation Test Argument");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoCacheResultMethodUsage() throws Exception {
+        Ignite ignite = G.ignite(getTestIgniteInstanceName());
+
+        ignite.compute().withNoResultCache()
+                .execute(GridResultNoCacheResultMethodTestTask.class, "Grid Result No Cache Method Test Argument");
     }
 
     /**
@@ -65,8 +76,7 @@
     /**
      *
      */
-    @ComputeTaskNoResultCache
-    private static class GridResultNoCacheTestTask extends GridAbstractCacheTestTask {
+    private static class GridResultNoCacheResultMethodTestTask extends GridAbstractCacheTestTask {
         /** {@inheritDoc} */
         @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
             assert res.getData() != null;
@@ -86,6 +96,14 @@
     /**
      *
      */
+    @ComputeTaskNoResultCache
+    private static class GridResultNoCacheResultAnnotationTestTask extends GridResultNoCacheResultMethodTestTask {
+
+    }
+
+    /**
+     *
+     */
     private static class GridResultCacheTestTask extends GridAbstractCacheTestTask {
         /** {@inheritDoc} */
         @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectAbstractTest.java
index 76a0d52..0d89da2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectAbstractTest.java
@@ -305,26 +305,32 @@
             }
         };
 
-        for (Ignite client : clients)
-            client.events().localListen(p, EVT_CLIENT_NODE_DISCONNECTED, EVT_CLIENT_NODE_RECONNECTED);
+        try {
+            for (Ignite client : clients)
+                client.events().localListen(p, EVT_CLIENT_NODE_DISCONNECTED, EVT_CLIENT_NODE_RECONNECTED);
 
-        for (Ignite client : clients)
-            srvSpi.failNode(client.cluster().localNode().id(), null);
+            for (Ignite client : clients)
+                srvSpi.failNode(client.cluster().localNode().id(), null);
 
-        waitReconnectEvent(log, disconnectLatch);
+            waitReconnectEvent(log, disconnectLatch);
 
-        if (disconnectedC != null)
-            disconnectedC.run();
+            if (disconnectedC != null)
+                disconnectedC.run();
 
-        log.info("Allow reconnect.");
+            log.info("Allow reconnect.");
 
-        for (DiscoverySpiTestListener blockLsnr : blockLsnrs)
-            blockLsnr.stopBlockJoin();
+            for (DiscoverySpiTestListener blockLsnr : blockLsnrs)
+                blockLsnr.stopBlockJoin();
 
-        waitReconnectEvent(log, reconnectLatch);
+            waitReconnectEvent(log, reconnectLatch);
 
-        for (Ignite client : clients)
-            client.events().stopLocalListen(p);
+            for (Ignite client : clients)
+                client.events().stopLocalListen(p);
+        }
+        finally {
+            for (DiscoverySpiTestListener blockLsnr : blockLsnrs)
+                blockLsnr.stopBlockJoin();
+        }
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java
index a975101..bc498da 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/IgniteClientReconnectCacheTest.java
@@ -37,7 +37,6 @@
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteTransactions;
-import org.apache.ignite.Ignition;
 import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.cluster.ClusterGroup;
@@ -105,6 +104,9 @@
     private static final int CACHE_PUTS_CNT = 3;
 
     /** */
+    public static final String NEAR_CACHE_NAME = "nearCache";
+
+    /** */
     private UUID nodeId;
 
     /** {@inheritDoc} */
@@ -177,7 +179,7 @@
         CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
 
         ccfg.setWriteSynchronizationMode(FULL_SYNC);
-        ccfg.setName("nearCache");
+        ccfg.setName(NEAR_CACHE_NAME);
 
         final IgniteCache<Object, Object> nearCache = client.getOrCreateCache(ccfg, new NearCacheConfiguration<>())
             .withAllowAtomicOpsInTx();
@@ -270,7 +272,7 @@
 
         checkCacheDiscoveryData(srv, client, DEFAULT_CACHE_NAME, true, true, false);
 
-        checkCacheDiscoveryData(srv, client, "nearCache", true, true, true);
+        checkCacheDiscoveryData(srv, client, NEAR_CACHE_NAME, true, true, true);
 
         checkCacheDiscoveryData(srv, client, STATIC_CACHE, true, true, false);
 
@@ -308,7 +310,7 @@
 
         checkCacheDiscoveryData(srv2, client, DEFAULT_CACHE_NAME, true, true, false);
 
-        checkCacheDiscoveryData(srv2, client, "nearCache", true, true, true);
+        checkCacheDiscoveryData(srv2, client, NEAR_CACHE_NAME, true, true, true);
 
         checkCacheDiscoveryData(srv2, client, STATIC_CACHE, true, true, false);
 
@@ -316,9 +318,18 @@
 
         assertEquals(20, staticCache.get(20));
 
-        srv.cache(nearCache.getName()).put(20, 22);
-
-        assertEquals(22, nearCache.localPeek(20));
+        for(int i = 0; i < 100; i++) {
+            srv.cache(nearCache.getName()).put(i, 22);
+            Object actual = nearCache.localPeek(i);
+            // Change of topology may start partitions moving. It leads to invalidate near cache and
+            // null-values can be valid in such case.
+            if(actual == null) {
+                actual = nearCache.get(i);
+                assertEquals(22, actual);
+                actual = nearCache.localPeek(i);
+            }
+            assertEquals(22, actual);
+        }
     }
 
     /**
@@ -1203,7 +1214,7 @@
 
                 ClusterGroup grp = client.cluster().forCacheNodes(DEFAULT_CACHE_NAME);
 
-                assertEquals(CLIENTS + srvNodes, grp.nodes().size());
+                assertEquals(expNodes, grp.nodes().size());
 
                 grp = client.cluster().forClientNodes(DEFAULT_CACHE_NAME);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/TaskNodeRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/TaskNodeRestartTest.java
index 573804eb..7cab4e6 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/TaskNodeRestartTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/TaskNodeRestartTest.java
@@ -180,7 +180,7 @@
      */
     private static class TestTask2 implements ComputeTask<Void, Void> {
         /** {@inheritDoc} */
-        @Nullable public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Void arg)
+        @Override @Nullable public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Void arg)
             throws IgniteException {
             Map<TestJob, ClusterNode> jobs = new HashMap<>();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/TestRecordingCommunicationSpi.java b/modules/core/src/test/java/org/apache/ignite/internal/TestRecordingCommunicationSpi.java
index b36bf16..7b68a6b 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/TestRecordingCommunicationSpi.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/TestRecordingCommunicationSpi.java
@@ -33,6 +33,7 @@
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.lang.IgniteBiPredicate;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgnitePredicate;
@@ -65,6 +66,9 @@
     /** */
     private IgniteBiPredicate<ClusterNode, Message> blockP;
 
+    /** */
+    private volatile IgniteBiInClosure<ClusterNode, Message> c;
+
     /**
      * @param node Node.
      * @return Test SPI.
@@ -87,6 +91,9 @@
 
             Message msg0 = ioMsg.message();
 
+            if (c != null)
+                c.apply(node, msg0);
+
             synchronized (this) {
                 boolean record = (recordClasses != null && recordClasses.contains(msg0.getClass())) ||
                     (recordP != null && recordP.apply(node, msg0));
@@ -196,20 +203,16 @@
      * @throws InterruptedException If interrupted.
      */
     public void waitForBlocked() throws InterruptedException {
-        synchronized (this) {
-            while (blockedMsgs.isEmpty())
-                wait();
-        }
+        waitForBlocked(1);
     }
 
     /**
-     * @param cnt Number of messages to wait.
-     *
+     * @param size Number of messages to wait for.
      * @throws InterruptedException If interrupted.
      */
-    public void waitForBlocked(int cnt) throws InterruptedException {
+    public void waitForBlocked(int size) throws InterruptedException {
         synchronized (this) {
-            while (blockedMsgs.size() < cnt)
+            while (blockedMsgs.size() < size)
                 wait();
         }
     }
@@ -240,6 +243,13 @@
     }
 
     /**
+     * @param c Message closure.
+     */
+    public void closure(IgniteBiInClosure<ClusterNode, Message> c) {
+        this.c = c;
+    }
+
+    /**
      * @param blockP Message block predicate.
      */
     public void blockMessages(IgniteBiPredicate<ClusterNode, Message> blockP) {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/binary/BinaryMarshallerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/binary/BinaryMarshallerSelfTest.java
index 4a7d5d5..81a087d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/binary/BinaryMarshallerSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/binary/BinaryMarshallerSelfTest.java
@@ -3064,6 +3064,33 @@
     /**
      * @throws Exception If failed.
      */
+    public void testMixedRawCollections() throws Exception {
+        Collection<String> excludedClasses = Arrays.asList(
+            ObjectRaw.class.getName(),
+            ObjectWithRaw.class.getName(),
+            Value.class.getName());
+
+        BinaryMarshaller m0 = binaryMarshaller(null, excludedClasses);
+        BinaryMarshaller m1 = binaryMarshaller();
+
+        Value obj = new Value(27);
+        ObjectWithRaw objectWithRaw = new ObjectWithRaw(27, 13);
+        ObjectRaw objectRaw = new ObjectRaw(27, 13);
+
+        Value objOther = new Value(26);
+        ObjectWithRaw objectWithRawOther = new ObjectWithRaw(26, 13);
+        ObjectRaw objectRawOther = new ObjectRaw(26, 13);
+
+        ArrayList collection = new ArrayList(Arrays.asList(
+            obj, objectWithRawOther, objectRaw, objectWithRaw, objectRawOther, objOther));
+
+        marshalUnmarshal(collection, m0);
+        marshalUnmarshal(collection, m1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
     public void testBinaryEquals() throws Exception {
         Collection<String> excludedClasses = Arrays.asList(
             ObjectRaw.class.getName(),
@@ -5473,8 +5500,10 @@
 
         /** {@inheritDoc} */
         @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
-            val0 = reader.rawReader().readInt();
-            val1 = reader.rawReader().readInt();
+            BinaryRawReader rawReader = reader.rawReader();
+
+            val0 = rawReader.readInt();
+            val1 = rawReader.readInt();
         }
     }
 
@@ -5592,4 +5621,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/binary/TestCachingMetadataHandler.java b/modules/core/src/test/java/org/apache/ignite/internal/binary/TestCachingMetadataHandler.java
index 0870153..c515f81 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/binary/TestCachingMetadataHandler.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/binary/TestCachingMetadataHandler.java
@@ -30,7 +30,7 @@
     private final ConcurrentHashMap<Integer, BinaryType> metas = new ConcurrentHashMap<>();
 
     /** {@inheritDoc} */
-    @Override public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException {
+    @Override public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException {
         BinaryType otherType = metas.put(typeId, meta);
 
         if (otherType != null)
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/cluster/FullyConnectedComponentSearcherTest.java b/modules/core/src/test/java/org/apache/ignite/internal/cluster/FullyConnectedComponentSearcherTest.java
index d6680cf..7042da0 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/cluster/FullyConnectedComponentSearcherTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/cluster/FullyConnectedComponentSearcherTest.java
@@ -23,9 +23,12 @@
 import java.util.Random;
 import org.apache.ignite.internal.cluster.graph.FullyConnectedComponentSearcher;
 import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.jetbrains.annotations.NotNull;
 import org.junit.Assert;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
@@ -34,6 +37,10 @@
  */
 @RunWith(Parameterized.class)
 public class FullyConnectedComponentSearcherTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** Adjacency matrix provider for each test. */
     private AdjacencyMatrixProvider provider;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManagerAttributesSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManagerAttributesSelfTest.java
index 6ec8046..69f95e8 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManagerAttributesSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManagerAttributesSelfTest.java
@@ -123,11 +123,15 @@
             Ignite g = startGrid(i);
 
             assert "true".equals(g.cluster().localNode().attribute(PREFER_IPV4));
+
+            checkIsClientFlag((IgniteEx) g);
         }
 
         System.setProperty(PREFER_IPV4, "false");
 
-        startGrid(2);
+        IgniteEx g = startGrid(2);
+
+        checkIsClientFlag(g);
     }
 
     /**
@@ -154,12 +158,18 @@
         try {
             System.setProperty(IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID, first);
 
-            startGrid(0);
+            {
+                IgniteEx g = startGrid(0);
+
+                checkIsClientFlag(g);
+            }
 
             System.setProperty(IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID, second);
 
             try {
-                startGrid(1);
+                IgniteEx g = startGrid(1);
+
+                checkIsClientFlag(g);
 
                 if (fail)
                     fail("Node should not join");
@@ -206,7 +216,11 @@
             else
                 System.clearProperty(IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2);
 
-            startGrid(0);
+            {
+                IgniteEx g = startGrid(0);
+
+                checkIsClientFlag(g);
+            }
 
             if (second != null)
                 System.setProperty(IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2, second);
@@ -214,7 +228,9 @@
                 System.clearProperty(IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2);
 
             try {
-                startGrid(1);
+                IgniteEx g = startGrid(1);
+
+                checkIsClientFlag(g);
 
                 if (fail)
                     fail("Node should not join");
@@ -317,6 +333,8 @@
 
             IgniteEx ignite = startGrid(0);
 
+            checkIsClientFlag(ignite);
+
             // Ignore if disabled security plugin used.
             if (IGNITE_SECURITY_COMPATIBILITY_MODE.equals(prop) && !ignite.context().security().enabled())
                 return;
@@ -327,7 +345,9 @@
                 System.clearProperty(prop);
 
             try {
-                startGrid(1);
+                IgniteEx g = startGrid(1);
+
+                checkIsClientFlag(g);
 
                 if (fail)
                     fail("Node must not join");
@@ -351,7 +371,9 @@
      * @throws Exception If failed.
      */
     public void testDifferentDeploymentModes() throws Exception {
-        startGrid(0);
+        IgniteEx g = startGrid(0);
+
+        checkIsClientFlag(g);
 
         mode = CONTINUOUS;
 
@@ -370,7 +392,9 @@
      * @throws Exception If failed.
      */
     public void testDifferentPeerClassLoadingEnabledFlag() throws Exception {
-        startGrid(0);
+        IgniteEx g = startGrid(0);
+
+        checkIsClientFlag(g);
 
         p2pEnabled = true;
 
@@ -398,11 +422,24 @@
             Ignite g = startGrid(i);
 
             assert val.equals(g.cluster().localNode().attribute(PREFER_IPV4));
+
+            checkIsClientFlag((IgniteEx) g);
         }
     }
 
     /**
      *
+     * @param g
+     */
+    protected void checkIsClientFlag(IgniteEx g) {
+        boolean isClientDiscovery = g.context().discovery().localNode().isClient();
+        boolean isClientConfig = g.configuration().isClientMode() == null ? false : g.configuration().isClientMode();
+
+        assertEquals(isClientConfig, isClientDiscovery);
+    }
+
+    /**
+     *
      */
     public static class RegularDiscovery extends GridDiscoveryManagerAttributesSelfTest {
         /** {@inheritDoc} */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/BinaryMetadataRegistrationInsideEntryProcessorTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/BinaryMetadataRegistrationInsideEntryProcessorTest.java
new file mode 100644
index 0000000..73dae4b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/BinaryMetadataRegistrationInsideEntryProcessorTest.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class BinaryMetadataRegistrationInsideEntryProcessorTest extends GridCommonAbstractTest {
+    /** */
+    private static final String CACHE_NAME = "test-cache";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration() {
+        TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder()
+            .setAddresses(Arrays.asList("127.0.0.1:47500..47509"));
+
+        return new IgniteConfiguration()
+            .setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(ipFinder))
+            .setPeerClassLoadingEnabled(true);
+    }
+
+    /**
+     * @throws Exception If failed;
+     */
+    public void test() throws Exception {
+        Ignite ignite = startGrids(2);
+
+        IgniteCache<Integer, Map<Integer, CustomObj>> cache = ignite.createCache(CACHE_NAME);
+
+        try {
+            for (int i = 0; i < 10_000; i++)
+                cache.invoke(i, new CustomProcessor());
+        }
+        catch (Exception e) {
+            Map<Integer, CustomObj> value = cache.get(1);
+
+            if ((value != null) && (value.get(1) != null) && (value.get(1).getObj() == CustomEnum.ONE))
+                System.out.println("Data was saved.");
+            else
+                System.out.println("Data wasn't saved.");
+
+            throw e;
+        }
+    }
+
+    /**
+     *
+     */
+    private static class CustomProcessor implements EntryProcessor<Integer,
+        Map<Integer, CustomObj>, Object> {
+        /** {@inheritDoc} */
+        @Override public Object process(
+            MutableEntry<Integer, Map<Integer, CustomObj>> entry,
+            Object... objects) throws EntryProcessorException {
+            Map<Integer, CustomObj> map = new HashMap<>();
+
+            map.put(1, new CustomObj(CustomEnum.ONE));
+
+            entry.setValue(map);
+
+            return null;
+        }
+    }
+
+    /**
+     *
+     */
+    private static class CustomObj {
+        /** Object. */
+        private final Object obj;
+
+        /**
+         * @param obj Object.
+         */
+        public CustomObj(Object obj) {
+            this.obj = obj;
+        }
+
+        /**
+         * @param val Value.
+         */
+        public static CustomObj valueOf(int val) {
+            return new CustomObj(val);
+        }
+
+        /**
+         *
+         */
+        public Object getObj() {
+            return obj;
+        }
+    }
+
+    /**
+     *
+     */
+    private enum CustomEnum {
+        /** */ONE(1),
+        /** */TWO(2),
+        /** */THREE(3);
+
+        /** Value. */
+        private final Object val;
+
+        /**
+         * @param val Value.
+         */
+        CustomEnum(Object val) {
+            this.val = val;
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheEntryProcessorCopySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheEntryProcessorCopySelfTest.java
index 7005e14..aabd3b6 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheEntryProcessorCopySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheEntryProcessorCopySelfTest.java
@@ -158,7 +158,7 @@
 
             CacheObject obj = entry.peekVisibleValue();
 
-            ca.context().evicts().touch(entry, AffinityTopologyVersion.NONE);
+            entry.touch(AffinityTopologyVersion.NONE);
 
             int actCnt = cnt.get();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMetricsManageTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMetricsManageTest.java
index 7c3ad66..ae00ac9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMetricsManageTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheMetricsManageTest.java
@@ -36,6 +36,7 @@
 import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheMetrics;
 import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.configuration.DataStorageConfiguration;
@@ -489,7 +490,8 @@
             .setName(CACHE1)
             .setGroupName(GROUP)
             .setCacheMode(CacheMode.PARTITIONED)
-            .setAtomicityMode(CacheAtomicityMode.ATOMIC);
+            .setAtomicityMode(CacheAtomicityMode.ATOMIC)
+            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC);
 
         cfg.setCacheConfiguration(cacheCfg);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java
index 5eb8292..6239b52 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/CacheStopAndDestroySelfTest.java
@@ -18,11 +18,15 @@
 package org.apache.ignite.internal.processors.cache;
 
 import java.util.UUID;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicInteger;
 import javax.cache.Cache;
 import javax.cache.CacheManager;
 import javax.cache.Caching;
 import javax.cache.configuration.MutableConfiguration;
+import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cluster.ClusterNode;
@@ -782,6 +786,70 @@
     }
 
     /**
+     * @throws Exception If failed.
+     */
+    public void testConcurrentUseAndCloseFromClient() throws Exception {
+        testConcurrentUseAndClose(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testConcurrentUseAndCloseFromServer() throws Exception {
+        testConcurrentUseAndClose(false);
+    }
+
+    /**
+     * @param isClient Should client or server be used during the test.
+     * @throws Exception If failed.
+     */
+    private void testConcurrentUseAndClose(boolean isClient) throws Exception {
+        int threads = 8;
+        int keys = 1000;
+        int iterations = 20;
+
+        startGrid(0);
+
+        IgniteConfiguration igniteCfg = getConfiguration(getTestIgniteInstanceName(1));
+        igniteCfg.setClientMode(isClient);
+        Ignite ignite = startGrid(optimize(igniteCfg));
+
+        ExecutorService execSrvc = Executors.newFixedThreadPool(threads);
+
+        for (int i = 0; i < threads; i++) {
+            execSrvc.execute(() -> {
+                while (!Thread.interrupted()) {
+                    try {
+                        IgniteCache<Integer, String> cache = ignite.getOrCreateCache("cache");
+
+                        ThreadLocalRandom random = ThreadLocalRandom.current();
+                        int key = random.nextInt(keys);
+
+                        if (random.nextBoolean())
+                            cache.put(key, Integer.toString(key));
+                        else
+                            cache.get(key);
+                    }
+                    catch (Exception ignore) {
+                    }
+                }
+            });
+        }
+
+        for (int i = 0; i < iterations; i++) {
+            System.out.println("Iteration #" + (i + 1));
+
+            IgniteCache<Integer, String> cache = ignite.getOrCreateCache("cache");
+
+            cache.close();
+
+            Thread.sleep(100);
+        }
+
+        execSrvc.shutdownNow();
+    }
+
+    /**
      * @param cache Cache.
      * @throws Exception If failed.
      */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractFullApiSelfTest.java
index 9ae9f8c..b872d2a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractFullApiSelfTest.java
@@ -6390,7 +6390,7 @@
 
                     size++;
 
-                    ctx.evicts().touch(e, null);
+                    e.touch(null);
                 }
             }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractSelfTest.java
index 16e7de4..89f0ca7 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheAbstractSelfTest.java
@@ -201,8 +201,6 @@
     @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         TcpDiscoverySpi disco = new TcpDiscoverySpi();
 
         disco.setIpFinder(ipFinder);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManagerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManagerSelfTest.java
index 993a1cf..0a284e9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManagerSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheMvccManagerSelfTest.java
@@ -55,7 +55,6 @@
 
         cfg.setDiscoverySpi(disco);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
         cfg.setCacheConfiguration(cacheConfiguration());
 
         return cfg;
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java
index 99abd70..8bc620f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridCacheTestEntryEx.java
@@ -19,6 +19,7 @@
 
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 import java.util.UUID;
 import javax.cache.Cache;
 import javax.cache.expiry.ExpiryPolicy;
@@ -26,7 +27,10 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.cache.eviction.EvictableEntry;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
@@ -277,6 +281,12 @@
     }
 
     /** {@inheritDoc} */
+    @Nullable @Override public List<GridCacheEntryInfo> allVersionsInfo()
+        throws IgniteCheckedException {
+        return null;
+    }
+
+    /** {@inheritDoc} */
     @Override public boolean valid(AffinityTopologyVersion topVer) {
         return true;
     }
@@ -391,7 +401,8 @@
         Object transformClo,
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
-        boolean keepBinary) {
+        boolean keepBinary,
+        MvccSnapshot mvccVer) {
         return val;
     }
 
@@ -408,6 +419,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        MvccSnapshot mvccVer,
         @Nullable ReaderArguments args) throws IgniteCheckedException, GridCacheEntryRemovedException {
         assert false;
 
@@ -425,6 +437,7 @@
         String taskName,
         @Nullable IgniteCacheExpiryPolicy expiryPlc,
         boolean keepBinary,
+        MvccSnapshot mvccVer,
         @Nullable ReaderArguments readerArgs) {
         assert false;
 
@@ -457,9 +470,41 @@
         UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr)
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer
+    )
+        throws IgniteCheckedException, GridCacheEntryRemovedException
+    {
+        rawPut(val, ttl);
+
+        return new GridCacheUpdateTxResult(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccSet(@Nullable IgniteInternalTx tx, UUID affNodeId, CacheObject val,
+        long ttl0, AffinityTopologyVersion topVer, @Nullable Long updateCntr, MvccSnapshot mvccVer,
+        GridCacheOperation op, boolean needHistory,
+        boolean noCreate) throws IgniteCheckedException, GridCacheEntryRemovedException {
+        rawPut(val, ttl);
+
+        return new GridCacheUpdateTxResult(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccRemove(@Nullable IgniteInternalTx tx, UUID affNodeId,
+        AffinityTopologyVersion topVer, @Nullable Long updateCntr, MvccSnapshot mvccVer, boolean needHistory)
         throws IgniteCheckedException, GridCacheEntryRemovedException {
-        return new GridCacheUpdateTxResult(true, rawPut(val, ttl), null);
+        obsoleteVer = ver;
+
+        val = null;
+
+        return new GridCacheUpdateTxResult(true);
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccLock(GridDhtTxLocalAdapter tx,
+        MvccSnapshot mvccVer) throws GridCacheEntryRemovedException, IgniteCheckedException {
+        return new GridCacheUpdateTxResult(true);
     }
 
     /** {@inheritDoc} */
@@ -539,15 +584,14 @@
         UUID subjId,
         String taskName,
         @Nullable GridCacheVersion dhtVer,
-        @Nullable Long updateCntr
+        @Nullable Long updateCntr,
+        MvccSnapshot mvccVer
         ) throws IgniteCheckedException, GridCacheEntryRemovedException {
         obsoleteVer = ver;
 
-        CacheObject old = val;
-
         val = null;
 
-        return new GridCacheUpdateTxResult(true, old, null);
+        return new GridCacheUpdateTxResult(true);
     }
 
     /** @inheritDoc */
@@ -638,6 +682,10 @@
     @Override public boolean initialValue(
         CacheObject val,
         GridCacheVersion ver,
+        MvccVersion mvccVer,
+        MvccVersion newMvccVer,
+        byte mvccTxState,
+        byte newMvccTxState,
         long ttl,
         long expireTime,
         boolean preload,
@@ -890,4 +938,20 @@
     @Override public boolean lockedByCurrentThread() {
         return false;
     }
+
+    /** {@inheritDoc} */
+    @Override public GridCacheUpdateTxResult mvccUpdateRowsWithPreloadInfo(IgniteInternalTx tx,
+        UUID affNodeId,
+        AffinityTopologyVersion topVer,
+        @Nullable Long updateCntr,
+        List<GridCacheEntryInfo> entries,
+        GridCacheOperation op,
+        MvccSnapshot mvccVer) throws IgniteCheckedException, GridCacheEntryRemovedException {
+        return null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void touch(AffinityTopologyVersion topVer) {
+        context().evicts().touch(this, topVer);
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridProjectionForCachesOnDaemonNodeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridProjectionForCachesOnDaemonNodeSelfTest.java
index e32668b..85b2373 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridProjectionForCachesOnDaemonNodeSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/GridProjectionForCachesOnDaemonNodeSelfTest.java
@@ -77,7 +77,7 @@
     }
 
     /** {@inheritDoc} */
-    protected void beforeTest() throws Exception {
+    @Override protected void beforeTest() throws Exception {
         ignite.getOrCreateCache(DEFAULT_CACHE_NAME);
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractTest.java
index 54b561b..370a7a8 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractTest.java
@@ -97,8 +97,6 @@
         MemoryEventStorageSpi evtSpi = new MemoryEventStorageSpi();
         evtSpi.setExpireCount(100);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         cfg.setEventStorageSpi(evtSpi);
 
         cfg.setDiscoverySpi(disco);
@@ -220,7 +218,7 @@
     /**
      * @return Cache.
      */
-    protected <K, V> IgniteCache<K, V> jcache() {
+    @Override protected <K, V> IgniteCache<K, V> jcache() {
         return jcache(0);
     }
 
@@ -228,7 +226,7 @@
      * @param idx Grid index.
      * @return Cache.
      */
-    protected <K, V> IgniteCache<K, V> jcache(int idx) {
+    @Override protected <K, V> IgniteCache<K, V> jcache(int idx) {
         return grid(idx).cache(DEFAULT_CACHE_NAME);
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheConfigVariationsFullApiTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheConfigVariationsFullApiTest.java
index 3ffdd65..3600c05 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheConfigVariationsFullApiTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheConfigVariationsFullApiTest.java
@@ -6219,7 +6219,7 @@
 
                     size++;
 
-                    ctx.evicts().touch(e, null);
+                    e.touch(null);
                 }
             }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCopyOnReadDisabledAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCopyOnReadDisabledAbstractTest.java
index bba779f..61f8136 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCopyOnReadDisabledAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheCopyOnReadDisabledAbstractTest.java
@@ -43,6 +43,11 @@
         return ccfg;
     }
 
+    /** {@inheritDoc} */
+    @Override protected boolean onheapCacheEnabled() {
+        return true;
+    }
+
     /**
      * @throws Exception If failed.
      */
@@ -64,6 +69,7 @@
             assertSame(val0, cache.localPeek(key));
         }
 
+        /* Does not seem to work anymore since main storage mechanism is always off-heap.
         TestKey key = new TestKey(0);
 
         TestValue val0 = cache.get(key);
@@ -74,7 +80,7 @@
             }
         });
 
-        assertSame(val0, invokeVal);
+        assertSame(val0, invokeVal);*/
     }
 
     /**
@@ -122,4 +128,4 @@
             this.val = val;
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInvokeAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInvokeAbstractTest.java
index 54ca4fa..d9a0428 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInvokeAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheInvokeAbstractTest.java
@@ -18,12 +18,15 @@
 package org.apache.ignite.internal.processors.cache;
 
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 import java.util.Set;
 import java.util.concurrent.Callable;
 import javax.cache.processor.EntryProcessor;
@@ -32,10 +35,15 @@
 import javax.cache.processor.MutableEntry;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
+import org.apache.ignite.binary.BinaryObjectBuilder;
+import org.apache.ignite.cache.CacheEntryProcessor;
 import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.affinity.AffinityKeyMapped;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.resources.IgniteInstanceResource;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.transactions.Transaction;
 import org.apache.ignite.transactions.TransactionConcurrency;
@@ -235,6 +243,148 @@
     }
 
     /**
+     *
+     */
+    private static class MyKey {
+        /** */
+        String key;
+
+
+        /** */
+        @AffinityKeyMapped
+        String affkey = "affkey";
+
+        /** */
+        public MyKey(String key) {
+            this.key = key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (!(o instanceof MyKey))
+                return false;
+
+            MyKey key1 = (MyKey)o;
+
+            return Objects.equals(key, key1.key) &&
+                Objects.equals(affkey, key1.affkey);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return Objects.hash(key, affkey);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "MyKey{" +
+                "key='" + key + '\'' +
+                '}';
+        }
+    }
+
+    /** */
+    static class MyClass1{}
+
+    /** */
+    static class MyClass2{}
+
+    /** */
+    static class MyClass3{}
+
+    /** */
+    Object[] results = new Object[] {
+        new MyClass1(),
+        new MyClass2(),
+        new MyClass3()
+    };
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInvokeAllAppliedOnceOnBinaryTypeRegistration() {
+        IgniteCache<MyKey, Integer> cache = jcache();
+
+        Affinity<Object> affinity = grid(0).affinity(cache.getName());
+
+        for (int i = 0; i < gridCount(); i++) {
+            if(!affinity.isPrimary(grid(i).localNode(), new MyKey(""))) {
+                cache = jcache(i);
+                break;
+            }
+        }
+
+        LinkedHashSet<MyKey> keys = new LinkedHashSet<>(Arrays.asList(
+            new MyKey("remove_0"), new MyKey("1"), new MyKey("2"),
+            new MyKey("remove_3"), new MyKey("remove_4"), new MyKey("register_type_0"),
+            new MyKey("6"), new MyKey("remove_7"), new MyKey("register_type_1"),
+            new MyKey("9"), new MyKey("remove_10"), new MyKey("11"), new MyKey("12"), new MyKey("register_type_2")
+        ));
+
+        for (MyKey key : keys)
+            cache.put(key, 0);
+
+        cache.invokeAll(keys,
+            new CacheEntryProcessor<MyKey, Integer, Object>() {
+
+                @IgniteInstanceResource
+                Ignite ignite;
+
+                @Override public Object process(MutableEntry<MyKey, Integer> entry,
+                    Object... objects) throws EntryProcessorException {
+
+                    String key = entry.getKey().key;
+
+                    if (key.startsWith("register_type")) {
+                        BinaryObjectBuilder bo = ignite.binary().builder(key);
+
+                        bo.build();
+                    }
+
+                    if (key.startsWith("remove")) {
+                        entry.remove();
+                    }
+                    else {
+                        Integer value = entry.getValue() == null ? 0 : entry.getValue();
+
+                        entry.setValue(++value);
+                    }
+
+                    if (key.startsWith("register_type"))
+                        return results[Integer.parseInt(key.substring(key.lastIndexOf("_") + 1))];
+
+                    return null;
+                }
+
+            });
+
+        Map<MyKey, Integer> all = cache.getAll(keys);
+
+        for (Map.Entry<MyKey, Integer> entry : all.entrySet()) {
+            MyKey key = entry.getKey();
+
+            if (key.key.startsWith("remove")) {
+                assertNull(entry.getValue());
+
+                if (cacheStoreFactory() != null)
+                    assertNull(storeMap.get(keys));
+            }
+            else {
+                int value = entry.getValue();
+
+                assertEquals("\"" + key + "' entry has wrong value, exp=1 actl=" + value, 1, value);
+
+                if (cacheStoreFactory() != null)
+                    assertEquals("\"" + key + "' entry has wrong value in cache store, exp=1 actl=" + value,
+                        1, (int)storeMap.get(key));
+            }
+        }
+    }
+
+    /**
      * @param cache Cache.
      * @param txMode Not null transaction concurrency mode if explicit transaction should be started.
      * @throws Exception If failed.
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheStoreValueAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheStoreValueAbstractTest.java
index eceb9d2..bd259a5 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheStoreValueAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheStoreValueAbstractTest.java
@@ -65,6 +65,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override protected boolean onheapCacheEnabled() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
     @Override protected CacheConfiguration cacheConfiguration(String igniteInstanceName) throws Exception {
         CacheConfiguration ccfg = super.cacheConfiguration(igniteInstanceName);
 
@@ -189,6 +194,8 @@
 
                 assertNull(cache.localPeek(key, CachePeekMode.ONHEAP));
 
+                cache.get(key);
+
                 assertNotNull(cache.localPeek(key, CachePeekMode.ONHEAP));
 
                 checkNoValue(aff, key);
@@ -239,7 +246,7 @@
         for (int g = 0; g < gridCount(); g++) {
             IgniteEx ig = grid(g);
 
-            GridCacheAdapter cache0 = internalCache(ig, null);
+            GridCacheAdapter cache0 = internalCache(ig, DEFAULT_CACHE_NAME);
 
             GridCacheEntryEx e = cache0.peekEx(key);
 
@@ -354,6 +361,8 @@
 
                 assertNull(cache.localPeek(key, CachePeekMode.ONHEAP));
 
+                cache.get(key);
+
                 assertNotNull(cache.localPeek(key, CachePeekMode.ONHEAP));
 
                 checkHasValue(aff, key);
@@ -369,7 +378,7 @@
         for (int g = 0; g < gridCount(); g++) {
             IgniteEx ig = grid(g);
 
-            GridCacheAdapter cache0 = internalCache(ig, null);
+            GridCacheAdapter cache0 = internalCache(ig, DEFAULT_CACHE_NAME);
 
             GridCacheEntryEx e = cache0.peekEx(key);
 
@@ -495,7 +504,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return "TestValue [val=" + val + ']';
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java
index 4ef1dc3..b1df28e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClientCacheInitializationFailTest.java
@@ -43,6 +43,8 @@
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.query.GridQueryCancel;
 import org.apache.ignite.internal.processors.query.GridQueryIndexing;
@@ -53,6 +55,7 @@
 import org.apache.ignite.internal.processors.query.QueryField;
 import org.apache.ignite.internal.processors.query.QueryIndexDescriptorImpl;
 import org.apache.ignite.internal.processors.query.SqlClientContext;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 import org.apache.ignite.internal.util.lang.GridCloseableIterator;
@@ -223,6 +226,11 @@
      */
     private static class FailedIndexing implements GridQueryIndexing {
         /** {@inheritDoc} */
+        @Override public void onClientDisconnect() throws IgniteCheckedException {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
         @Override public void start(GridKernalContext ctx, GridSpinBusyLock busyLock) throws IgniteCheckedException {
             // No-op
         }
@@ -240,7 +248,7 @@
 
         /** {@inheritDoc} */
         @Override public List<FieldsQueryCursor<List<?>>> querySqlFields(String schemaName, SqlFieldsQuery qry,
-            SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) {
+            SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, MvccQueryTracker tracker, GridQueryCancel cancel) {
             return null;
         }
 
@@ -312,6 +320,14 @@
         }
 
         /** {@inheritDoc} */
+        @Override public UpdateSourceIterator<?> prepareDistributedUpdate(GridCacheContext<?, ?> cctx, int[] ids, int[] parts,
+            String schema, String qry, Object[] params, int flags, int pageSize, int timeout,
+            AffinityTopologyVersion topVer,
+            MvccSnapshot mvccVer, GridQueryCancel cancel) throws IgniteCheckedException {
+            return null;
+        }
+
+        /** {@inheritDoc} */
         @Override public boolean registerType(GridCacheContext cctx,
             GridQueryTypeDescriptor desc) throws IgniteCheckedException {
             return false;
@@ -319,7 +335,7 @@
 
         /** {@inheritDoc} */
         @Override public void store(GridCacheContext cctx, GridQueryTypeDescriptor type, CacheDataRow row,
-            CacheDataRow prevRow, boolean prevRowAvailable) {
+            CacheDataRow prevRow, boolean prevRowAvailable) throws IgniteCheckedException {
             // No-op.
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java
index 4ff97ba..4be660d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteClusterActivateDeactivateTest.java
@@ -357,11 +357,13 @@
      * @return State change future.
      * @throws Exception If failed.
      */
-    private IgniteInternalFuture<?> startNodesAndBlockStatusChange(int srvs,
+    private IgniteInternalFuture<?> startNodesAndBlockStatusChange(
+        int srvs,
         int clients,
         final int stateChangeFrom,
         final boolean initiallyActive,
-        int... blockMsgNodes) throws Exception {
+        int... blockMsgNodes
+    ) throws Exception {
         active = initiallyActive;
         testSpi = true;
 
@@ -1138,20 +1140,16 @@
 
         client = false;
 
-        IgniteInternalFuture startFut1 = GridTestUtils.runAsync(new Callable() {
-            @Override public Object call() throws Exception {
-                startGrid(4);
+        IgniteInternalFuture startFut1 = GridTestUtils.runAsync((Callable) () -> {
+            startGrid(4);
 
-                return null;
-            }
+            return null;
         }, "start-node1");
 
-        IgniteInternalFuture startFut2 = GridTestUtils.runAsync(new Callable() {
-            @Override public Object call() throws Exception {
-                startGrid(5);
+        IgniteInternalFuture startFut2 = GridTestUtils.runAsync((Callable) () -> {
+            startGrid(5);
 
-                return null;
-            }
+            return null;
         }, "start-node2");
 
         U.sleep(1000);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartFailWithPersistenceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartFailWithPersistenceTest.java
index 3b7bf52..24c9342 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartFailWithPersistenceTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteDynamicCacheStartFailWithPersistenceTest.java
@@ -34,7 +34,7 @@
         return 5 * 60 * 1000;
     }
 
-    protected boolean persistenceEnabled() {
+    @Override protected boolean persistenceEnabled() {
         return true;
     }
 
@@ -75,7 +75,7 @@
     }
 
     /** {@inheritDoc} */
-    protected void checkCacheOperations(IgniteCache<Integer, Value> cache) throws Exception {
+    @Override protected void checkCacheOperations(IgniteCache<Integer, Value> cache) throws Exception {
         super.checkCacheOperations(cache);
 
         // Disable write-ahead log.
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheClassNameConflictTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheClassNameConflictTest.java
index 80d0fd1..64c7817 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheClassNameConflictTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheClassNameConflictTest.java
@@ -31,13 +31,13 @@
 import org.apache.ignite.configuration.BinaryConfiguration;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
 import org.apache.ignite.spi.discovery.DiscoverySpiListener;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.jetbrains.annotations.Nullable;
 
@@ -193,7 +193,7 @@
             }
 
             /** {@inheritDoc} */
-            @Override public void onDiscovery(
+            @Override public IgniteInternalFuture onDiscovery(
                     int type,
                     long topVer,
                     ClusterNode node,
@@ -219,7 +219,9 @@
                 }
 
                 if (delegate != null)
-                    delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
+                    return delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
+
+                return new GridFinishedFuture();
             }
 
             /** {@inheritDoc} */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java
index 49f5311..7aa61eb 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteMarshallerCacheFSRestoreTest.java
@@ -34,8 +34,10 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.PersistentStoreConfiguration;
 import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage;
 import org.apache.ignite.internal.processors.marshaller.MappingProposedMessage;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage;
 import org.apache.ignite.spi.discovery.DiscoverySpiListener;
@@ -243,7 +245,7 @@
             }
 
             /** {@inheritDoc} */
-            @Override public void onDiscovery(
+            @Override public IgniteInternalFuture onDiscovery(
                 int type,
                 long topVer,
                 ClusterNode node,
@@ -267,7 +269,9 @@
                 }
 
                 if (delegate != null)
-                    delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
+                    return delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
+
+                return new GridFinishedFuture();
             }
 
             /** {@inheritDoc} */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorAbstractCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorAbstractCacheTest.java
index 9860199..e2a4a08 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorAbstractCacheTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorAbstractCacheTest.java
@@ -98,7 +98,7 @@
         int c = 0;
 
         for (ClusterNode node : nodes) {
-            if (!CU.clientNode(node))
+            if (!node.isClient())
                 c++;
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorCacheGroupsAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorCacheGroupsAbstractTest.java
index 3070d9d..8613225 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorCacheGroupsAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/IgniteTopologyValidatorCacheGroupsAbstractTest.java
@@ -79,7 +79,7 @@
     /**
      * @throws Exception If failed.
      */
-    public void testTopologyValidator() throws Exception {
+    @Override public void testTopologyValidator() throws Exception {
         putValid(DEFAULT_CACHE_NAME);
         remove(DEFAULT_CACHE_NAME);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/PartitionsExchangeCoordinatorFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/PartitionsExchangeCoordinatorFailoverTest.java
new file mode 100644
index 0000000..a2adcf7
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/PartitionsExchangeCoordinatorFailoverTest.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.util.concurrent.CountDownLatch;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Assert;
+
+/**
+ * Advanced coordinator failure scenarios during PME.
+ */
+public class PartitionsExchangeCoordinatorFailoverTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setConsistentId(igniteInstanceName);
+
+        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+
+        IgnitePredicate<ClusterNode> nodeFilter = node -> node.consistentId().equals(igniteInstanceName);
+
+        cfg.setCacheConfiguration(
+            new CacheConfiguration("cache-" + igniteInstanceName)
+                .setBackups(1)
+                .setNodeFilter(nodeFilter)
+                .setAffinity(new RendezvousAffinityFunction(false, 32))
+        );
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        stopAllGrids();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 60 * 1000L;
+    }
+
+    /**
+     * Tests that new coordinator is able to finish old exchanges in case of in-complete coordinator initialization.
+     */
+    public void testNewCoordinatorCompletedExchange() throws Exception {
+        IgniteEx crd = (IgniteEx) startGrid("crd");
+
+        IgniteEx newCrd = startGrid(1);
+
+        crd.cluster().active(true);
+
+        // 3 node join topology version.
+        AffinityTopologyVersion joinThirdNodeVer = new AffinityTopologyVersion(3, 0);
+
+        // 4 node join topology version.
+        AffinityTopologyVersion joinFourNodeVer = new AffinityTopologyVersion(4, 0);
+
+        // Block FullMessage for newly joined nodes.
+        TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(crd);
+
+        final CountDownLatch sendFullMsgLatch = new CountDownLatch(1);
+
+        // Delay sending full message to newly joined nodes.
+        spi.blockMessages((node, msg) -> {
+            if (msg instanceof GridDhtPartitionsFullMessage && node.order() > 2) {
+                try {
+                    sendFullMsgLatch.await();
+                }
+                catch (Throwable ignored) { }
+
+                return true;
+            }
+
+            return false;
+        });
+
+        IgniteInternalFuture joinTwoNodesFut = GridTestUtils.runAsync(() -> startGridsMultiThreaded(2, 2));
+
+        GridCachePartitionExchangeManager exchangeMgr = newCrd.context().cache().context().exchange();
+
+        // Wait till new coordinator finishes third node join exchange.
+        GridTestUtils.waitForCondition(
+            () -> exchangeMgr.readyAffinityVersion().compareTo(joinThirdNodeVer) >= 0,
+            getTestTimeout()
+        );
+
+        IgniteInternalFuture startLastNodeFut = GridTestUtils.runAsync(() -> startGrid(5));
+
+        // Wait till new coordinator starts third node join exchange.
+        GridTestUtils.waitForCondition(
+            () -> exchangeMgr.lastTopologyFuture().initialVersion().compareTo(joinFourNodeVer) >= 0,
+            getTestTimeout()
+        );
+
+        IgniteInternalFuture stopCrdFut = GridTestUtils.runAsync(() -> stopGrid("crd", true, false));
+
+        // Magic sleep to make sure that coordinator stop process has started.
+        U.sleep(1000);
+
+        // Resume full messages sending to unblock coordinator stopping process.
+        sendFullMsgLatch.countDown();
+
+        // Coordinator stop should succeed.
+        stopCrdFut.get();
+
+        // Nodes join should succeed.
+        joinTwoNodesFut.get();
+
+        startLastNodeFut.get();
+
+        awaitPartitionMapExchange();
+
+        // Check that all caches are operable.
+        for (Ignite grid : G.allGrids()) {
+            IgniteCache cache = grid.cache("cache-" + grid.cluster().localNode().consistentId());
+
+            Assert.assertNotNull(cache);
+
+            cache.put(0, 0);
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java
index 0fee748..be0f5df 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeAdvancedSelfTest.java
@@ -17,18 +17,18 @@
 
 package org.apache.ignite.internal.processors.cache;
 
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.internal.IgniteClientReconnectAbstractTest;
-import org.apache.ignite.internal.util.typedef.X;
-
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.IgniteClientReconnectAbstractTest;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.testframework.GridTestUtils;
 
 import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
 import static org.apache.ignite.cache.CacheMode.PARTITIONED;
@@ -271,7 +271,7 @@
 
                     restartCnt.incrementAndGet();
 
-                    X.println(">>> Finished restart: " + restartCnt.get());
+                    log.info(">>> Finished restart: " + restartCnt.get());
                 }
             }
         });
@@ -310,55 +310,47 @@
 
         final AtomicBoolean done = new AtomicBoolean();
 
-        final CountDownLatch latch = new CountDownLatch(1);
-
         // Start load.
-        Thread t = new Thread(new Runnable() {
-            @Override public void run() {
-                boolean state = false;
+        IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
+            boolean state = false;
 
-                while (!done.get()) {
-                    try {
-                        if (state)
-                            cli.cluster().enableWal(CACHE_NAME);
-                        else
-                            cli.cluster().disableWal(CACHE_NAME);
-                    }
-                    catch (IgniteException e) {
-                        String msg = e.getMessage();
-
-                        assert msg.startsWith("Client node disconnected") ||
-                            msg.startsWith("Client node was disconnected") : e.getMessage();
-                    }
-                    finally {
-                        state = !state;
-                    }
+            while (!done.get()) {
+                try {
+                    if (state)
+                        cli.cluster().enableWal(CACHE_NAME);
+                    else
+                        cli.cluster().disableWal(CACHE_NAME);
                 }
+                catch (IgniteException e) {
+                    String msg = e.getMessage();
 
-                latch.countDown();
+                    assert msg.startsWith("Client node disconnected") ||
+                        msg.startsWith("Client node was disconnected") : e.getMessage();
+                }
+                finally {
+                    state = !state;
+                }
             }
-        });
-
-        t.setName("wal-load-" + cli.name());
-
-        t.start();
+        }, "wal-load-" + cli.name());
 
         // Now perform multiple client reconnects.
-        for (int i = 1; i <= 10; i++) {
-            Thread.sleep(ThreadLocalRandom.current().nextLong(200, 1000));
+        try {
+            for (int i = 1; i <= 10; i++) {
+                Thread.sleep(ThreadLocalRandom.current().nextLong(200, 1000));
 
-            IgniteClientReconnectAbstractTest.reconnectClientNode(log, cli, srv, new Runnable() {
-                @Override public void run() {
-                    // No-op.
-                }
-            });
+                IgniteClientReconnectAbstractTest.reconnectClientNode(log, cli, srv, new Runnable() {
+                    @Override public void run() {
+                        // No-op.
+                    }
+                });
 
-            X.println(">>> Finished iteration: " + i);
+                log.info(">>> Finished iteration: " + i);
+            }
+        } finally {
+            done.set(true);
         }
 
-        done.set(true);
-
-        latch.await();
+        fut.get();
     }
 
     /**
@@ -376,56 +368,49 @@
 
         final AtomicBoolean done = new AtomicBoolean();
 
-        final CountDownLatch latch = new CountDownLatch(1);
-
         // Start load.
-        Thread t = new Thread(new Runnable() {
-            @Override public void run() {
-                boolean state = false;
+        IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
+            boolean state = false;
 
-                while (!done.get()) {
-                    try {
-                        if (state)
-                            cli.cluster().enableWal(CACHE_NAME);
-                        else
-                            cli.cluster().disableWal(CACHE_NAME);
-                    }
-                    catch (IgniteException e) {
-                        String msg = e.getMessage();
-
-                        assert msg.startsWith("Cache doesn't exist") ||
-                            msg.startsWith("Failed to change WAL mode because some caches no longer exist") :
-                            e.getMessage();
-                    }
-                    finally {
-                        state = !state;
-                    }
+            while (!done.get()) {
+                try {
+                    if (state)
+                        cli.cluster().enableWal(CACHE_NAME);
+                    else
+                        cli.cluster().disableWal(CACHE_NAME);
                 }
+                catch (IgniteException e) {
+                    String msg = e.getMessage();
 
-                latch.countDown();
+                    assert msg.startsWith("Cache doesn't exist") ||
+                        msg.startsWith("Failed to change WAL mode because some caches no longer exist") :
+                        e.getMessage();
+                }
+                finally {
+                    state = !state;
+                }
             }
-        });
+        }, "wal-load-" + cli.name());
 
-        t.setName("wal-load-" + cli.name());
+        try {
+            // Now perform multiple client reconnects.
+            for (int i = 1; i <= 20; i++) {
+                Thread.sleep(ThreadLocalRandom.current().nextLong(200, 1000));
 
-        t.start();
+                srv.destroyCache(CACHE_NAME);
 
-        // Now perform multiple client reconnects.
-        for (int i = 1; i <= 20; i++) {
-            Thread.sleep(ThreadLocalRandom.current().nextLong(200, 1000));
+                Thread.sleep(100);
 
-            srv.destroyCache(CACHE_NAME);
+                srv.createCache(cacheConfig(PARTITIONED));
 
-            Thread.sleep(100);
-
-            srv.createCache(cacheConfig(PARTITIONED));
-
-            X.println(">>> Finished iteration: " + i);
+                log.info(">>> Finished iteration: " + i);
+            }
+        }
+        finally {
+            done.set(true);
         }
 
-        done.set(true);
-
-        latch.await();
+        fut.get();
     }
 
     /**
@@ -495,11 +480,11 @@
 
             done.set(true);
 
-            X.println(">>> Stopping iteration: " + i);
+            log.info(">>> Stopping iteration: " + i);
 
             latch.await();
 
-            X.println(">>> Iteration finished: " + i);
+            log.info(">>> Iteration finished: " + i);
         }
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeCommonAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeCommonAbstractSelfTest.java
index d4a0078..a902bfa 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeCommonAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/WalModeChangeCommonAbstractSelfTest.java
@@ -296,8 +296,8 @@
      * @param filter Whether node should be filtered out.
      * @return Node configuration.
      */
-    protected IgniteConfiguration config(String name, boolean cli, boolean filter) {
-        IgniteConfiguration cfg = new IgniteConfiguration();
+    protected IgniteConfiguration config(String name, boolean cli, boolean filter) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(name);
 
         cfg.setIgniteInstanceName(name);
         cfg.setClientMode(cli);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryAtomicEntryProcessorDeploymentSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryAtomicEntryProcessorDeploymentSelfTest.java
index 596cf54..c65f41c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryAtomicEntryProcessorDeploymentSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/binary/GridCacheBinaryAtomicEntryProcessorDeploymentSelfTest.java
@@ -29,7 +29,7 @@
 public class GridCacheBinaryAtomicEntryProcessorDeploymentSelfTest
     extends GridCacheAtomicEntryProcessorDeploymentSelfTest {
     /** {@inheritDoc} */
-    protected IgniteCache getCache() {
+    @Override protected IgniteCache getCache() {
         return grid(1).cache(DEFAULT_CACHE_NAME).withKeepBinary();
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheAbstractDataStructuresFailoverSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheAbstractDataStructuresFailoverSelfTest.java
index 9da1161..797e90f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheAbstractDataStructuresFailoverSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheAbstractDataStructuresFailoverSelfTest.java
@@ -119,11 +119,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() throws Exception {
-        // No-op
-    }
-
-    /** {@inheritDoc} */
     @Override protected void beforeTest() throws Exception {
         startGridsMultiThreaded(gridCount());
 
@@ -1362,7 +1357,7 @@
                             if (failed.get())
                                 return;
 
-                            int idx = nodeIdx.getAndIncrement();
+                            int idx = nodeIdx.incrementAndGet();
 
                             Thread.currentThread().setName("thread-" + getTestIgniteInstanceName(idx));
 
@@ -1373,8 +1368,15 @@
 
                                 cb.apply(g);
                             }
+                            catch (IgniteException e) {
+                                if (!X.hasCause(e, NodeStoppingException.class) &&
+                                    !X.hasCause(e, IllegalStateException.class))
+                                    throw e;
+
+                                // OK for this test.
+                            }
                             finally {
-                                if(circular)
+                                if (circular)
                                     stopGrid(G.allGrids().get(0).configuration().getIgniteInstanceName());
                                 else
                                     stopGrid(idx);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSequenceApiSelfAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSequenceApiSelfAbstractTest.java
index 45275f4..81292c7 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSequenceApiSelfAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSequenceApiSelfAbstractTest.java
@@ -91,7 +91,7 @@
     }
 
     /** {@inheritDoc} */
-    protected AtomicConfiguration atomicConfiguration() {
+    @Override protected AtomicConfiguration atomicConfiguration() {
         AtomicConfiguration atomicCfg = super.atomicConfiguration();
 
         atomicCfg.setAtomicSequenceReserveSize(BATCH_SIZE);
@@ -138,7 +138,7 @@
     }
 
     /** {@inheritDoc} */
-    protected IgniteEx grid() {
+    @Override protected IgniteEx grid() {
         return grid(0);
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetAbstractSelfTest.java
index 9a707eb..59f13d9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetAbstractSelfTest.java
@@ -43,6 +43,7 @@
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -129,18 +130,12 @@
         for (int i = 0; i < gridCount(); i++) {
             IgniteKernal grid = (IgniteKernal)grid(i);
 
-            for (IgniteCache cache : grid.caches()) {
-                CacheDataStructuresManager dsMgr = grid.internalCache(cache.getName()).context().dataStructures();
+            for (IgniteInternalCache cache : grid.cachesx(null)) {
+                CacheDataStructuresManager dsMgr = cache.context().dataStructures();
 
                 Map map = GridTestUtils.getFieldValue(dsMgr, "setsMap");
 
                 assertEquals("Set not removed [grid=" + i + ", map=" + map + ']', 0, map.size());
-
-                map = GridTestUtils.getFieldValue(dsMgr, "setDataMap");
-
-                assertEquals("Set data not removed [grid=" + i + ", cache=" + cache.getName() + ", map=" + map + ']',
-                    0,
-                    map.size());
             }
         }
     }
@@ -582,8 +577,6 @@
         if (collectionCacheMode() == LOCAL)
             return;
 
-        fail("https://issues.apache.org/jira/browse/IGNITE-584");
-
         testNodeJoinsAndLeaves(false);
     }
 
@@ -594,8 +587,6 @@
         if (collectionCacheMode() == LOCAL)
             return;
 
-        fail("https://issues.apache.org/jira/browse/IGNITE-584");
-
         testNodeJoinsAndLeaves(true);
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetFailoverAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetFailoverAbstractSelfTest.java
index f8af2a2..ce320bd 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetFailoverAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/GridCacheSetFailoverAbstractSelfTest.java
@@ -116,8 +116,7 @@
                     try {
                         int size = set.size();
 
-                        // TODO: IGNITE-584, check for equality when IGNITE-584 fixed.
-                        assertTrue(size > 0);
+                        assertEquals(ITEMS, size);
                     }
                     catch (IgniteException ignore) {
                         // No-op.
@@ -134,8 +133,7 @@
                             cnt++;
                         }
 
-                        // TODO: IGNITE-584, check for equality when IGNITE-584 fixed.
-                        assertTrue(cnt > 0);
+                        assertEquals(ITEMS, cnt);
                     }
                     catch (IgniteException ignore) {
                         // No-op.
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedAtomicSetFailoverSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedAtomicSetFailoverSelfTest.java
index c2af2b1..4673549 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedAtomicSetFailoverSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedAtomicSetFailoverSelfTest.java
@@ -35,4 +35,4 @@
     @Override public void testNodeRestart() throws Exception {
         fail("https://issues.apache.org/jira/browse/IGNITE-170");
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedDataStructuresFailoverSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedDataStructuresFailoverSelfTest.java
index ecb2df9..eecfefe 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedDataStructuresFailoverSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/GridCachePartitionedDataStructuresFailoverSelfTest.java
@@ -38,4 +38,18 @@
     @Override protected CacheAtomicityMode collectionCacheAtomicityMode() {
         return TRANSACTIONAL;
     }
+
+    /**
+     *
+     */
+    @Override public void testReentrantLockConstantTopologyChangeNonFailoverSafe() {
+        fail("https://issues.apache.org/jira/browse/IGNITE-6454");
+    }
+
+    /**
+     *
+     */
+    @Override public void testFairReentrantLockConstantTopologyChangeNonFailoverSafe() {
+        fail("https://issues.apache.org/jira/browse/IGNITE-6454");
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/IgnitePartitionedSetNoBackupsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/IgnitePartitionedSetNoBackupsSelfTest.java
index 4daaeca..5f09dfa 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/IgnitePartitionedSetNoBackupsSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/partitioned/IgnitePartitionedSetNoBackupsSelfTest.java
@@ -70,4 +70,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/replicated/GridCacheReplicatedDataStructuresFailoverSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/replicated/GridCacheReplicatedDataStructuresFailoverSelfTest.java
index a228718..27fbdcf 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/replicated/GridCacheReplicatedDataStructuresFailoverSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/datastructures/replicated/GridCacheReplicatedDataStructuresFailoverSelfTest.java
@@ -40,16 +40,30 @@
     }
 
     /**
-     * @throws Exception If failed.
+     *
      */
-    public void testFairReentrantLockConstantMultipleTopologyChangeNonFailoverSafe() throws Exception {
+    @Override public void testFairReentrantLockConstantMultipleTopologyChangeNonFailoverSafe() {
         fail("https://issues.apache.org/jira/browse/IGNITE-6454");
     }
 
     /**
-     * @throws Exception If failed.
+     *
      */
-    public void testReentrantLockConstantMultipleTopologyChangeNonFailoverSafe() throws Exception {
+    @Override public void testReentrantLockConstantMultipleTopologyChangeNonFailoverSafe() {
+        fail("https://issues.apache.org/jira/browse/IGNITE-6454");
+    }
+
+    /**
+     *
+     */
+    @Override public void testReentrantLockConstantTopologyChangeNonFailoverSafe() {
+        fail("https://issues.apache.org/jira/browse/IGNITE-6454");
+    }
+
+    /**
+     *
+     */
+    @Override public void testFairReentrantLockConstantTopologyChangeNonFailoverSafe() {
         fail("https://issues.apache.org/jira/browse/IGNITE-6454");
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDataLossOnPartitionMoveTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDataLossOnPartitionMoveTest.java
new file mode 100644
index 0000000..2a99271
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheDataLossOnPartitionMoveTest.java
@@ -0,0 +1,296 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.AffinityFunctionContext;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.GridCacheUtils;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemandMessage;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.testframework.GridTestNode;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.EVICTED;
+import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
+
+/**
+ *
+ */
+public class CacheDataLossOnPartitionMoveTest extends GridCommonAbstractTest {
+    /** */
+    public static final long MB = 1024 * 1024L;
+
+    /** */
+    public static final String GRP_ATTR = "grp";
+
+    /** */
+    public static final int GRIDS_CNT = 2;
+
+    /** */
+    public static final String EVEN_GRP = "event";
+
+    /** */
+    public static final String ODD_GRP = "odd";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setConsistentId(igniteInstanceName);
+
+        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+
+        cfg.setPeerClassLoadingEnabled(true);
+
+        Map<String, Object> attrs = new HashMap<>();
+
+        attrs.put(GRP_ATTR, grp(getTestIgniteInstanceIndex(igniteInstanceName)));
+
+        cfg.setUserAttributes(attrs);
+
+        DataStorageConfiguration memCfg = new DataStorageConfiguration()
+            .setDefaultDataRegionConfiguration(
+                new DataRegionConfiguration().setPersistenceEnabled(true).setInitialSize(50 * MB).setMaxSize(50 * MB))
+            .setWalMode(WALMode.LOG_ONLY);
+
+        cfg.setDataStorageConfiguration(memCfg);
+
+        cfg.setCacheConfiguration(configuration(DEFAULT_CACHE_NAME));
+
+        return cfg;
+    }
+
+    /**
+     * @param name Name.
+     */
+    private CacheConfiguration configuration(String name) {
+        return new CacheConfiguration(name).
+            setCacheMode(CacheMode.PARTITIONED).
+            setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).
+            setBackups(1).
+            setRebalanceBatchSize(1).
+            setAffinity(new TestAffinityFunction().setPartitions(32));
+    }
+
+    /**
+     * @param idx Index.
+     */
+    private String grp(int idx) {
+        return idx < GRIDS_CNT / 2 ? EVEN_GRP : ODD_GRP;
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testDataLossOnPartitionMove() throws Exception {
+        try {
+            Ignite ignite = startGridsMultiThreaded(GRIDS_CNT / 2, false);
+
+            ignite.cluster().active(true);
+
+            List<Integer> toCp = movingKeysAfterJoin(ignite, DEFAULT_CACHE_NAME, 1,
+                node -> ((GridTestNode)node).setAttribute(GRP_ATTR, ODD_GRP));
+
+            int blockPartId = ignite.affinity(DEFAULT_CACHE_NAME).partition(toCp.get(0));
+
+            awaitPartitionMapExchange();
+
+            int c = 0;
+
+            for (int i = 0; i < 1000; i++) {
+                if (ignite.affinity(DEFAULT_CACHE_NAME).partition(i) == blockPartId) {
+                    ignite.cache(DEFAULT_CACHE_NAME).put(i, i);
+
+                    c++;
+                }
+            }
+
+            assertEquals(c, ignite.cache(DEFAULT_CACHE_NAME).size());
+
+            startGridsMultiThreaded(GRIDS_CNT / 2, GRIDS_CNT / 2);
+
+            // Prevent rebalancing to new nodes.
+            for (Ignite ig0 : G.allGrids()) {
+                TestRecordingCommunicationSpi.spi(ig0).blockMessages((node, message) -> {
+                    if (message instanceof GridDhtPartitionDemandMessage) {
+                        assertTrue(node.order() <= GRIDS_CNT / 2);
+
+                        GridDhtPartitionDemandMessage msg = (GridDhtPartitionDemandMessage)message;
+
+                        return msg.groupId() == CU.cacheId(DEFAULT_CACHE_NAME);
+                    }
+
+                    return false;
+                });
+            }
+
+            ignite.cluster().setBaselineTopology(GRIDS_CNT);
+
+            for (Ignite ig0 : G.allGrids()) {
+                if (ig0.cluster().localNode().order() <= GRIDS_CNT / 2)
+                    continue;
+
+                TestRecordingCommunicationSpi.spi(ig0).waitForBlocked();
+            }
+
+            assertEquals(c, ignite.cache(DEFAULT_CACHE_NAME).size());
+
+            int i = 0;
+
+            while(i < GRIDS_CNT / 2) {
+                stopGrid(GRIDS_CNT / 2 + i);
+
+                i++;
+            }
+
+            awaitPartitionMapExchange();
+
+            for (Ignite ig : G.allGrids()) {
+                GridDhtLocalPartition locPart = dht(ig.cache(DEFAULT_CACHE_NAME)).topology().localPartition(blockPartId);
+
+                assertNotNull(locPart);
+
+                assertEquals("Unexpected state", OWNING, locPart.state());
+            }
+
+            startGridsMultiThreaded(GRIDS_CNT / 2, GRIDS_CNT / 2);
+
+            awaitPartitionMapExchange(true, true, null);
+
+            for (Ignite ig : G.allGrids()) {
+                GridDhtLocalPartition locPart = dht(ig.cache(DEFAULT_CACHE_NAME)).topology().localPartition(blockPartId);
+
+                assertNotNull(locPart);
+
+                switch ((String)ig.cluster().localNode().attribute(GRP_ATTR)) {
+                    case EVEN_GRP:
+                        assertEquals("Unexpected state", EVICTED, locPart.state());
+
+                        break;
+
+                    case ODD_GRP:
+                        assertEquals("Unexpected state", OWNING, locPart.state());
+
+                        break;
+
+                    default:
+                        fail();
+                }
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        cleanPersistenceDir();
+    }
+
+    /** */
+    public static class TestAffinityFunction extends RendezvousAffinityFunction {
+        /** */
+        public TestAffinityFunction() {
+        }
+
+        /** */
+        public TestAffinityFunction(boolean exclNeighbors) {
+            super(exclNeighbors);
+        }
+
+        /** */
+        public TestAffinityFunction(boolean exclNeighbors, int parts) {
+            super(exclNeighbors, parts);
+        }
+
+        /** */
+        public TestAffinityFunction(int parts,
+            @Nullable IgniteBiPredicate<ClusterNode, ClusterNode> backupFilter) {
+            super(parts, backupFilter);
+        }
+
+        /** {@inheritDoc} */
+        @Override public List<List<ClusterNode>> assignPartitions(AffinityFunctionContext affCtx) {
+            int parts = partitions();
+
+            List<List<ClusterNode>> assignments = new ArrayList<>(parts);
+
+            Map<UUID, Collection<ClusterNode>> neighborhoodCache = isExcludeNeighbors() ?
+                GridCacheUtils.neighbors(affCtx.currentTopologySnapshot()) : null;
+
+            List<ClusterNode> nodes = affCtx.currentTopologySnapshot();
+
+            Map<Object, List<ClusterNode>> nodesByGrp = U.newHashMap(2);
+
+            for (ClusterNode node : nodes) {
+                Object grp = node.attribute(GRP_ATTR);
+
+                List<ClusterNode> grpNodes = nodesByGrp.get(grp);
+
+                if (grpNodes == null)
+                    nodesByGrp.put(grp, (grpNodes = new ArrayList<>()));
+
+                grpNodes.add(node);
+            }
+
+            boolean split = nodesByGrp.size() == 2;
+
+            for (int i = 0; i < parts; i++) {
+                List<ClusterNode> partAssignment = assignPartition(i, split ?
+                        nodesByGrp.get(i % 2 == 0 ? EVEN_GRP : ODD_GRP) : nodes,
+                    affCtx.backups(), neighborhoodCache);
+
+                assignments.add(partAssignment);
+            }
+
+            return assignments;
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return Integer.MAX_VALUE;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheExchangeMergeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheExchangeMergeTest.java
index b255066..1183634 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheExchangeMergeTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheExchangeMergeTest.java
@@ -634,8 +634,6 @@
      * @throws Exception If failed.
      */
     public void testStartCacheOnJoinAndCoordinatorFailed1() throws Exception {
-        fail("https://issues.apache.org/jira/browse/IGNITE-9227");
-
         cfgCache = false;
 
         final Ignite srv0 = startGrids(2);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java
index 0cbe67a..db4a0da3 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheLateAffinityAssignmentTest.java
@@ -2843,7 +2843,7 @@
             IgnitePredicate<ClusterNode> filter = cacheDesc.cacheConfiguration().getNodeFilter();
 
             for (ClusterNode n : allNodes) {
-                if (!CU.clientNode(n) && (filter == null || filter.apply(n)))
+                if (!n.isClient() && (filter == null || filter.apply(n)))
                     affNodes.add(n);
             }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledAtomicInvokeRestartSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledAtomicInvokeRestartSelfTest.java
index 28f2fe0..89e075d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledAtomicInvokeRestartSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledAtomicInvokeRestartSelfTest.java
@@ -74,7 +74,7 @@
     }
 
     /** {@inheritDoc} */
-    protected void checkCache(IgniteEx ignite, IgniteCache cache) throws Exception {
+    @Override protected void checkCache(IgniteEx ignite, IgniteCache cache) throws Exception {
         log.info("Start cache validation.");
 
         long startTime = U.currentTimeMillis();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledTransactionalInvokeRestartSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledTransactionalInvokeRestartSelfTest.java
index f4eea6c..e5f9e28 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledTransactionalInvokeRestartSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheNearDisabledTransactionalInvokeRestartSelfTest.java
@@ -71,7 +71,7 @@
     }
 
     /** {@inheritDoc} */
-    protected void checkCache(IgniteEx ignite, IgniteCache cache) {
+    @Override protected void checkCache(IgniteEx ignite, IgniteCache cache) {
         log.info("Start cache validation.");
 
         long startTime = U.currentTimeMillis();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePageWriteLockUnlockTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePageWriteLockUnlockTest.java
new file mode 100644
index 0000000..f6a5ec1
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePageWriteLockUnlockTest.java
@@ -0,0 +1,196 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed;
+
+import java.util.Iterator;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager;
+import org.apache.ignite.internal.pagemem.store.PageStore;
+import org.apache.ignite.internal.processors.cache.CacheGroupContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
+
+/**
+ *
+ */
+public class CachePageWriteLockUnlockTest extends GridCommonAbstractTest {
+    /** */
+    public static final int PARTITION = 0;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME).
+            setAffinity(new RendezvousAffinityFunction(false, 32)));
+
+        cfg.setActiveOnStart(false);
+
+        DataStorageConfiguration memCfg = new DataStorageConfiguration()
+            .setDefaultDataRegionConfiguration(
+                new DataRegionConfiguration()
+                    .setPersistenceEnabled(true)
+                    .setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE)
+            )
+            .setWalMode(WALMode.LOG_ONLY).setCheckpointFrequency(Integer.MAX_VALUE);
+
+        cfg.setDataStorageConfiguration(memCfg);
+
+        return cfg;
+    }
+
+    /**
+     *
+     */
+    public void testPreloadPartition() throws Exception {
+        try {
+            IgniteEx grid0 = startGrid(0);
+
+            grid0.cluster().active(true);
+
+            int total = 512;
+
+            putData(grid0, total, PARTITION);
+
+            grid0.cache(DEFAULT_CACHE_NAME).removeAll();
+
+            forceCheckpoint();
+
+            stopGrid(0);
+
+            grid0 = startGrid(0);
+
+            grid0.cluster().active(true);
+
+            putData(grid0, total, PARTITION); // Will use pages from reuse pool.
+
+            forceCheckpoint();
+
+            stopGrid(0);
+
+            grid0 = startGrid(0);
+
+            preloadPartition(grid0, DEFAULT_CACHE_NAME, PARTITION);
+
+            Iterator<Cache.Entry<Object, Object>> it = grid0.cache(DEFAULT_CACHE_NAME).iterator();
+
+            int c0 = 0;
+
+            while (it.hasNext()) {
+                Cache.Entry<Object, Object> entry = it.next();
+
+                c0++;
+            }
+
+            assertEquals(total, c0);
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @param grid Grid.
+     * @param total Total.
+     * @param part Partition.
+     */
+    private void putData(Ignite grid, int total, int part) {
+        int c = 0, k = 0;
+
+        while(c < total) {
+            if (grid(0).affinity(DEFAULT_CACHE_NAME).partition(k) == part) {
+                grid.cache(DEFAULT_CACHE_NAME).put(k, k);
+
+                c++;
+            }
+
+            k++;
+        }
+    }
+
+    /**
+     * Preload partition fast by iterating on all pages in disk order.
+     *
+     * @param grid Grid.
+     * @param cacheName Cache name.
+     * @param p P.
+     */
+    private void preloadPartition(Ignite grid, String cacheName, int p) throws IgniteCheckedException {
+        GridDhtCacheAdapter<Object, Object> dht = ((IgniteKernal)grid).internalCache(cacheName).context().dht();
+
+        GridDhtLocalPartition part = dht.topology().localPartition(p);
+
+        assertNotNull(part);
+
+        assertTrue(part.state() == OWNING);
+
+        CacheGroupContext grpCtx = dht.context().group();
+
+        if (part.state() != OWNING)
+            return;
+
+        IgnitePageStoreManager pageStoreMgr = grpCtx.shared().pageStore();
+
+        if (pageStoreMgr instanceof FilePageStoreManager) {
+            FilePageStoreManager filePageStoreMgr = (FilePageStoreManager)pageStoreMgr;
+
+            PageStore pageStore = filePageStoreMgr.getStore(grpCtx.groupId(), part.id());
+
+            PageMemoryEx pageMemory = (PageMemoryEx)grpCtx.dataRegion().pageMemory();
+
+            long pageId = pageMemory.partitionMetaPageId(grpCtx.groupId(), part.id());
+
+            for (int pageNo = 0; pageNo < pageStore.pages(); pageId++, pageNo++) {
+                long pagePointer = -1;
+
+                try {
+                    pagePointer = pageMemory.acquirePage(grpCtx.groupId(), pageId);
+                }
+                finally {
+                    if (pagePointer != -1)
+                        pageMemory.releasePage(grpCtx.groupId(), pageId, pagePointer);
+                }
+            }
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        cleanPersistenceDir();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePartitionStateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePartitionStateTest.java
index 3b05ac3..b1c25b7 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePartitionStateTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CachePartitionStateTest.java
@@ -61,7 +61,7 @@
     private CacheConfiguration ccfg;
 
     /** {@inheritDoc} */
-    protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
         ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(ipFinder);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheRentingStateRepairTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheRentingStateRepairTest.java
new file mode 100644
index 0000000..83a590a
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/CacheRentingStateRepairTest.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed;
+
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.testframework.GridTestUtils.waitForCondition;
+
+/**
+ *
+ */
+public class CacheRentingStateRepairTest extends GridCommonAbstractTest {
+    /** */
+    public static final int PARTS = 1024;
+
+    /** */
+    private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        CacheConfiguration ccfg = new CacheConfiguration(DEFAULT_CACHE_NAME);
+
+        ccfg.setAffinity(new RendezvousAffinityFunction(false, PARTS).setPartitions(64));
+
+        ccfg.setOnheapCacheEnabled(false);
+
+        ccfg.setBackups(1);
+
+        ccfg.setRebalanceBatchSize(100);
+
+        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+
+        cfg.setCacheConfiguration(ccfg);
+
+        cfg.setActiveOnStart(false);
+
+        cfg.setConsistentId(igniteInstanceName);
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        long sz = 100 * 1024 * 1024;
+
+        DataStorageConfiguration memCfg = new DataStorageConfiguration().setPageSize(1024)
+            .setDefaultDataRegionConfiguration(
+                new DataRegionConfiguration().setPersistenceEnabled(true).setInitialSize(sz).setMaxSize(sz))
+            .setWalMode(WALMode.LOG_ONLY).setCheckpointFrequency(24L * 60 * 60 * 1000);
+
+        cfg.setDataStorageConfiguration(memCfg);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override public String getTestIgniteInstanceName(int idx) {
+        return "node" + idx;
+    }
+
+    /**
+     *
+     */
+    public void testRentingStateRepairAfterRestart() throws Exception {
+        try {
+            IgniteEx g0 = startGrid(0);
+
+            startGrid(1);
+
+            g0.cluster().active(true);
+
+            awaitPartitionMapExchange();
+
+            List<Integer> parts = evictingPartitionsAfterJoin(g0, g0.cache(DEFAULT_CACHE_NAME), 20);
+
+            int toEvictPart = parts.get(0);
+
+            int k = 0;
+
+            while (g0.affinity(DEFAULT_CACHE_NAME).partition(k) != toEvictPart)
+                k++;
+
+            g0.cache(DEFAULT_CACHE_NAME).put(k, k);
+
+            GridDhtPartitionTopology top = dht(g0.cache(DEFAULT_CACHE_NAME)).topology();
+
+            GridDhtLocalPartition part = top.localPartition(toEvictPart);
+
+            assertNotNull(part);
+
+            // Prevent eviction.
+            part.reserve();
+
+            startGrid(2);
+
+            g0.cluster().setBaselineTopology(3);
+
+            // Wait until all is evicted except first partition.
+            assertTrue("Failed to wait for partition eviction", waitForCondition(() -> {
+                for (int i = 1; i < parts.size(); i++) { // Skip reserved partition.
+                    Integer p = parts.get(i);
+
+                    if (top.localPartition(p).state() != GridDhtPartitionState.EVICTED)
+                        return false;
+                }
+
+                return true;
+            }, 5000));
+
+            /**
+             * Force renting state before node stop.
+             * This also could be achieved by stopping node just after RENTING state is set.
+             */
+            part.setState(GridDhtPartitionState.RENTING);
+
+            assertEquals(GridDhtPartitionState.RENTING, part.state());
+
+            stopGrid(0);
+
+            g0 = startGrid(0);
+
+            awaitPartitionMapExchange();
+
+            part = dht(g0.cache(DEFAULT_CACHE_NAME)).topology().localPartition(toEvictPart);
+
+            assertNotNull(part);
+
+            final GridDhtLocalPartition finalPart = part;
+
+            CountDownLatch clearLatch = new CountDownLatch(1);
+
+            part.onClearFinished(fut -> {
+                assertEquals(GridDhtPartitionState.EVICTED, finalPart.state());
+
+                clearLatch.countDown();
+            });
+
+            assertTrue("Failed to wait for partition eviction after restart",
+                clearLatch.await(5_000, TimeUnit.MILLISECONDS));
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        cleanPersistenceDir();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheAbstractNodeRestartSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheAbstractNodeRestartSelfTest.java
index 8e69e43..f32aab5 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheAbstractNodeRestartSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheAbstractNodeRestartSelfTest.java
@@ -684,7 +684,12 @@
             }
 
             for (Thread t : threads)
-                t.join();
+                t.join(2 * duration);
+
+            for (Thread t : threads) {
+                if (t.isAlive())
+                    t.interrupt();
+            }
 
             if (err.get() != null)
                 throw err.get();
@@ -1016,4 +1021,4 @@
         error("Attempt: " + attempt);
         error("Node: " + ignite.cluster().localNode().id());
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
index 55190ad..daa1557 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheEventAbstractTest.java
@@ -718,14 +718,14 @@
         /**
          *
          */
-        public void listen() {
+        @Override public void listen() {
             listen = true;
         }
 
         /**
          *
          */
-        public void stopListen() {
+        @Override public void stopListen() {
             listen = false;
         }
 
@@ -733,7 +733,7 @@
          * @param type Event type.
          * @return Count.
          */
-        public int eventCount(int type) {
+        @Override public int eventCount(int type) {
             assert type > 0;
 
             AtomicInteger cntr = cntrs.get(type);
@@ -744,7 +744,7 @@
         /**
          * Reset listener.
          */
-        public void reset() {
+        @Override public void reset() {
             cntrs.clear();
 
             futs.clear();
@@ -778,7 +778,7 @@
          * @param evtCnts Array of tuples with values: V1 - event type, V2 - expected event count.
          * @throws IgniteCheckedException If failed to wait.
          */
-        public void waitForEventCount(IgniteBiTuple<Integer, Integer>... evtCnts)
+        @Override public void waitForEventCount(IgniteBiTuple<Integer, Integer>... evtCnts)
             throws IgniteCheckedException {
             if (F.isEmpty(evtCnts))
                 return;
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheNodeFailureAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheNodeFailureAbstractTest.java
index d98903f..8de2d79 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheNodeFailureAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/GridCacheNodeFailureAbstractTest.java
@@ -90,7 +90,6 @@
 
         disco.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(disco);
 
         c.setDeploymentMode(DeploymentMode.SHARED);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheClientNodeChangingTopologyTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheClientNodeChangingTopologyTest.java
index 10c5f37..300ecb9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheClientNodeChangingTopologyTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheClientNodeChangingTopologyTest.java
@@ -1703,8 +1703,7 @@
                                     }
                                 }
                                 finally {
-                                    cache0.context().evicts().touch(entry,
-                                        cache0.context().affinity().affinityTopologyVersion());
+                                    entry.touch(entry.context().affinity().affinityTopologyVersion());
                                 }
                             }
                             else
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheCreatePutTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheCreatePutTest.java
index 113cea4..646084c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheCreatePutTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCacheCreatePutTest.java
@@ -55,7 +55,7 @@
     private boolean client;
 
     /** {@inheritDoc} */
-    protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
         ((TcpCommunicationSpi)cfg.getCommunicationSpi()).setSharedMemoryPort(-1);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCachePartitionLossPolicySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCachePartitionLossPolicySelfTest.java
index 7cefc67..1aacc9c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCachePartitionLossPolicySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteCachePartitionLossPolicySelfTest.java
@@ -21,9 +21,12 @@
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
 import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import javax.cache.CacheException;
 import org.apache.ignite.Ignite;
@@ -69,7 +72,7 @@
     private PartitionLossPolicy partLossPlc;
 
     /** */
-    private static final String CACHE_NAME = "partitioned";
+    protected static final String CACHE_NAME = "partitioned";
 
     /** */
     private int backups = 0;
@@ -78,7 +81,7 @@
     private final AtomicBoolean delayPartExchange = new AtomicBoolean(false);
 
     /** */
-    private final TopologyChanger killSingleNode = new TopologyChanger(false, Arrays.asList(3), Arrays.asList(0, 1, 2, 4));
+    private final TopologyChanger killSingleNode = new TopologyChanger(false, Arrays.asList(3), Arrays.asList(0, 1, 2, 4),0);
 
     /** {@inheritDoc} */
     @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
@@ -98,6 +101,15 @@
 
         cfg.setClientMode(client);
 
+        cfg.setCacheConfiguration(cacheConfiguration());
+
+        return cfg;
+    }
+
+    /**
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration<Integer, Integer> cacheConfiguration() {
         CacheConfiguration<Integer, Integer> cacheCfg = new CacheConfiguration<>(CACHE_NAME);
 
         cacheCfg.setCacheMode(PARTITIONED);
@@ -106,9 +118,7 @@
         cacheCfg.setPartitionLossPolicy(partLossPlc);
         cacheCfg.setAffinity(new RendezvousAffinityFunction(false, 32));
 
-        cfg.setCacheConfiguration(cacheCfg);
-
-        return cfg;
+        return cacheCfg;
     }
 
     /** {@inheritDoc} */
@@ -167,7 +177,16 @@
     public void testReadWriteSafeAfterKillTwoNodes() throws Exception {
         partLossPlc = PartitionLossPolicy.READ_WRITE_SAFE;
 
-        checkLostPartition(true, true, new TopologyChanger(false, Arrays.asList(3, 2), Arrays.asList(0, 1, 4)));
+        checkLostPartition(true, true, new TopologyChanger(false, Arrays.asList(3, 2), Arrays.asList(0, 1, 4), 0));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testReadWriteSafeAfterKillTwoNodesWithDelay() throws Exception {
+        partLossPlc = PartitionLossPolicy.READ_WRITE_SAFE;
+
+        checkLostPartition(true, true, new TopologyChanger(false, Arrays.asList(3, 2), Arrays.asList(0, 1, 4), 20));
     }
 
     /**
@@ -178,7 +197,7 @@
 
         backups = 1;
 
-        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 2, 1), Arrays.asList(0, 4)));
+        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 2, 1), Arrays.asList(0, 4), 0));
     }
 
     /**
@@ -187,7 +206,7 @@
     public void testReadWriteSafeAfterKillCrd() throws Exception {
         partLossPlc = PartitionLossPolicy.READ_WRITE_SAFE;
 
-        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 0), Arrays.asList(1, 2, 4)));
+        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 0), Arrays.asList(1, 2, 4), 0));
     }
 
     /**
@@ -198,7 +217,7 @@
 
         backups = 1;
 
-        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 2), Arrays.asList(0, 1, 4)));
+        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 2), Arrays.asList(0, 1, 4), 0));
     }
 
     /**
@@ -209,7 +228,7 @@
 
         backups = 1;
 
-        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 0), Arrays.asList(1, 2, 4)));
+        checkLostPartition(true, true, new TopologyChanger(true, Arrays.asList(3, 0), Arrays.asList(1, 2, 4), 0));
     }
 
     /**
@@ -247,7 +266,7 @@
     private void checkLostPartition(boolean canWrite, boolean safe, TopologyChanger topChanger) throws Exception {
         assert partLossPlc != null;
 
-        int part = topChanger.changeTopology();
+        int part = topChanger.changeTopology().get(0);
 
         // Wait for all grids (servers and client) have same topology version
         // to make sure that all nodes received map with lost partition.
@@ -282,6 +301,9 @@
 
             // Check that writing in recover mode does not clear partition state.
             verifyCacheOps(canWrite, safe, part, ig);
+
+            // Validate queries.
+            validateQuery(safe, part, ig);
         }
 
         // Check that partition state does not change after we start a new node.
@@ -371,11 +393,13 @@
 
     /**
      * @param nodes List of nodes to find partition.
-     * @return Partition id that isn't primary or backup for specified nodes.
+     * @return List of partitions that aren't primary or backup for specified nodes.
      */
-    protected Integer noPrimaryOrBackupPartition(List<Integer> nodes) {
+    protected List<Integer> noPrimaryOrBackupPartition(List<Integer> nodes) {
         Affinity<Object> aff = ignite(4).affinity(CACHE_NAME);
 
+        List<Integer> parts = new ArrayList<>();
+
         Integer part;
 
         for (int i = 0; i < aff.partitions(); i++) {
@@ -390,11 +414,21 @@
             }
 
             if (part != null)
-                return part;
-
+                parts.add(i);
         }
 
-        return null;
+        return parts;
+    }
+
+    /**
+     * Validate query execution on a node.
+     *
+     * @param safe Safe flag.
+     * @param part Partition.
+     * @param node Node.
+     */
+    protected void validateQuery(boolean safe, int part, Ignite node) {
+        // No-op.
     }
 
     /** */
@@ -408,22 +442,28 @@
         /** List of nodes to be alive */
         private List<Integer> aliveNodes;
 
+        /** Delay between node stops */
+        private long stopDelay;
+
         /**
          * @param delayExchange Flag for delay partition exchange.
          * @param killNodes List of nodes to kill.
          * @param aliveNodes List of nodes to be alive.
+         * @param stopDelay Delay between stopping nodes.
          */
-        public TopologyChanger(boolean delayExchange, List<Integer> killNodes, List<Integer> aliveNodes) {
+        public TopologyChanger(boolean delayExchange, List<Integer> killNodes, List<Integer> aliveNodes,
+            long stopDelay) {
             this.delayExchange = delayExchange;
             this.killNodes = killNodes;
             this.aliveNodes = aliveNodes;
+            this.stopDelay = stopDelay;
         }
 
         /**
          * @return Lost partition ID.
          * @throws Exception If failed.
          */
-        protected int changeTopology() throws Exception {
+        protected List<Integer> changeTopology() throws Exception {
             startGrids(4);
 
             Affinity<Object> aff = ignite(0).affinity(CACHE_NAME);
@@ -442,16 +482,21 @@
 
             awaitPartitionMapExchange();
 
-            final Integer part = noPrimaryOrBackupPartition(aliveNodes);
+            final List<Integer> parts = noPrimaryOrBackupPartition(aliveNodes);
 
-            if (part == null)
+            if (parts.size() == 0)
                 throw new IllegalStateException("No partition on nodes: " + killNodes);
 
-            final List<Semaphore> partLost = new ArrayList<>();
+            final List<Map<Integer, Semaphore>> lostMap = new ArrayList<>();
 
             for (int i : aliveNodes) {
-                final Semaphore sem = new Semaphore(0);
-                partLost.add(sem);
+                HashMap<Integer, Semaphore> semaphoreMap = new HashMap<>();
+
+                for (Integer part : parts)
+                    semaphoreMap.put(part, new Semaphore(0));
+
+                lostMap.add(semaphoreMap);
+
 
                 grid(i).events().localListen(new P1<Event>() {
                     @Override public boolean apply(Event evt) {
@@ -459,8 +504,10 @@
 
                         CacheRebalancingEvent cacheEvt = (CacheRebalancingEvent)evt;
 
-                        if (cacheEvt.partition() == part && F.eq(CACHE_NAME, cacheEvt.cacheName()))
-                            sem.release();
+                        if (F.eq(CACHE_NAME, cacheEvt.cacheName())) {
+                            if (semaphoreMap.containsKey(cacheEvt.partition()))
+                                semaphoreMap.get(cacheEvt.partition()).release();
+                        }
 
                         return true;
                     }
@@ -471,18 +518,35 @@
             if (delayExchange)
                 delayPartExchange.set(true);
 
-            for (Integer node : killNodes)
-                grid(node).close();
+            ExecutorService executor = Executors.newFixedThreadPool(killNodes.size());
+
+            for (Integer node : killNodes) {
+                executor.submit(new Runnable() {
+                    @Override public void run() {
+                        grid(node).close();
+                    }
+                });
+
+                Thread.sleep(stopDelay);
+            }
+
+            executor.shutdown();
 
             delayPartExchange.set(false);
 
-            for (Semaphore sem : partLost)
-                assertTrue("Failed to wait for partition LOST event", sem.tryAcquire(1, 10L, TimeUnit.SECONDS));
+            Thread.sleep(5_000L);
 
-            for (Semaphore sem : partLost)
-                assertFalse("Partition LOST event raised twice", sem.tryAcquire(1, 1L, TimeUnit.SECONDS));
+            for (Map<Integer, Semaphore> map : lostMap) {
+                for (Map.Entry<Integer, Semaphore> entry : map.entrySet())
+                    assertTrue("Failed to wait for partition LOST event for partition:" + entry.getKey(), entry.getValue().tryAcquire(1));
+            }
 
-            return part;
+            for (Map<Integer, Semaphore> map : lostMap) {
+                for (Map.Entry<Integer, Semaphore> entry : map.entrySet())
+                    assertFalse("Partition LOST event raised twice for partition:" + entry.getKey(), entry.getValue().tryAcquire(1));
+            }
+
+            return parts;
         }
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeMultiServerTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeMultiServerTest.java
index a6318d4..b7003d4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeMultiServerTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeMultiServerTest.java
@@ -24,7 +24,7 @@
     /**
      * @return Number of server nodes.
      */
-    protected int serversNumber() {
+    @Override protected int serversNumber() {
         return 4;
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeTest.java
index 6751b1e..4b613c2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/IgniteOptimisticTxSuspendResumeTest.java
@@ -338,7 +338,7 @@
                     }
 
                     GridTestUtils.runMultiThreaded(new CI1Exc<Integer>() {
-                        public void applyx(Integer idx) throws Exception {
+                        @Override public void applyx(Integer idx) throws Exception {
                             Transaction tx = clientTxs.get(idx);
 
                             assertEquals(SUSPENDED, tx.state());
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheColocatedTxSingleThreadedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheColocatedTxSingleThreadedSelfTest.java
index 644fabd..c55a606 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheColocatedTxSingleThreadedSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheColocatedTxSingleThreadedSelfTest.java
@@ -65,7 +65,6 @@
 
         spi.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(spi);
 
         c.setCacheConfiguration(cc);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadDelayedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadDelayedSelfTest.java
index 0105ece..c509366 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadDelayedSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadDelayedSelfTest.java
@@ -94,7 +94,6 @@
 
         disco.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(disco);
         c.setCacheConfiguration(cc);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadMessageCountTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadMessageCountTest.java
index e2a8009..886a886 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadMessageCountTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheDhtPreloadMessageCountTest.java
@@ -67,7 +67,6 @@
 
         disco.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(disco);
         c.setCacheConfiguration(cc);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheTxNodeFailureSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheTxNodeFailureSelfTest.java
index 9b3033a..a5c41a0 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheTxNodeFailureSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridCacheTxNodeFailureSelfTest.java
@@ -360,7 +360,7 @@
             assertTrue("Invalid backup cache entry: " + dhtEntry, dhtEntry.rawGet() == null);
         }
 
-        backupCache.context().evicts().touch(dhtEntry, null);
+        dhtEntry.touch(null);
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCacheStartWithLoadTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCacheStartWithLoadTest.java
new file mode 100644
index 0000000..acccc5b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/dht/IgniteCacheStartWithLoadTest.java
@@ -0,0 +1,165 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.distributed.dht;
+
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.junit.Assert;
+
+/**
+ *
+ */
+public class IgniteCacheStartWithLoadTest extends GridCommonAbstractTest {
+    /** */
+    static final String CACHE_NAME = "tx_repl";
+
+    @Override
+    protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setConsistentId(igniteInstanceName);
+
+        CacheConfiguration ccfg = new CacheConfiguration().setName(CACHE_NAME)
+            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+            .setCacheMode(CacheMode.REPLICATED)
+            .setDataRegionName("ds")
+            .setAffinity(new RendezvousAffinityFunction(false, 32));
+
+        DataStorageConfiguration dsCfg = new DataStorageConfiguration()
+            .setDataRegionConfigurations(
+                new DataRegionConfiguration()
+                    .setName("ds")
+                    .setPersistenceEnabled(true)
+                    .setMaxSize(1024 * 1024 * 1024)
+            );
+
+        cfg.setDataStorageConfiguration(dsCfg);
+        cfg.setCacheConfiguration(ccfg);
+
+        return cfg;
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testNoRebalanceDuringCacheStart() throws Exception {
+        IgniteEx crd = (IgniteEx)startGrids(4);
+
+        crd.cluster().active(true);
+
+        AtomicBoolean txLoadStop = new AtomicBoolean();
+
+        AtomicInteger txLoaderNo = new AtomicInteger(0);
+
+        IgniteInternalFuture txLoadFuture = GridTestUtils.runMultiThreadedAsync(() -> {
+            Ignite node = grid(txLoaderNo.getAndIncrement());
+            IgniteCache<Object, Object> cache = node.cache(CACHE_NAME);
+            ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+            final int keys = 5;
+            final int keysSpace = 10_000;
+
+            while (!txLoadStop.get()) {
+                try (Transaction tx = node.transactions().txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
+                    for (int it = 0; it < keys; it++) {
+                        int key = rnd.nextInt(keysSpace);
+                        byte[] value = new byte[2048];
+
+                        cache.put(key, value);
+                    }
+                    tx.commit();
+
+                    U.sleep(10);
+                }
+                catch (Throwable t) {
+                    log.warning("Unexpected exception during tx load.", t);
+                }
+            }
+        }, 4, "tx-loader");
+
+        AtomicBoolean hasRebalance = new AtomicBoolean();
+
+        AtomicBoolean cacheRestartStop = new AtomicBoolean();
+
+        IgniteInternalFuture cacheRestartFuture = GridTestUtils.runAsync(() -> {
+            Ignite node = grid(0);
+
+            final String tmpCacheName = "tmp";
+
+            while (!cacheRestartStop.get()) {
+                try {
+                    node.getOrCreateCache(tmpCacheName);
+
+                    boolean hasMoving = false;
+
+                    for (int i = 0; i < 4; i++) {
+                        hasMoving |= grid(i).cachex(CACHE_NAME).context().topology().hasMovingPartitions();
+                    }
+
+                    if (hasMoving) {
+                        log.error("Cache restarter has been stopped because rebalance is triggered for stable caches.");
+
+                        hasRebalance.set(true);
+
+                        return;
+                    }
+
+                    node.destroyCache(tmpCacheName);
+
+                    U.sleep(10_000);
+                }
+                catch (Throwable t) {
+                    log.warning("Unexpected exception during caches restart.", t);
+                }
+            }
+        });
+
+        U.sleep(60_000);
+
+        cacheRestartStop.set(true);
+        txLoadStop.set(true);
+
+        cacheRestartFuture.get();
+        txLoadFuture.get();
+
+        Assert.assertFalse(hasRebalance.get());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 5 * 60 * 1000;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiGetSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiGetSelfTest.java
index e3a00c0..e27c9ad 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiGetSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiGetSelfTest.java
@@ -83,7 +83,6 @@
 
         spi.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(spi);
 
         c.setCacheConfiguration(cc);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiNodeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiNodeSelfTest.java
index ee82d5c..06e1862 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiNodeSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCacheNearMultiNodeSelfTest.java
@@ -107,7 +107,6 @@
 
         spi.setIpFinder(ipFinder);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
         cfg.setDiscoverySpi(spi);
 
         CacheConfiguration cacheCfg = defaultCacheConfiguration();
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCachePartitionedTxSingleThreadedSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCachePartitionedTxSingleThreadedSelfTest.java
index ef472a1..17936a7 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCachePartitionedTxSingleThreadedSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/GridCachePartitionedTxSingleThreadedSelfTest.java
@@ -64,7 +64,6 @@
 
         spi.setIpFinder(ipFinder);
 
-        c.setFailureDetectionTimeout(Integer.MAX_VALUE);
         c.setDiscoverySpi(spi);
 
         c.setCacheConfiguration(cc);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java
index 54cadc5..43db931 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingOrderingTest.java
@@ -807,7 +807,7 @@
          * @param part the partition
          * @return the set for the given partition
          */
-        public Set<IntegerKey> ensureKeySet(final int part) {
+        @Override public Set<IntegerKey> ensureKeySet(final int part) {
             return ensureKeySet(part, partMap);
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingSyncSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingSyncSelfTest.java
index a027a41..dc1a33d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingSyncSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/distributed/rebalancing/GridCacheRebalancingSyncSelfTest.java
@@ -133,7 +133,7 @@
         cachePCfg2.setRebalanceMode(CacheRebalanceMode.SYNC);
         cachePCfg2.setBackups(1);
         cachePCfg2.setRebalanceOrder(2);
-        //cachePCfg2.setRebalanceDelay(5000);//Known issue, possible deadlock in case of low priority cache rebalancing delayed.
+        cachePCfg2.setRebalanceDelay(5000);
 
         CacheConfiguration<Integer, Integer> cacheRCfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/DhtAndNearEvictionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/DhtAndNearEvictionTest.java
new file mode 100644
index 0000000..8443469
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/eviction/DhtAndNearEvictionTest.java
@@ -0,0 +1,189 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.eviction;
+
+import java.io.Serializable;
+import java.util.Collections;
+import javax.cache.Cache;
+import javax.cache.configuration.Factory;
+import javax.cache.integration.CacheLoaderException;
+import javax.cache.integration.CacheWriterException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.eviction.lru.LruEvictionPolicyFactory;
+import org.apache.ignite.cache.store.CacheStoreAdapter;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.NearCacheConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridStringLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Checking that DHT and near cache evictions work correctly when both are set.
+ *
+ * This is a regression test for IGNITE-9315.
+ */
+public class DhtAndNearEvictionTest extends GridCommonAbstractTest {
+    /** */
+    public GridStringLogger strLog;
+
+    /** */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setGridLogger(strLog);
+
+        TcpDiscoveryVmIpFinder ipFinder = new TcpDiscoveryVmIpFinder();
+        ipFinder.setAddresses(Collections.singleton("127.0.0.1:47500..47501"));
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(ipFinder));
+
+        return cfg;
+    }
+
+    /** */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        strLog = new GridStringLogger(false, log);
+        strLog.logLength(1024 * 1024);
+    }
+
+    /** */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        super.afterTest();
+    }
+
+    /**
+     * Checking the case that provokes usage of
+     * GridCacheUtils.createBackupPostProcessingClosure.BackupPostProcessingClosure
+     * which used to be affected by IGNITE-9315:
+     * <ul>
+     *     <li>2 nodes, one writing, one reading</li>
+     *     <li>cache store with read-through</li>
+     *     <li>backups=1</li>
+     * </ul>
+     */
+    public void testConcurrentWritesAndReadsWithReadThrough() throws Exception {
+        startGrid(0);
+        startGrid(1);
+
+        CacheConfiguration<?, ?> ccfg = new CacheConfiguration<Integer, Integer>("mycache")
+            .setOnheapCacheEnabled(true)
+            .setEvictionPolicyFactory(new LruEvictionPolicyFactory<>(500))
+            .setNearConfiguration(
+                new NearCacheConfiguration<Integer, Integer>()
+                    .setNearEvictionPolicyFactory(new LruEvictionPolicyFactory<>(100))
+            )
+            .setReadThrough(true)
+            .setCacheStoreFactory(DummyCacheStore.factoryOf())
+            .setBackups(1);
+
+        grid(0).createCache(ccfg);
+
+        IgniteInternalFuture<?> fut1 = GridTestUtils.runAsync(() -> {
+            IgniteCache<Integer, Integer> cache = grid(0).cache("mycache");
+
+            for (int i = 0; i < 1000; i++)
+                cache.put(i, i);
+
+            return null;
+        });
+
+        IgniteInternalFuture<?> fut2 = GridTestUtils.runAsync(() -> {
+            IgniteCache<Integer, Integer> cache = grid(1).cache("mycache");
+
+            for (int i = 0; i < 1000; i++)
+                cache.get(i);
+
+            return null;
+        });
+
+        // AssertionError may leave the node hanging.
+        // Because of that, wait until either the futures are done or the log contains an error.
+        while (!fut1.isDone() || !fut2.isDone()) {
+            assertFalse(strLog.toString().contains("AssertionError"));
+
+            Thread.sleep(1000);
+        }
+
+        fut1.get();
+        fut2.get();
+
+        assertFalse(strLog.toString().contains("AssertionError"));
+    }
+
+    /**
+     * Checking rebalancing which used to be affected by IGNITE-9315.
+     */
+    public void testRebalancing() throws Exception {
+        Ignite grid0 = startGrid(0);
+
+        CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<Integer, Integer>("mycache")
+            .setOnheapCacheEnabled(true)
+            .setEvictionPolicyFactory(new LruEvictionPolicyFactory<>(500))
+            .setNearConfiguration(
+                new NearCacheConfiguration<Integer, Integer>()
+                    .setNearEvictionPolicyFactory(new LruEvictionPolicyFactory<>(100))
+            );
+
+        IgniteCache<Integer, Integer> cache = grid0.createCache(ccfg);
+
+        for (int i = 0; i < 1000; i++)
+            cache.put(i, i);
+
+        startGrid(1);
+
+        awaitPartitionMapExchange(true, true, null);
+
+        assertFalse(strLog.toString().contains("AssertionError"));
+    }
+
+    /** */
+    private static class DummyCacheStore extends CacheStoreAdapter<Integer, Integer> implements Serializable {
+        /** {@inheritDoc} */
+        @Override public Integer load(Integer key) throws CacheLoaderException {
+            return key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void write(
+            Cache.Entry<? extends Integer, ? extends Integer> entry) throws CacheWriterException {
+            // No-op.
+        }
+
+        /** {@inheritDoc} */
+        @Override public void delete(Object key) throws CacheWriterException {
+            // No-op.
+        }
+
+        /** */
+        public static Factory<DummyCacheStore> factoryOf() {
+            return new Factory<DummyCacheStore>() {
+                @Override public DummyCacheStore create() {
+                    return new DummyCacheStore();
+                }
+            };
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheStoreSessionWriteBehindAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheStoreSessionWriteBehindAbstractTest.java
index 7ad240d..832676d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheStoreSessionWriteBehindAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheStoreSessionWriteBehindAbstractTest.java
@@ -74,7 +74,7 @@
      * @return Cache configuration.
      * @throws Exception In case of error.
      */
-    @SuppressWarnings("unchecked")
+    @Override @SuppressWarnings("unchecked")
     protected CacheConfiguration cacheConfiguration(String igniteInstanceName) throws Exception {
         CacheConfiguration ccfg0 = super.cacheConfiguration(igniteInstanceName);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheTxStoreSessionWriteBehindCoalescingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheTxStoreSessionWriteBehindCoalescingTest.java
index 58cc380..a90b4f1 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheTxStoreSessionWriteBehindCoalescingTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/integration/IgniteCacheTxStoreSessionWriteBehindCoalescingTest.java
@@ -40,7 +40,7 @@
      * @return Cache configuration.
      * @throws Exception In case of error.
      */
-    @SuppressWarnings("unchecked")
+    @Override @SuppressWarnings("unchecked")
     protected CacheConfiguration cacheConfiguration(String igniteInstanceName) throws Exception {
         CacheConfiguration ccfg = super.cacheConfiguration(igniteInstanceName);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmFullApiSelfTest.java
index 2a70668..1a66e3f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicClientOnlyMultiJvmFullApiSelfTest extends
     GridCacheAtomicClientOnlyMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmP2PDisabledFullApiSelfTest.java
index aaf5151..396e7be 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicClientOnlyMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicClientOnlyMultiJvmP2PDisabledFullApiSelfTest extends
     GridCacheAtomicClientOnlyMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicCopyOnReadDisabledMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicCopyOnReadDisabledMultiJvmFullApiSelfTest.java
index 3f33a0d..095f5bf 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicCopyOnReadDisabledMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicCopyOnReadDisabledMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicCopyOnReadDisabledMultiJvmFullApiSelfTest extends
     GridCacheAtomicCopyOnReadDisabledMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmFullApiSelfTest.java
index fce0f51..dc69ab2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmFullApiSelfTest.java
@@ -24,7 +24,7 @@
  */
 public class GridCacheAtomicMultiJvmFullApiSelfTest extends GridCacheAtomicMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmP2PDisabledFullApiSelfTest.java
index 51aa9e28..a026597 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicMultiJvmP2PDisabledFullApiSelfTest extends
     GridCacheAtomicMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearEnabledMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearEnabledMultiJvmFullApiSelfTest.java
index 8623de1..5d28cb7 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearEnabledMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearEnabledMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicNearEnabledMultiJvmFullApiSelfTest extends
     GridCacheAtomicNearEnabledMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmFullApiSelfTest.java
index 29c0c86..b7626b4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicNearOnlyMultiJvmFullApiSelfTest extends
     GridCacheAtomicNearOnlyMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
index 4993aba..424b7c2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheAtomicNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheAtomicNearOnlyMultiJvmP2PDisabledFullApiSelfTest extends
     GridCacheAtomicNearOnlyMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmFullApiSelfTest.java
index 1a062db..7f8e268 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmFullApiSelfTest.java
@@ -24,7 +24,7 @@
  */
 public class GridCacheNearOnlyMultiJvmFullApiSelfTest extends GridCacheNearOnlyMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
index efe7c53..6fe3ab5 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheNearOnlyMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheNearOnlyMultiJvmP2PDisabledFullApiSelfTest extends
     GridCacheNearOnlyMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedCopyOnReadDisabledMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedCopyOnReadDisabledMultiJvmFullApiSelfTest.java
index 321f027..6aca1ff 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedCopyOnReadDisabledMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedCopyOnReadDisabledMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCachePartitionedCopyOnReadDisabledMultiJvmFullApiSelfTest extends
     GridCachePartitionedCopyOnReadDisabledMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmFullApiSelfTest.java
index 51b2c73..68f5df1 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmFullApiSelfTest.java
@@ -24,7 +24,7 @@
  */
 public class GridCachePartitionedMultiJvmFullApiSelfTest extends GridCachePartitionedMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmP2PDisabledFullApiSelfTest.java
index 8d4ca45..63f2334 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCachePartitionedMultiJvmP2PDisabledFullApiSelfTest extends
     GridCachePartitionedMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmFullApiSelfTest.java
index 87dc8ee..2fde512 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCachePartitionedNearDisabledMultiJvmFullApiSelfTest extends
     GridCachePartitionedNearDisabledMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmP2PDisabledFullApiSelfTest.java
index 2c006ae..5947f8d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCachePartitionedNearDisabledMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCachePartitionedNearDisabledMultiJvmP2PDisabledFullApiSelfTest extends
     GridCachePartitionedNearDisabledMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedAtomicMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedAtomicMultiJvmFullApiSelfTest.java
index 4d37c7f..e6df822 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedAtomicMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedAtomicMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheReplicatedAtomicMultiJvmFullApiSelfTest extends
     GridCacheReplicatedAtomicMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmFullApiSelfTest.java
index 78f0466..b03c79f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmFullApiSelfTest.java
@@ -24,7 +24,7 @@
  */
 public class GridCacheReplicatedMultiJvmFullApiSelfTest extends GridCacheReplicatedMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmP2PDisabledFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmP2PDisabledFullApiSelfTest.java
index 1305e3c..9ebefe3 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmP2PDisabledFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedMultiJvmP2PDisabledFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheReplicatedMultiJvmP2PDisabledFullApiSelfTest extends
     GridCacheReplicatedMultiNodeP2PDisabledFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedNearOnlyMultiJvmFullApiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedNearOnlyMultiJvmFullApiSelfTest.java
index 8693a51..9b4db5b 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedNearOnlyMultiJvmFullApiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/multijvm/GridCacheReplicatedNearOnlyMultiJvmFullApiSelfTest.java
@@ -25,7 +25,7 @@
 public class GridCacheReplicatedNearOnlyMultiJvmFullApiSelfTest extends
     GridCacheReplicatedNearOnlyMultiNodeFullApiSelfTest {
     /** {@inheritDoc} */
-    protected boolean isMultiJvm() {
+    @Override protected boolean isMultiJvm() {
         return true;
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractBasicCoordinatorFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractBasicCoordinatorFailoverTest.java
new file mode 100644
index 0000000..b2cbe05
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractBasicCoordinatorFailoverTest.java
@@ -0,0 +1,681 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import javax.cache.CacheException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.cluster.ClusterTopologyException;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.distributed.TestCacheNodeExcludingFilter;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccSnapshotResponse;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiInClosure;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE;
+
+/**
+ * Base class for Mvcc coordinator failover test.
+ */
+public abstract class CacheMvccAbstractBasicCoordinatorFailoverTest extends CacheMvccAbstractTest {
+    /**
+     * @param concurrency Transaction concurrency.
+     * @param isolation Transaction isolation.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    protected void coordinatorFailureSimple(
+        final TransactionConcurrency concurrency,
+        final TransactionIsolation isolation,
+        ReadMode readMode,
+        WriteMode writeMode
+    ) throws Exception {
+        testSpi = true;
+
+        startGrids(3);
+
+        client = true;
+
+        final Ignite client = startGrid(3);
+
+        final IgniteCache cache = client.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class, Integer.class));
+
+        final Integer key1 = primaryKey(jcache(1));
+        final Integer key2 = primaryKey(jcache(2));
+
+        TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(ignite(0));
+
+        crdSpi.blockMessages(MvccSnapshotResponse.class, client.name());
+
+        IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable() {
+            @Override public Object call() throws Exception {
+                try {
+                    try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
+                        writeByMode(cache, key1, 1, writeMode, INTEGER_CODEC);
+                        writeByMode(cache, key2, 2, writeMode, INTEGER_CODEC);
+
+                        tx.commit();
+                    }
+
+                    fail();
+                }
+                catch (ClusterTopologyException e) {
+                    info("Expected exception: " + e);
+                }
+                catch (CacheException e) {
+                    info("Expected exception: " + e);
+                }
+                catch (Throwable e) {
+                    fail("Unexpected exception: " + e);
+                }
+
+                return null;
+            }
+        }, "tx-thread");
+
+        crdSpi.waitForBlocked();
+
+        stopGrid(0);
+
+        fut.get();
+
+        assertNull(readByMode(cache, key1, readMode, INTEGER_CODEC));
+        assertNull(readByMode(cache, key2, readMode, INTEGER_CODEC));
+
+        try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
+            writeByMode(cache, key1, 1, writeMode, INTEGER_CODEC);
+            writeByMode(cache, key2, 2, writeMode, INTEGER_CODEC);
+
+            tx.commit();
+        }
+
+        assertEquals(1, readByMode(cache, key1, readMode, INTEGER_CODEC));
+        assertEquals(2, readByMode(cache, key2, readMode, INTEGER_CODEC));
+    }
+
+    /**
+     * @param readDelay {@code True} if delays get requests.
+     * @param readInTx {@code True} to read inside transaction.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    protected void readInProgressCoordinatorFails(boolean readDelay,
+        final boolean readInTx,
+        final TransactionConcurrency concurrency,
+        final TransactionIsolation isolation,
+        ReadMode readMode,
+        WriteMode writeMode,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC) throws Exception {
+        final int COORD_NODES = 5;
+        final int SRV_NODES = 4;
+
+        if (readDelay)
+            testSpi = true;
+
+        startGrids(COORD_NODES);
+
+        startGridsMultiThreaded(COORD_NODES, SRV_NODES);
+
+        client = true;
+
+        Ignite client = startGrid(COORD_NODES + SRV_NODES);
+
+        final List<String> cacheNames = new ArrayList<>();
+
+        final int KEYS = 100;
+
+        final Map<Integer, Integer> vals = new HashMap<>();
+
+        for (int i = 0; i < KEYS; i++)
+            vals.put(i, 0);
+
+        String[] exclude = new String[COORD_NODES];
+
+        for (int i = 0; i < COORD_NODES; i++)
+            exclude[i] = testNodeName(i);
+
+        for (CacheConfiguration ccfg : cacheConfigurations()) {
+            ccfg.setName("cache-" + cacheNames.size());
+
+            if (cfgC != null)
+                cfgC.apply(ccfg);
+
+            // First server nodes are 'dedicated' coordinators.
+            ccfg.setNodeFilter(new TestCacheNodeExcludingFilter(exclude));
+
+            cacheNames.add(ccfg.getName());
+
+            IgniteCache cache = client.createCache(ccfg);
+
+            boolean updated = false;
+
+            while (!updated) {
+                try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
+                    tx.timeout(TX_TIMEOUT);
+
+                    writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+                    tx.commit();
+
+                    updated = true;
+                }
+                catch (Exception e) {
+                    handleTxException(e);
+                }
+            }
+        }
+
+        if (readDelay) {
+            for (int i = COORD_NODES; i < COORD_NODES + SRV_NODES + 1; i++) {
+                TestRecordingCommunicationSpi.spi(ignite(i)).closure(new IgniteBiInClosure<ClusterNode, Message>() {
+                    @Override public void apply(ClusterNode node, Message msg) {
+                        if (msg instanceof GridNearGetRequest)
+                            doSleep(ThreadLocalRandom.current().nextLong(50) + 1);
+                    }
+                });
+            }
+        }
+
+        final AtomicBoolean done = new AtomicBoolean();
+
+        try {
+            final AtomicInteger readNodeIdx = new AtomicInteger(0);
+
+            IgniteInternalFuture getFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    try {
+                        Ignite node = ignite(COORD_NODES + (readNodeIdx.getAndIncrement() % (SRV_NODES + 1)));
+
+                        int cnt = 0;
+
+                        while (!done.get() && !Thread.currentThread().isInterrupted()) {
+                            for (String cacheName : cacheNames) {
+                                IgniteCache cache = node.cache(cacheName);
+
+                                Map<Integer, Integer> res = null;
+
+                                if (readInTx) {
+                                    try (Transaction tx = node.transactions().txStart(concurrency, isolation)) {
+                                        tx.timeout(TX_TIMEOUT);
+
+                                        res = readAllByMode(cache, vals.keySet(), readMode, INTEGER_CODEC);
+
+                                        tx.commit();
+                                    }
+                                    catch (Exception e) { // TODO Remove catch clause when IGNITE-8841 implemented.
+                                        handleTxException(e);
+                                    }
+                                }
+                                else
+                                    res = readAllByMode(cache, vals.keySet(), readMode, INTEGER_CODEC);
+
+                                if (readInTx) { // TODO IGNITE-8841
+                                    assertTrue("res.size=" + (res == null ? 0 : res.size()) + ", res=" + res, res == null || vals.size() == res.size());
+                                }
+                                else {
+                                    assertEquals(vals.size(), res.size());
+
+                                    Integer val0 = null;
+
+                                    for (Integer val : res.values()) {
+                                        if (val0 == null)
+                                            val0 = val;
+                                        else
+                                            assertEquals(val0, val);
+                                    }
+                                }
+                            }
+
+                            cnt++;
+                        }
+
+                        log.info("Finished [node=" + node.name() + ", readCnt=" + cnt + ']');
+
+                        return null;
+                    }
+                    catch (Throwable e) {
+                        error("Unexpected error: " + e, e);
+
+                        throw e;
+                    }
+                }
+            }, ((SRV_NODES + 1) + 1) * 2, "get-thread");
+
+            IgniteInternalFuture putFut1 = GridTestUtils.runAsync(new Callable() {
+                @Override public Void call() throws Exception {
+                    Ignite node = ignite(COORD_NODES);
+
+                    List<IgniteCache> caches = new ArrayList<>();
+
+                    for (String cacheName : cacheNames)
+                        caches.add(node.cache(cacheName));
+
+                    Integer val = 1;
+
+                    while (!done.get()) {
+                        Map<Integer, Integer> vals = new HashMap<>();
+
+                        for (int i = 0; i < KEYS; i++)
+                            vals.put(i, val);
+
+                        for (IgniteCache cache : caches) {
+                            try {
+                                try (Transaction tx = node.transactions().txStart(concurrency, isolation)) {
+                                    tx.timeout(TX_TIMEOUT);
+
+                                    writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+                                    tx.commit();
+                                }
+                            }
+                            catch (Exception e) {
+                                handleTxException(e);
+                            }
+                        }
+
+                        val++;
+                    }
+
+                    return null;
+                }
+            }, "putAll-thread");
+
+            IgniteInternalFuture putFut2 = GridTestUtils.runAsync(new Callable() {
+                @Override public Void call() throws Exception {
+                    Ignite node = ignite(COORD_NODES);
+
+                    IgniteCache cache = node.cache(cacheNames.get(0));
+
+                    Integer val = 0;
+
+                    while (!done.get()) {
+                        try {
+                            try (Transaction tx = node.transactions().txStart(concurrency, isolation)) {
+                                tx.timeout(TX_TIMEOUT);
+
+                                writeByMode(cache, Integer.MAX_VALUE, val, writeMode, INTEGER_CODEC);
+
+                                tx.commit();
+                            }
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+                        }
+
+                        val++;
+                    }
+
+                    return null;
+                }
+            }, "put-thread");
+
+            for (int i = 0; i < COORD_NODES && !getFut.isDone(); i++) {
+                U.sleep(3000);
+
+                stopGrid(i);
+
+                awaitPartitionMapExchange();
+            }
+
+            done.set(true);
+
+            getFut.get();
+            putFut1.get();
+            putFut2.get();
+
+            for (Ignite node : G.allGrids())
+                checkActiveQueriesCleanup(node);
+        }
+        finally {
+            done.set(true);
+        }
+    }
+
+    /**
+     * @param concurrency Tx concurrency level.
+     * @param isolation Tx isolation level.
+     * @param cfgC Cache cfg closure.
+     * @param readMode Read mode.
+     * @param writeMode Write mode.
+     * @throws Exception  If failed.
+     */
+    protected void txInProgressCoordinatorChangeSimple(
+        final TransactionConcurrency concurrency,
+        final TransactionIsolation isolation,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode) throws Exception {
+        MvccProcessorImpl.coordinatorAssignClosure(new CoordinatorAssignClosure());
+
+        Ignite srv0 = startGrids(4);
+
+        client = true;
+
+        startGrid(4);
+
+        client = false;
+
+        nodeAttr = CRD_ATTR;
+
+        int crdIdx = 5;
+
+        startGrid(crdIdx);
+
+        CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+            setNodeFilter(new CoordinatorNodeFilter());
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        srv0.createCache(ccfg);
+
+        Set<Integer> keys = F.asSet(1, 2, 3);
+
+        for (int i = 0; i < 5; i++) {
+            Ignite node = ignite(i);
+
+            info("Test with node: " + node.name());
+
+            IgniteCache cache = node.cache(DEFAULT_CACHE_NAME);
+
+            try (Transaction tx = node.transactions().txStart(concurrency, isolation)) {
+                assertTrue(readAllByMode(cache, keys, readMode, INTEGER_CODEC).isEmpty());
+
+                startGrid(crdIdx + 1);
+
+                stopGrid(crdIdx);
+
+                crdIdx++;
+
+                tx.commit();
+            }
+            catch (Exception e) {
+                handleTxException(e);
+            }
+
+            checkActiveQueriesCleanup(ignite(crdIdx));
+        }
+    }
+
+    /**
+     * @param fromClient {@code True} if read from client node, otherwise from server node.
+     * @throws Exception If failed.
+     */
+    protected void readInProgressCoordinatorFailsSimple(boolean fromClient,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode) throws Exception {
+        for (boolean readInTx : new boolean[]{false, true}) {
+            for (int i = 1; i <= 3; i++) {
+                readInProgressCoordinatorFailsSimple(fromClient, i, readInTx,cfgC, readMode, writeMode);
+
+                afterTest();
+            }
+        }
+    }
+
+    /**
+     * @param fromClient {@code True} if read from client node, otherwise from server node.
+     * @param crdChangeCnt Number of coordinator changes.
+     * @param readInTx {@code True} to read inside transaction.
+     * @param cfgC Cache configuration closure.
+     * @param readMode Read mode.
+     * @param writeMode Write mode.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    private void readInProgressCoordinatorFailsSimple(boolean fromClient,
+        int crdChangeCnt,
+        final boolean readInTx,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode) throws Exception {
+        info("readInProgressCoordinatorFailsSimple [fromClient=" + fromClient +
+            ", crdChangeCnt=" + crdChangeCnt +
+            ", readInTx=" + readInTx + ']');
+
+        TransactionConcurrency concurrency = readMode == ReadMode.GET ? OPTIMISTIC : PESSIMISTIC; // TODO IGNITE-7184
+        TransactionIsolation isolation = readMode == ReadMode.GET ? SERIALIZABLE : REPEATABLE_READ; // TODO IGNITE-7184
+
+        testSpi = true;
+
+        client = false;
+
+        final int SRVS = 3;
+        final int COORDS = crdChangeCnt + 1;
+
+        startGrids(SRVS + COORDS);
+
+        client = true;
+
+        assertTrue(startGrid(SRVS + COORDS).configuration().isClientMode());
+
+        final Ignite getNode = fromClient ? ignite(SRVS + COORDS) : ignite(COORDS);
+
+        String[] excludeNodes = new String[COORDS];
+
+        for (int i = 0; i < COORDS; i++)
+            excludeNodes[i] = testNodeName(i);
+
+        CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+            setNodeFilter(new TestCacheNodeExcludingFilter(excludeNodes));
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        final IgniteCache cache = getNode.createCache(ccfg);
+
+        final Set<Integer> keys = new HashSet<>();
+
+        List<Integer> keys1 = primaryKeys(jcache(COORDS), 10);
+        List<Integer> keys2 = primaryKeys(jcache(COORDS + 1), 10);
+
+        keys.addAll(keys1);
+        keys.addAll(keys2);
+
+        Map<Integer, Integer> vals = new HashMap();
+
+        for (Integer key : keys)
+            vals.put(key, -1);
+
+        try (Transaction tx = getNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+            tx.commit();
+        }
+
+        final TestRecordingCommunicationSpi getNodeSpi = TestRecordingCommunicationSpi.spi(getNode);
+
+        getNodeSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                String msgClsName = msg.getClass().getSimpleName();
+
+                return msgClsName.matches("GridNearGetRequest|GridH2QueryRequest|GridCacheQueryRequest");
+            }
+        });
+
+        IgniteInternalFuture getFut = GridTestUtils.runAsync(new Callable() {
+            @Override public Object call() throws Exception {
+                Map<Integer, Integer> res = null;
+
+                if (readInTx) {
+                    try (Transaction tx = getNode.transactions().txStart(concurrency, isolation)) {
+                        res = readAllByMode(cache, keys, readMode, INTEGER_CODEC);
+
+                        tx.rollback();
+                    }
+                    catch (Exception e) {
+                        handleTxException(e);
+                    }
+                }
+                else
+                    res = readAllByMode(cache, keys, readMode, INTEGER_CODEC);
+
+                assertTrue((res != null || readInTx) || (res != null && 20 == res.size()));
+
+                if (res != null) {
+                    Integer val = null;
+
+                    for (Integer val0 : res.values()) {
+                        assertNotNull(val0);
+
+                        if (val == null)
+                            val = val0;
+                        else
+                            assertEquals("res=" + res, val, val0);
+                    }
+                }
+
+                return null;
+            }
+        }, "get-thread");
+
+        getNodeSpi.waitForBlocked();
+
+        for (int i = 0; i < crdChangeCnt; i++)
+            stopGrid(i);
+
+        for (int i = 0; i < 10; i++) {
+            vals = new HashMap();
+
+            for (Integer key : keys)
+                vals.put(key, i);
+
+            while (true) {
+                try (Transaction tx = getNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+                    tx.commit();
+
+                    break;
+                }
+                catch (Exception e) {
+                    handleTxException(e);
+                }
+            }
+        }
+
+        getNodeSpi.stopBlock(true);
+
+        getFut.get();
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    protected void checkCoordinatorChangeActiveQueryClientFails_Simple(@Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode) throws Exception {
+        testSpi = true;
+
+        client = false;
+
+        final int SRVS = 3;
+        final int COORDS = 1;
+
+        startGrids(SRVS + COORDS);
+
+        client = true;
+
+        Ignite client = startGrid(SRVS + COORDS);
+
+        CacheConfiguration ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+            setNodeFilter(new TestCacheNodeExcludingFilter(testNodeName(0)));
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        final IgniteCache cache = client.createCache(ccfg);
+
+        final Map<Integer, Integer> vals = new HashMap();
+
+        for (int i = 0; i < 100; i++)
+            vals.put(i, i);
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+            tx.commit();
+        }
+
+        final TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(client);
+
+        clientSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                String msgClsName = msg.getClass().getSimpleName();
+
+                return msgClsName.matches("GridNearGetRequest|GridH2QueryRequest|GridCacheQueryRequest");
+            }
+        });
+
+        IgniteInternalFuture getFut = GridTestUtils.runAsync(new Callable() {
+            @Override public Object call() throws Exception {
+                Map res = readAllByMode(cache, vals.keySet(), readMode, INTEGER_CODEC);
+
+                assertEquals(vals, res);
+
+                return null;
+            }
+        }, "get-thread");
+
+        clientSpi.waitForBlocked();
+
+        stopGrid(0);
+
+        clientSpi.stopBlock(true);
+
+        getFut.get();
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractCoordinatorFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractCoordinatorFailoverTest.java
new file mode 100644
index 0000000..54e4315
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractCoordinatorFailoverTest.java
@@ -0,0 +1,148 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.GET;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.PUT;
+import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE;
+
+/**
+ * Mvcc cache API coordinator failover test.
+ */
+public abstract class CacheMvccAbstractCoordinatorFailoverTest extends CacheMvccAbstractBasicCoordinatorFailoverTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGet_Server_Backups0_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(2, 0, 0, 64,
+            null, true, GET, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGet_SingleNode_CoordinatorFails() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1,
+            null, true, GET, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_Server_Backups0_CoordinatorFails() throws Exception {
+        accountsTxReadAll(2, 0, 0, 64,
+            null, true, SCAN, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_SingleNode_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(1, 0, 0, 1,
+            null, true, SCAN, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_Server_Backups0_RestartCoordinator_GetPut() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD  , 2, 0, 0, 64,
+            null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_RestartCoordinator_GetPut_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD  , 1, 0, 0, 1,
+            null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_Servers_Backups0__PutGet_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        updateNObjectsTest(5, 2, 0, 0, 64, DFLT_TEST_TIME,
+            null, GET, PUT, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode__PutGet_CoordinatorFails() throws Exception {
+
+        updateNObjectsTest(7, 1, 0, 0, 1, DFLT_TEST_TIME,
+            null, GET, PUT, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorFailureSimplePessimisticTxPutGet() throws Exception {
+        coordinatorFailureSimple(PESSIMISTIC, REPEATABLE_READ, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorFailureSimpleSerializableTxPutGet() throws Exception {
+        coordinatorFailureSimple(OPTIMISTIC, SERIALIZABLE, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorFailureSimpleOptimisticTxPutGet() throws Exception {
+        coordinatorFailureSimple(OPTIMISTIC, REPEATABLE_READ, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxInProgressCoordinatorChangeSimple_ReadonlyPutGet() throws Exception {
+        txInProgressCoordinatorChangeSimple(OPTIMISTIC, SERIALIZABLE, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReadInProgressCoordinatorFailsSimple_FromClientPutGet() throws Exception {
+        readInProgressCoordinatorFailsSimple(true, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorChangeActiveQueryClientFails_Simple() throws Exception {
+        checkCoordinatorChangeActiveQueryClientFails_Simple(null, GET, PUT);
+    }
+
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractFeatureTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractFeatureTest.java
new file mode 100644
index 0000000..fe450d1
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractFeatureTest.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.Serializable;
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteInterruptedException;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.util.lang.IgniteClosure2X;
+import org.apache.ignite.internal.util.lang.IgnitePair;
+import org.apache.ignite.internal.util.tostring.GridToStringInclude;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/**
+ * Test that checks
+ */
+public abstract class CacheMvccAbstractFeatureTest extends CacheMvccAbstractTest {
+    /** */
+    private static final String CACHE_NAME = "Person";
+
+    /** */
+    private Ignite node;
+
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        cleanPersistenceDir();
+
+        startGrids(4);
+
+        node = grid(0);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        CacheConfiguration<Integer, Person> ccfg = new CacheConfiguration<>(CACHE_NAME);
+
+        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);
+
+        ccfg.setIndexedTypes(Integer.class, Person.class);
+
+        node.createCache(ccfg);
+
+        for (int i = 0; i < 100; i++)
+            cache().put(i, new Person("Name" + i, "LastName" + i));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        node.destroyCache(CACHE_NAME);
+    }
+
+    /**
+     * @param clo Closure to check consistency upon.
+     * @throws Exception if failed.
+     */
+    void doTestConsistency(IgniteClosure2X<CountDownLatch, CountDownLatch, ?> clo) throws Exception {
+        ExecutorService svc = Executors.newFixedThreadPool(2);
+
+        CountDownLatch startLatch = new CountDownLatch(1);
+
+        CountDownLatch endLatch = new CountDownLatch(1);
+
+        try {
+            Future<IgnitePair<?>> fut = svc.submit(new Callable<IgnitePair<?>>() {
+                @Override public IgnitePair<?> call() {
+                    try (Transaction ignored = node.transactions().txStart()) {
+                        // First result that we'll later check w/respect to REPEATABLE READ semantic.
+                        Object res1 = clo.apply(null, null);
+
+                        Object res2 = clo.apply(startLatch, endLatch);
+
+                        return new IgnitePair<>(res1, res2);
+                    }
+                }
+            });
+
+            svc.submit(new Runnable() {
+                @Override public void run() {
+                    try {
+                        startLatch.await();
+                    }
+                    catch (InterruptedException e) {
+                        throw new IgniteInterruptedException(e);
+                    }
+
+                    try {
+                        modifyData(jdbcTx());
+                    }
+                    catch (SQLException e) {
+                        throw new IgniteException(e);
+                    }
+
+                    endLatch.countDown();
+                }
+            }).get();
+
+            IgnitePair<?> res2 = fut.get();
+
+            assertEquals(res2.get1(), res2.get2());
+        }
+        finally {
+            svc.shutdown();
+        }
+    }
+
+    /**
+     * @return Whether native or SQL transactions must be used.
+     */
+    boolean jdbcTx() {
+        return false;
+    }
+
+    /**
+     * @param jdbcTx Whether concurrent transaction must be of SQL type.
+     */
+    private void modifyData(boolean jdbcTx) throws SQLException {
+        Set<Integer> keys = new HashSet<>(10);
+
+        for (int i = 0; i < 10; i++) {
+            int idx;
+
+            do {
+                idx = (int) (Math.random() * 100) + 1;
+            }
+            while (!keys.add(idx));
+        }
+
+        if (!jdbcTx) {
+            try (Transaction ignored = node.transactions().txStart()) {
+                for (int idx : keys) {
+                    boolean rmv = Math.random() > 0.5;
+
+                    if (rmv)
+                        cache().remove(idx);
+                    else {
+                        Person p = cache().get(idx);
+
+                        cache().put(idx, new Person(p.fName, p.fName + "Updated"));
+                    }
+                }
+            }
+        }
+        else {
+            try (Connection c = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) {
+                c.setAutoCommit(false);
+
+                for (int idx : keys) {
+                    boolean rmv = Math.random() > 0.5;
+
+                    if (rmv) {
+                        try (Statement s = c.createStatement()) {
+                            s.execute("DELETE FROM \"Person\".PERSON WHERE _key = " + idx);
+                        }
+                    }
+                    else {
+                        try (Statement s = c.createStatement()) {
+                            s.execute("UPDATE \"Person\".PERSON SET lname = concat(lname, 'Updated')" +
+                                "WHERE _key = " + idx);
+                        }
+                    }
+                }
+
+                try (Statement s = c.createStatement()) {
+                    s.execute("COMMIT");
+                }
+            }
+        }
+    }
+
+    /**
+     * @return Cache.
+     */
+    IgniteCache<Integer, Person> cache() {
+        return node.cache(CACHE_NAME);
+    }
+
+    /**
+     *
+     */
+    static class Person implements Serializable {
+        /** */
+        @GridToStringInclude
+        @QuerySqlField(index = true, groups = "full_name")
+        private String fName;
+
+        /** */
+        @GridToStringInclude
+        @QuerySqlField(index = true, groups = "full_name")
+        private String lName;
+
+        /**
+         * @param fName First name.
+         * @param lName Last name.
+         */
+        public Person(String fName, String lName) {
+            this.fName = fName;
+            this.lName = lName;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(Person.class, this);
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (o == null || getClass() != o.getClass())
+                return false;
+
+            Person person = (Person)o;
+
+            return Objects.equals(fName, person.fName) &&
+                Objects.equals(lName, person.lName);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return Objects.hash(fName, lName);
+        }
+    }
+
+    /** */
+    final static Comparator<Cache.Entry<Integer, Person>> ENTRY_CMP =
+        new Comparator<Cache.Entry<Integer, Person>>() {
+        @Override public int compare(Cache.Entry<Integer, Person> o1, Cache.Entry<Integer, Person> o2) {
+            return o1.getKey().compareTo(o2.getKey());
+        }
+    };
+
+    /**
+     *
+     */
+    static List<Person> entriesToPersons(List<Cache.Entry<Integer, Person>> entries) {
+        entries.sort(ENTRY_CMP);
+
+        List<Person> res = new ArrayList<>();
+
+        for (Cache.Entry<Integer, Person> e : entries)
+            res.add(e.getValue());
+
+        return res;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractTest.java
new file mode 100644
index 0000000..a4962d1
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractTest.java
@@ -0,0 +1,2295 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.math.BigDecimal;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteTransactions;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.cluster.ClusterTopologyException;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.TransactionConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.util.future.GridCompoundIdentityFuture;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.lang.GridInClosure3;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteClosure;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.jetbrains.annotations.Nullable;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL_SUM;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.DML;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.PUT;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ *
+ */
+public abstract class CacheMvccAbstractTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    protected static final ObjectCodec<Integer> INTEGER_CODEC = new IntegerCodec();
+
+    /** */
+    protected static final ObjectCodec<MvccTestAccount> ACCOUNT_CODEC = new AccountCodec();
+
+    /** */
+    static final int DFLT_PARTITION_COUNT = RendezvousAffinityFunction.DFLT_PARTITION_COUNT;
+
+    /** */
+    static final String CRD_ATTR = "testCrd";
+
+    /** */
+    static final long DFLT_TEST_TIME = 30_000;
+
+    /** */
+    protected static final int PAGE_SIZE = DataStorageConfiguration.DFLT_PAGE_SIZE;
+
+    /** */
+    protected static final int SRVS = 4;
+
+    /** */
+    protected boolean client;
+
+    /** */
+    protected boolean testSpi;
+
+    /** */
+    protected String nodeAttr;
+
+    /** */
+    protected boolean persistence;
+
+    /** */
+    protected CacheConfiguration ccfg;
+
+    /** */
+    protected CacheConfiguration[] ccfgs;
+
+    /** */
+    protected boolean disableScheduledVacuum;
+
+    /** */
+    protected static final int TX_TIMEOUT = 3000;
+
+    /**
+     * @return Cache mode.
+     */
+    protected abstract CacheMode cacheMode();
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        if (disableScheduledVacuum)
+            cfg.setMvccVacuumFrequency(Integer.MAX_VALUE);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        if (testSpi)
+            cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+
+        ((TcpCommunicationSpi)cfg.getCommunicationSpi()).setSharedMemoryPort(-1);
+
+        cfg.setClientMode(client);
+
+        assert (ccfg == null) || (ccfgs == null);
+
+        if (ccfg != null)
+            cfg.setCacheConfiguration(ccfg);
+
+        if (ccfgs != null)
+            cfg.setCacheConfiguration(ccfgs);
+
+        if (nodeAttr != null)
+            cfg.setUserAttributes(F.asMap(nodeAttr, true));
+
+        DataStorageConfiguration storageCfg = new DataStorageConfiguration();
+
+        storageCfg.setWalMode(WALMode.LOG_ONLY);
+        storageCfg.setPageSize(PAGE_SIZE);
+
+        DataRegionConfiguration regionCfg = new DataRegionConfiguration();
+
+        regionCfg.setPersistenceEnabled(persistence);
+        regionCfg.setMaxSize(64L * 1024 * 1024);
+
+        storageCfg.setDefaultDataRegionConfiguration(regionCfg);
+
+        cfg.setDataStorageConfiguration(storageCfg);
+
+        cfg.setConsistentId(gridName);
+
+        cfg.setTransactionConfiguration(new TransactionConfiguration()
+            .setDefaultTxConcurrency(TransactionConcurrency.PESSIMISTIC)
+            .setDefaultTxIsolation(TransactionIsolation.REPEATABLE_READ));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return DFLT_TEST_TIME + 60_000;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        MvccProcessorImpl.coordinatorAssignClosure(null);
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        try {
+            verifyOldVersionsCleaned();
+
+            verifyCoordinatorInternalState();
+        }
+        finally {
+            stopAllGrids();
+        }
+
+        MvccProcessorImpl.coordinatorAssignClosure(null);
+
+        cleanPersistenceDir();
+
+        super.afterTest();
+    }
+
+    /**
+     * @param cfgC Optional closure applied to cache configuration.
+     * @throws Exception If failed.
+     */
+    final void cacheRecreate(@Nullable IgniteInClosure<CacheConfiguration> cfgC) throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        final int PARTS = 64;
+
+        CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS);
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        IgniteCache<Integer, MvccTestAccount> cache = (IgniteCache)srv0.createCache(ccfg);
+
+        for (int k = 0; k < PARTS * 2; k++) {
+            assertNull(cache.get(k));
+
+            int vals = k % 3 + 1;
+
+            for (int v = 0; v < vals; v++)
+                cache.put(k, new MvccTestAccount(v, 1));
+
+            assertEquals(vals - 1, cache.get(k).val);
+        }
+
+        srv0.destroyCache(cache.getName());
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS);
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        cache = (IgniteCache)srv0.createCache(ccfg);
+
+        for (int k = 0; k < PARTS * 2; k++) {
+            assertNull(cache.get(k));
+
+            int vals = k % 3 + 2;
+
+            for (int v = 0; v < vals; v++)
+                cache.put(k, new MvccTestAccount(v + 100, 1));
+
+            assertEquals(vals - 1 + 100, cache.get(k).val);
+        }
+
+        srv0.destroyCache(cache.getName());
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS);
+
+        IgniteCache<Long, Long> cache0 = (IgniteCache)srv0.createCache(ccfg);
+
+        for (long k = 0; k < PARTS * 2; k++) {
+            assertNull(cache0.get(k));
+
+            int vals = (int)(k % 3 + 2);
+
+            for (long v = 0; v < vals; v++)
+                cache0.put(k, v);
+
+            assertEquals((long)(vals - 1), (Object)cache0.get(k));
+        }
+    }
+
+    /**
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param cfgC Optional closure applied to cache configuration.
+     * @param withRmvs If {@code true} then in addition to puts tests also executes removes.
+     * @param readMode Read mode.
+     * @param writeMode Write mode.
+     * @throws Exception If failed.
+     */
+    final void accountsTxReadAll(
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        final boolean withRmvs,
+        final ReadMode readMode,
+        final WriteMode writeMode
+    ) throws Exception {
+        accountsTxReadAll(srvs, clients, cacheBackups, cacheParts, cfgC, withRmvs, readMode, writeMode, DFLT_TEST_TIME, null);
+    }
+
+    /**
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param cfgC Optional closure applied to cache configuration.
+     * @param withRmvs If {@code true} then in addition to puts tests also executes removes.
+     * @param readMode Read mode.
+     * @param writeMode Write mode.
+     * @param testTime Test time.
+     * @throws Exception If failed.
+     */
+    final void accountsTxReadAll(
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        final boolean withRmvs,
+        final ReadMode readMode,
+        final WriteMode writeMode,
+        long testTime,
+        RestartMode restartMode
+    ) throws Exception {
+        final int ACCOUNTS = 20;
+
+        final int ACCOUNT_START_VAL = 1000;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
+            @Override public void apply(IgniteCache<Object, Object> cache) {
+                final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                if (writeMode == WriteMode.PUT) {
+                    Map<Integer, MvccTestAccount> accounts = new HashMap<>();
+
+                    for (int i = 0; i < ACCOUNTS; i++)
+                        accounts.put(i, new MvccTestAccount(ACCOUNT_START_VAL, 1));
+
+                    try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        cache.putAll(accounts);
+
+                        tx.commit();
+                    }
+                }
+                else if (writeMode == WriteMode.DML) {
+                    try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        SqlFieldsQuery qry = new SqlFieldsQuery("insert into MvccTestAccount(_key, val, updateCnt) values " +
+                                "(?," + ACCOUNT_START_VAL + ",1)");
+
+                        for (int i = 0; i < ACCOUNTS; i++) {
+                            try (FieldsQueryCursor<List<?>> cur = cache.query(qry.setArgs(i))) {
+                                assertEquals(1L, cur.iterator().next().get(0));
+                            }
+
+                            tx.commit();
+                        }
+                    }
+                }
+                else
+                    assert false : "Unknown write mode";
+            }
+        };
+
+        final RemovedAccountsTracker rmvdTracker = new RemovedAccountsTracker(ACCOUNTS);
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+
+                        try {
+                            IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                            cnt++;
+
+                            int i1 = rnd.nextInt(ACCOUNTS), i2 = rnd.nextInt(ACCOUNTS);
+
+                            while (i2 == i1)
+                                i2 = rnd.nextInt(ACCOUNTS);
+
+                            Integer id1 = Math.min(i1, i2);
+                            Integer id2 = Math.max(i1, i2);
+
+                            TreeSet<Integer> keys = new TreeSet<>();
+
+                            keys.add(id1);
+                            keys.add(id2);
+
+                            Integer cntr1 = null;
+                            Integer cntr2 = null;
+
+                            Integer rmvd = null;
+                            Integer inserted = null;
+
+                            MvccTestAccount a1;
+                            MvccTestAccount a2;
+
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                tx.timeout(TX_TIMEOUT);
+
+                                Map<Integer, MvccTestAccount> accounts = null;
+
+                                if (writeMode == WriteMode.PUT)
+                                    accounts = cache.cache.getAll(keys);
+                                else if (writeMode == WriteMode.DML)
+                                    accounts = getAllSql(cache);
+                                else
+                                    assert false : "Unknown write mode";
+
+                                a1 = accounts.get(id1);
+                                a2 = accounts.get(id2);
+
+                                if (!withRmvs) {
+                                    assertNotNull(a1);
+                                    assertNotNull(a2);
+
+                                    cntr1 = a1.updateCnt + 1;
+                                    cntr2 = a2.updateCnt + 1;
+
+                                    if (writeMode == WriteMode.PUT) {
+                                        cache.cache.put(id1, new MvccTestAccount(a1.val + 1, cntr1));
+                                        cache.cache.put(id2, new MvccTestAccount(a2.val - 1, cntr2));
+                                    }
+                                    else if (writeMode == WriteMode.DML)  {
+                                        updateSql(cache, id1, a1.val + 1, cntr1);
+                                        updateSql(cache, id2, a2.val - 1, cntr2);
+                                    }
+                                    else
+                                        assert false : "Unknown write mode";
+                                }
+                                else {
+                                    if (a1 != null || a2 != null) {
+                                        if (a1 != null && a2 != null) {
+                                            if (rnd.nextInt(10) == 0) {
+                                                if (rmvdTracker.size() < ACCOUNTS / 2) {
+                                                    rmvd = rnd.nextBoolean() ? id1 : id2;
+
+                                                    assertTrue(rmvdTracker.markRemoved(rmvd));
+                                                }
+                                            }
+
+                                            if (rmvd != null) {
+                                                if (writeMode == WriteMode.PUT) {
+                                                    if (rmvd.equals(id1)) {
+                                                        cache.cache.remove(id1);
+                                                        cache.cache.put(id2, new MvccTestAccount(a1.val + a2.val, 1));
+                                                    }
+                                                    else {
+                                                        cache.cache.put(id1, new MvccTestAccount(a1.val + a2.val, 1));
+                                                        cache.cache.remove(id2);
+                                                    }
+                                                }
+                                                else if (writeMode == WriteMode.DML)  {
+                                                    if (rmvd.equals(id1)) {
+                                                        removeSql(cache, id1);
+                                                        updateSql(cache, id2,a1.val + a2.val, 1);
+                                                    }
+                                                    else {
+                                                        updateSql(cache, id1,a1.val + a2.val, 1);
+                                                        removeSql(cache, id2);
+                                                    }
+                                                }
+                                                else
+                                                    assert false : "Unknown write mode";
+                                            }
+                                            else {
+                                                if (writeMode == WriteMode.PUT) {
+                                                    cache.cache.put(id1, new MvccTestAccount(a1.val + 1, 1));
+                                                    cache.cache.put(id2, new MvccTestAccount(a2.val - 1, 1));
+                                                }
+                                                else if (writeMode == WriteMode.DML) {
+                                                    updateSql(cache, id1, a1.val + 1, 1);
+                                                    updateSql(cache, id2, a2.val - 1, 1);
+                                                }
+                                                else
+                                                    assert false : "Unknown write mode";
+                                            }
+                                        }
+                                        else {
+                                            if (a1 == null) {
+                                                inserted = id1;
+
+                                                if (writeMode == WriteMode.PUT) {
+                                                    cache.cache.put(id1, new MvccTestAccount(100, 1));
+                                                    cache.cache.put(id2, new MvccTestAccount(a2.val - 100, 1));
+                                                }
+                                                else if (writeMode == WriteMode.DML) {
+                                                    insertSql(cache, id1, 100, 1);
+                                                    updateSql(cache, id2, a2.val - 100, 1);
+                                                }
+                                                else
+                                                    assert false : "Unknown write mode";
+                                            }
+                                            else {
+                                                inserted = id2;
+
+                                                if (writeMode == WriteMode.PUT) {
+                                                    cache.cache.put(id1, new MvccTestAccount(a1.val - 100, 1));
+                                                    cache.cache.put(id2, new MvccTestAccount(100, 1));
+                                                }
+                                                else if (writeMode == WriteMode.DML) {
+                                                    updateSql(cache, id1, a1.val - 100, 1);
+                                                    insertSql(cache, id2, 100, 1);
+                                                }
+                                                else
+                                                    assert false : "Unknown write mode";
+                                            }
+                                        }
+                                    }
+                                }
+
+                                tx.commit();
+
+                                // In case of tx success mark inserted.
+                                if (inserted != null) {
+                                    assert withRmvs;
+
+                                    assertTrue(rmvdTracker.unmarkRemoved(inserted));
+                                }
+                            }
+                            catch (Throwable e) {
+                                if (rmvd != null) {
+                                    assert withRmvs;
+
+                                    // If tx fails, unmark removed.
+                                    assertTrue(rmvdTracker.unmarkRemoved(rmvd));
+                                }
+
+                                throw e;
+                            }
+
+                            if (!withRmvs) {
+                                Map<Integer, MvccTestAccount> accounts = null;
+
+                                if (writeMode == WriteMode.PUT)
+                                    accounts = cache.cache.getAll(keys);
+                                else if (writeMode == WriteMode.DML)
+                                    accounts = getAllSql(cache);
+                                else
+                                    assert false : "Unknown write mode";
+
+                                a1 = accounts.get(id1);
+                                a2 = accounts.get(id2);
+
+                                assertNotNull(a1);
+                                assertNotNull(a2);
+
+                                assertTrue(a1.updateCnt >= cntr1);
+                                assertTrue(a2.updateCnt >= cntr2);
+                            }
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Set<Integer> keys = new LinkedHashSet<>();
+
+                    Map<Integer, Integer> lastUpdateCntrs = new HashMap<>();
+
+                    SqlFieldsQuery sumQry = new SqlFieldsQuery("select sum(val) from MvccTestAccount");
+
+                    while (!stop.get()) {
+                        while (keys.size() < ACCOUNTS)
+                            keys.add(rnd.nextInt(ACCOUNTS));
+
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+
+                        Map<Integer, MvccTestAccount> accounts = null;
+
+                        try {
+                            switch (readMode) {
+                                case GET: {
+                                    accounts = cache.cache.getAll(keys);
+
+                                    break;
+                                }
+
+                                case SCAN: {
+                                    accounts = new HashMap<>();
+
+                                    Iterator<Cache.Entry<Integer, MvccTestAccount>> it = cache.cache.iterator();
+
+                                    try {
+                                        for (; it.hasNext(); ) {
+                                            IgniteCache.Entry<Integer, MvccTestAccount> e = it.next();
+                                            MvccTestAccount old = accounts.put(e.getKey(), e.getValue());
+
+                                            assertNull("new=" + e + ", old=" + old, old);
+                                        }
+                                    } finally {
+                                        U.closeQuiet((AutoCloseable) it);
+                                    }
+
+                                    break;
+                                }
+
+                                case SQL: {
+                                    accounts = new HashMap<>();
+
+                                    if (rnd.nextBoolean()) {
+                                        SqlQuery<Integer, MvccTestAccount> qry =
+                                            new SqlQuery<>(MvccTestAccount.class, "_key >= 0");
+
+                                        for (IgniteCache.Entry<Integer, MvccTestAccount> e : cache.cache.query(qry).getAll()) {
+                                            MvccTestAccount old = accounts.put(e.getKey(), e.getValue());
+
+                                            assertNull(old);
+                                        }
+                                    }
+                                    else {
+                                        SqlFieldsQuery qry = new SqlFieldsQuery("select _key, val from MvccTestAccount");
+
+                                        for (List<?> row : cache.cache.query(qry).getAll()) {
+                                            Integer id = (Integer)row.get(0);
+                                            Integer val = (Integer)row.get(1);
+
+                                            MvccTestAccount old = accounts.put(id, new MvccTestAccount(val, 1));
+
+                                            assertNull(old);
+                                        }
+                                    }
+
+                                    break;
+                                }
+
+                                case SQL_SUM: {
+                                    BigDecimal sum;
+
+                                    if (rnd.nextBoolean()) {
+                                        List<List<?>> res =  cache.cache.query(sumQry).getAll();
+
+                                        assertEquals(1, res.size());
+
+                                        sum = (BigDecimal)res.get(0).get(0);
+                                    }
+                                    else {
+                                        Map res = readAllByMode(cache.cache, keys, readMode, ACCOUNT_CODEC);
+
+                                        sum = (BigDecimal)((Map.Entry)res.entrySet().iterator().next()).getValue();
+                                    }
+
+                                    assertEquals(ACCOUNT_START_VAL * ACCOUNTS, sum.intValue());
+
+                                    break;
+                                }
+
+                                default: {
+                                    fail();
+
+                                    return;
+                                }
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        if (accounts != null) {
+                            if (!withRmvs)
+                                assertEquals(ACCOUNTS, accounts.size());
+
+                            int sum = 0;
+
+                            for (int i = 0; i < ACCOUNTS; i++) {
+                                MvccTestAccount account = accounts.get(i);
+
+                                if (account != null) {
+                                    sum += account.val;
+
+                                    Integer cntr = lastUpdateCntrs.get(i);
+
+                                    if (cntr != null)
+                                        assertTrue(cntr <= account.updateCnt);
+
+                                    lastUpdateCntrs.put(i, cntr);
+                                }
+                                else
+                                    assertTrue(withRmvs);
+                            }
+
+                            assertEquals(ACCOUNTS * ACCOUNT_START_VAL, sum);
+                        }
+                    }
+
+                    if (idx == 0) {
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+
+                        Map<Integer, MvccTestAccount> accounts;
+
+                        ReadMode readMode0 = readMode == SQL_SUM ? SQL : readMode;
+
+                        try {
+                            accounts = readAllByMode(cache.cache, keys, readMode0, ACCOUNT_CODEC);;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        int sum = 0;
+
+                        for (int i = 0; i < ACCOUNTS; i++) {
+                            MvccTestAccount account = accounts.get(i);
+
+                            assertTrue(account != null || withRmvs);
+
+                            info("Account [id=" + i + ", val=" + (account != null ? account.val : null) + ']');
+
+                            if (account != null)
+                                sum += account.val;
+                        }
+
+                        info("Sum: " + sum);
+                    }
+                }
+            };
+
+        readWriteTest(
+            restartMode,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            testTime,
+            cfgC,
+            init,
+            writer,
+            reader);
+    }
+
+    /**
+     * Returns all accounts from cache by means of SQL.
+     *
+     * @param cache Cache.
+     * @return All accounts
+     */
+    private static Map<Integer, MvccTestAccount> getAllSql(TestCache<Integer, MvccTestAccount> cache) {
+        Map<Integer, MvccTestAccount> accounts = new HashMap<>();
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("select _key, val, updateCnt from MvccTestAccount");
+
+        for (List<?> row : cache.cache.query(qry).getAll()) {
+            Integer id = (Integer)row.get(0);
+            Integer val = (Integer)row.get(1);
+            Integer updateCnt = (Integer)row.get(2);
+
+            MvccTestAccount old = accounts.put(id, new MvccTestAccount(val, updateCnt));
+
+            assertNull(old);
+        }
+
+        return accounts;
+    }
+
+    /**
+     * Updates account by means of SQL API.
+     *
+     * @param cache Cache.
+     * @param key Key.
+     * @param val Value.
+     * @param updateCnt Update counter.
+     */
+    private static void updateSql(TestCache<Integer, MvccTestAccount> cache, Integer key, Integer val, Integer updateCnt) {
+        SqlFieldsQuery qry = new SqlFieldsQuery("update MvccTestAccount set val=" + val + ", updateCnt=" +
+            updateCnt + " where _key=" + key);
+
+        cache.cache.query(qry).getAll();
+    }
+
+    /**
+     * Removes account by means of SQL API.
+     *
+     * @param cache Cache.
+     * @param key Key.
+     */
+    private static void removeSql(TestCache<Integer, MvccTestAccount> cache, Integer key) {
+        SqlFieldsQuery qry = new SqlFieldsQuery("delete from MvccTestAccount where _key=" + key);
+
+        cache.cache.query(qry).getAll();
+    }
+
+    /**
+     * Inserts account by means of SQL API.
+     *
+     * @param cache Cache.
+     * @param key Key.
+     * @param val Value.
+     * @param updateCnt Update counter.
+     */
+    private static void insertSql(TestCache<Integer, MvccTestAccount> cache, int key, Integer val, Integer updateCnt) {
+        SqlFieldsQuery qry = new SqlFieldsQuery("insert into MvccTestAccount(_key, val, updateCnt) values " +
+            " (" + key+ ", " + val + ", " + updateCnt + ")");
+
+        cache.cache.query(qry).getAll();
+    }
+
+    /**
+     * @param restartMode Restart mode.
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param readMode Read mode.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    protected void putAllGetAll(
+        RestartMode restartMode,
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode
+    ) throws Exception {
+        if(readMode == SCAN && writeMode == PUT)
+            fail("https://issues.apache.org/jira/browse/IGNITE-7764");
+
+        final int RANGE = 20;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int min = idx * RANGE;
+                    int max = min + RANGE;
+
+                    info("Thread range [min=" + min + ", max=" + max + ']');
+
+                    Map<Integer, Integer> map = new HashMap<>();
+
+                    int v = idx * 1_000_000;
+
+                    boolean first = true;
+
+                    while (!stop.get()) {
+                        while (map.size() < RANGE)
+                            map.put(rnd.nextInt(min, max), v);
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        try {
+                            IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                if (!first && rnd.nextBoolean()) {
+                                    Map<Integer, Integer> res = readAllByMode(cache.cache, map.keySet(), readMode, INTEGER_CODEC);
+
+                                    for (Integer k : map.keySet())
+                                        assertEquals("res=" + res, v - 1, (Object)res.get(k));
+                                }
+
+                                writeAllByMode(cache.cache, map, writeMode, INTEGER_CODEC);
+
+                                tx.commit();
+
+                                first = false;
+                            }
+
+                            if (rnd.nextBoolean()) {
+                                Map<Integer, Integer> res = readAllByMode(cache.cache, map.keySet(), readMode, INTEGER_CODEC);
+
+                                for (Integer k : map.keySet())
+                                    assertEquals("key=" + k, v, (Object)res.get(k));
+                            }
+
+                            map.clear();
+
+                            v++;
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+                        }
+                        finally {
+                            cache.readUnlock();
+
+                            map.clear();
+                        }
+                    }
+
+                    info("Writer done, updates: " + v);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Set<Integer> keys = new LinkedHashSet<>();
+
+                    Map<Integer, Integer> readVals = new HashMap<>();
+
+                    while (!stop.get()) {
+                        int range = rnd.nextInt(0, writers);
+
+                        int min = range * RANGE;
+                        int max = min + RANGE;
+
+                        while (keys.size() < RANGE)
+                            keys.add(rnd.nextInt(min, max));
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        Map<Integer, Integer> map;
+
+                        try {
+                            map = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+
+                            continue;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        assertTrue("Invalid map size: " + map.size() + ", map=" + map, map.isEmpty() || map.size() == RANGE);
+
+                        Integer val0 = null;
+
+                        for (Map.Entry<Integer, Integer> e: map.entrySet()) {
+                            Integer val = e.getValue();
+
+                            assertNotNull(val);
+
+                            if (val0 == null) {
+                                Integer readVal = readVals.get(range);
+
+                                if (readVal != null)
+                                    assertTrue("readVal=" + readVal + ", val=" + val +  ", map=" + map,readVal <= val);
+
+                                readVals.put(range, val);
+
+                                val0 = val;
+                            }
+                            else {
+                                if (!F.eq(val0, val)) {
+                                    assertEquals("Unexpected value [range=" + range + ", key=" + e.getKey() + ']' +
+                                        ", map=" + map,
+                                        val0,
+                                        val);
+                                }
+                            }
+                        }
+
+                        keys.clear();
+                    }
+                }
+            };
+
+        readWriteTest(
+            restartMode,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            cfgC,
+            null,
+            writer,
+            reader);
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+
+
+
+    /**
+     * @param N Number of object to update in single transaction.
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param time Test time.
+     * @param readMode Read mode.
+     * @throws Exception If failed.
+     */
+    @SuppressWarnings("unchecked")
+    protected void updateNObjectsTest(
+        final int N,
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        long time,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        ReadMode readMode,
+        WriteMode writeMode,
+        RestartMode restartMode
+    )
+        throws Exception
+    {
+        if(readMode == SCAN && writeMode == PUT)
+            fail("https://issues.apache.org/jira/browse/IGNITE-7764");
+
+        final int TOTAL = 20;
+
+        assert N <= TOTAL;
+
+        info("updateNObjectsTest [n=" + N + ", total=" + TOTAL + ']');
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
+            @Override public void apply(IgniteCache<Object, Object> cache) {
+                final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                Map<Integer, Integer> vals = new HashMap<>();
+
+                for (int i = 0; i < TOTAL; i++)
+                    vals.put(i, N);
+
+                try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    writeAllByMode(cache, vals, writeMode, INTEGER_CODEC);
+
+                    tx.commit();
+                }
+            }
+        };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                        TreeSet<Integer> keys = new TreeSet<>();
+
+                        while (keys.size() < N)
+                            keys.add(rnd.nextInt(TOTAL));
+
+                        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                            tx.timeout(TX_TIMEOUT);
+
+                            Map<Integer, Integer> curVals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
+
+                            assertEquals(N, curVals.size());
+
+                            Map<Integer, Integer> newVals = new TreeMap<>();
+
+                            for (Map.Entry<Integer, Integer> e : curVals.entrySet())
+                                newVals.put(e.getKey(), e.getValue() + 1);
+
+                            writeAllByMode(cache.cache, newVals, writeMode, INTEGER_CODEC);
+
+                            tx.commit();
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        cnt++;
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Set<Integer> keys = new LinkedHashSet<>();
+
+                    while (!stop.get()) {
+                        while (keys.size() < TOTAL)
+                            keys.add(rnd.nextInt(TOTAL));
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        Map<Integer, Integer> vals = null;
+
+                        try {
+                            vals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
+                        }
+                        catch (Exception e) {
+                            handleTxException(e);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        assertEquals("vals=" + vals, TOTAL, vals.size());
+
+                        int sum = 0;
+
+                        for (int i = 0; i < TOTAL; i++) {
+                            Integer val = vals.get(i);
+
+                            assertNotNull(val);
+
+                            sum += val;
+                        }
+
+                        assertEquals(0, sum % N);
+                    }
+
+                    if (idx == 0) {
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        Map<Integer, Integer> vals;
+
+                        try {
+                            vals = readAllByMode(cache.cache, keys, readMode, INTEGER_CODEC);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        int sum = 0;
+
+                        for (int i = 0; i < TOTAL; i++) {
+                            Integer val = vals.get(i);
+
+                            info("Value [id=" + i + ", val=" + val + ']');
+
+                            sum += val;
+                        }
+
+                        info("Sum [sum=" + sum + ", mod=" + sum % N + ']');
+                    }
+                }
+            };
+
+        readWriteTest(
+            restartMode,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            time,
+            cfgC,
+            init,
+            writer,
+            reader);
+    }
+
+    /**
+     * @param restartMode Restart mode.
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param time Test time.
+     * @param cfgC Optional closure applied to cache configuration.
+     * @param writers Number of writers.
+     * @param readers Number of readers.
+     * @param init Optional init closure.
+     * @param writer Writers threads closure.
+     * @param reader Readers threads closure.
+     * @throws Exception If failed.
+     */
+    final void readWriteTest(
+        final RestartMode restartMode,
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        final int writers,
+        final int readers,
+        final long time,
+        @Nullable IgniteInClosure<CacheConfiguration> cfgC,
+        IgniteInClosure<IgniteCache<Object, Object>> init,
+        final GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer,
+        final GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader) throws Exception {
+        if (restartMode == RestartMode.RESTART_CRD)
+            MvccProcessorImpl.coordinatorAssignClosure(new CoordinatorAssignClosure());
+
+        Ignite srv0 = startGridsMultiThreaded(srvs);
+
+        if (clients > 0) {
+            client = true;
+
+            startGridsMultiThreaded(srvs, clients);
+
+            client = false;
+        }
+
+        CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(),
+            FULL_SYNC,
+            cacheBackups,
+            cacheParts);
+
+        if (restartMode == RestartMode.RESTART_CRD)
+            ccfg.setNodeFilter(new CoordinatorNodeFilter());
+
+        if (cfgC != null)
+            cfgC.apply(ccfg);
+
+        IgniteCache<Object, Object> cache = srv0.createCache(ccfg);
+
+        int crdIdx = srvs + clients;
+
+        if (restartMode == RestartMode.RESTART_CRD) {
+            nodeAttr = CRD_ATTR;
+
+            startGrid(crdIdx);
+        }
+
+        if (init != null)
+            init.apply(cache);
+
+        final List<TestCache> caches = new ArrayList<>(srvs + clients);
+
+        for (int i = 0; i < srvs + clients; i++) {
+            Ignite node = grid(i);
+
+            caches.add(new TestCache(node.cache(cache.getName())));
+        }
+
+        final long stopTime = U.currentTimeMillis() + time;
+
+        final AtomicBoolean stop = new AtomicBoolean();
+
+        try {
+            final AtomicInteger writerIdx = new AtomicInteger();
+
+            IgniteInternalFuture<?> writeFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    try {
+                        int idx = writerIdx.getAndIncrement();
+
+                        writer.apply(idx, caches, stop);
+                    }
+                    catch (Throwable e) {
+                        if (restartMode != null && X.hasCause(e, ClusterTopologyException.class)) {
+                            log.info("Writer error: " + e);
+
+                            return null;
+                        }
+
+                        error("Unexpected error: " + e, e);
+
+                        stop.set(true);
+
+                        fail("Unexpected error: " + e);
+                    }
+
+                    return null;
+                }
+            }, writers, "writer");
+
+            final AtomicInteger readerIdx = new AtomicInteger();
+
+            IgniteInternalFuture<?> readFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    try {
+                        int idx = readerIdx.getAndIncrement();
+
+                        reader.apply(idx, caches, stop);
+                    }
+                    catch (Throwable e) {
+                        error("Unexpected error: " + e, e);
+
+                        stop.set(true);
+
+                        fail("Unexpected error: " + e);
+                    }
+
+                    return null;
+                }
+            }, readers, "reader");
+
+            while (System.currentTimeMillis() < stopTime && !stop.get()) {
+                Thread.sleep(1000);
+
+                if (restartMode != null) {
+                    switch (restartMode) {
+                        case RESTART_CRD: {
+                            log.info("Start new coordinator: " + (crdIdx + 1));
+
+                            startGrid(crdIdx + 1);
+
+                            log.info("Stop current coordinator: " + crdIdx);
+
+                            stopGrid(crdIdx);
+
+                            crdIdx++;
+
+                            awaitPartitionMapExchange();
+
+                            break;
+                        }
+
+                        case RESTART_RND_SRV: {
+                            ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                            int idx = rnd.nextInt(srvs);
+
+                            TestCache cache0 = caches.get(idx);
+
+                            cache0.stopLock.writeLock().lock();
+
+                            log.info("Stop node: " + idx);
+
+                            stopGrid(idx);
+
+                            log.info("Start new node: " + idx);
+
+                            Ignite srv = startGrid(idx);
+
+                            synchronized (caches) {
+                                caches.set(idx, new TestCache(srv.cache(DEFAULT_CACHE_NAME)));
+                            }
+
+                            awaitPartitionMapExchange();
+
+                            break;
+                        }
+
+                        default:
+                            fail();
+                    }
+                }
+            }
+
+            stop.set(true);
+
+            Exception ex = null;
+
+            try {
+                writeFut.get();
+            }
+            catch (IgniteCheckedException e) {
+                ex = e;
+            }
+
+            try {
+                readFut.get();
+            }
+            catch (IgniteCheckedException e) {
+                if (ex != null)
+                    ex.addSuppressed(e);
+                else
+                    ex = e;
+            }
+
+            if (ex != null)
+                throw ex;
+        }
+        finally {
+            stop.set(true);
+        }
+    }
+
+    /**
+     * @param cacheMode Cache mode.
+     * @param syncMode Write synchronization mode.
+     * @param backups Number of backups.
+     * @param parts Number of partitions.
+     * @return Cache configuration.
+     */
+    final CacheConfiguration<Object, Object> cacheConfiguration(
+        CacheMode cacheMode,
+        CacheWriteSynchronizationMode syncMode,
+        int backups,
+        int parts) {
+        CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
+
+        ccfg.setCacheMode(cacheMode);
+        ccfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+        ccfg.setWriteSynchronizationMode(syncMode);
+        ccfg.setAffinity(new RendezvousAffinityFunction(false, parts));
+
+        if (cacheMode == PARTITIONED)
+            ccfg.setBackups(backups);
+
+        return ccfg;
+    }
+
+    /**
+     * Handles transaction exception.
+     * @param e Exception.
+     */
+    protected void handleTxException(Exception e) {
+        if (log.isTraceEnabled())
+            log.trace("Exception during tx execution: " + X.getFullStackTrace(e));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    final void verifyCoordinatorInternalState() throws Exception {
+        for (Ignite node : G.allGrids()) {
+            final MvccProcessorImpl crd = mvccProcessor(node);
+
+            if (!crd.mvccEnabled())
+                continue;
+
+            crd.stopVacuumWorkers(); // to prevent new futures creation.
+
+            Map activeTxs = GridTestUtils.getFieldValue(crd, "activeTxs");
+            Map<?, Map> cntrFuts = GridTestUtils.getFieldValue(crd, "snapLsnrs");
+            Map ackFuts = GridTestUtils.getFieldValue(crd, "ackFuts");
+            Map activeTrackers = GridTestUtils.getFieldValue(crd, "activeTrackers");
+
+            GridAbsPredicate cond = () -> {
+                log.info("activeTxs=" + activeTxs + ", cntrFuts=" + cntrFuts + ", ackFuts=" + ackFuts +
+                    ", activeTrackers=" + activeTrackers);
+
+                boolean empty = true;
+
+                for (Map map : cntrFuts.values())
+                    if (!(empty = map.isEmpty()))
+                        break;
+
+                return activeTxs.isEmpty() && empty && ackFuts.isEmpty() && activeTrackers.isEmpty();
+            };
+
+            GridTestUtils.waitForCondition(cond, TX_TIMEOUT);
+
+            assertTrue("activeTxs: " + activeTxs,  activeTxs.isEmpty());
+
+            boolean empty = true;
+
+            for (Map map : cntrFuts.values())
+                if (!(empty = map.isEmpty())) break;
+
+            assertTrue("cntrFuts: " + cntrFuts,  empty);
+            assertTrue("ackFuts: " + ackFuts,  ackFuts.isEmpty());
+            assertTrue("activeTrackers: " + activeTrackers,  activeTrackers.isEmpty());
+
+            checkActiveQueriesCleanup(node);
+        }
+    }
+
+    /**
+     * Checks if less than 2 versions remain after the vacuum cleanup.
+     *
+     * @throws Exception If failed.
+     */
+    protected void verifyOldVersionsCleaned() throws Exception {
+        runVacuumSync();
+
+        // Check versions.
+        boolean cleaned = checkOldVersions(false);
+
+        if (!cleaned) { // Retry on a stable topology with a newer snapshot.
+            awaitPartitionMapExchange();
+
+            runVacuumSync();
+
+            checkOldVersions(true);
+        }
+    }
+
+    /**
+     * Checks if outdated versions were cleaned after the vacuum process.
+     *
+     * @param failIfNotCleaned Fail test if not cleaned.
+     * @return {@code False} if not cleaned.
+     * @throws IgniteCheckedException If failed.
+     */
+    private boolean checkOldVersions(boolean failIfNotCleaned) throws IgniteCheckedException {
+        for (Ignite node : G.allGrids()) {
+            for (IgniteCacheProxy cache : ((IgniteKernal)node).caches()) {
+                GridCacheContext cctx = cache.context();
+
+                if (!cctx.userCache() || !cctx.group().mvccEnabled())
+                    continue;
+
+                for (Iterator it = cache.withKeepBinary().iterator(); it.hasNext(); ) {
+                    IgniteBiTuple entry = (IgniteBiTuple)it.next();
+
+                    KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());
+
+                    List<IgniteBiTuple<Object, MvccVersion>> vers = cctx.offheap().mvccAllVersions(cctx, key)
+                        .stream().filter(t -> t.get1() != null).collect(Collectors.toList());
+
+                    if (vers.size() > 1) {
+                        if (failIfNotCleaned)
+                            fail("[key=" + key.value(null, false) + "; vers=" + vers + ']');
+                        else {
+                            U.closeQuiet((AutoCloseable)it);
+
+                            return false;
+                        }
+                    }
+                }
+            }
+        }
+
+        return true;
+    }
+
+    /**
+     * Runs vacuum on all nodes and waits for its completion.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    private void runVacuumSync() throws IgniteCheckedException {
+        GridCompoundIdentityFuture<VacuumMetrics> fut = new GridCompoundIdentityFuture<>();
+
+        // Run vacuum manually.
+        for (Ignite node : G.allGrids()) {
+            if (!node.configuration().isClientMode()) {
+                MvccProcessorImpl crd = mvccProcessor(node);
+
+                if (!crd.mvccEnabled() || GridTestUtils.getFieldValue(crd, "vacuumWorkers") == null)
+                    continue;
+
+                assert GridTestUtils.getFieldValue(crd, "txLog") != null;
+
+                Throwable vacuumError = crd.vacuumError();
+
+                assertNull(X.getFullStackTrace(vacuumError), vacuumError);
+
+                fut.add(crd.runVacuum());
+            }
+        }
+
+        fut.markInitialized();
+
+        // Wait vacuum finished.
+        fut.get(getTestTimeout());
+    }
+
+    /**
+     * @param node Ignite node.
+     * @return Mvcc processor.
+     */
+    protected MvccProcessorImpl mvccProcessor(Ignite node) {
+        GridKernalContext ctx = ((IgniteEx)node).context();
+
+        MvccProcessor crd = ctx.coordinators();
+
+        assertNotNull(crd);
+
+        return (MvccProcessorImpl)crd;
+    }
+
+    /**
+     * @param node Node.
+     * @throws Exception If failed.
+     */
+    protected final void checkActiveQueriesCleanup(Ignite node) throws Exception {
+        final MvccProcessorImpl crd = mvccProcessor(node);
+
+        assertTrue("Active queries not cleared: " + node.name(), GridTestUtils.waitForCondition(
+            new GridAbsPredicate() {
+                @Override public boolean apply() {
+                    Object activeQueries = GridTestUtils.getFieldValue(crd, "activeQueries");
+
+                    synchronized (activeQueries) {
+                        Long minQry = GridTestUtils.getFieldValue(activeQueries, "minQry");
+
+                        if (minQry != null)
+                            log.info("Min query: " + minQry);
+
+                        Map<Object, Map> queriesMap = GridTestUtils.getFieldValue(activeQueries, "activeQueries");
+
+                        boolean empty = true;
+
+                        for (Map.Entry<Object, Map> e : queriesMap.entrySet()) {
+                            if (!e.getValue().isEmpty()) {
+                                empty = false;
+
+                                log.info("Active queries: " + e);
+                            }
+                        }
+
+                        return empty && minQry == null;
+                    }
+                }
+            }, 8_000)
+        );
+
+        assertTrue("Previous coordinator queries not empty: " + node.name(), GridTestUtils.waitForCondition(
+            new GridAbsPredicate() {
+                @Override public boolean apply() {
+                    Map queries = GridTestUtils.getFieldValue(crd, "prevCrdQueries", "activeQueries");
+                    Boolean prevDone = GridTestUtils.getFieldValue(crd, "prevCrdQueries", "prevQueriesDone");
+
+                    if (!queries.isEmpty() || !prevDone)
+                        log.info("Previous coordinator state [prevDone=" + prevDone + ", queries=" + queries + ']');
+
+                    return queries.isEmpty();
+                }
+            }, 8_000)
+        );
+    }
+
+    /**
+     * @return Cache configurations.
+     */
+    protected List<CacheConfiguration<Object, Object>> cacheConfigurations() {
+        List<CacheConfiguration<Object, Object>> ccfgs = new ArrayList<>();
+
+        ccfgs.add(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, RendezvousAffinityFunction.DFLT_PARTITION_COUNT));
+        ccfgs.add(cacheConfiguration(PARTITIONED, FULL_SYNC, 1, RendezvousAffinityFunction.DFLT_PARTITION_COUNT));
+        ccfgs.add(cacheConfiguration(PARTITIONED, FULL_SYNC, 2, RendezvousAffinityFunction.DFLT_PARTITION_COUNT));
+        ccfgs.add(cacheConfiguration(REPLICATED, FULL_SYNC, 0, RendezvousAffinityFunction.DFLT_PARTITION_COUNT));
+
+        return ccfgs;
+    }
+
+    /**
+     * Reads value from cache for the given key using given read mode.
+     *
+     * @param cache Cache.
+     * @param key Key.
+     * @param readMode Read mode.
+     * @param codec Sql object codec.
+     * @return Value.
+     */
+    @SuppressWarnings("unchecked")
+    protected Object readByMode(IgniteCache cache, final Object key, ReadMode readMode, ObjectCodec codec) {
+        assert cache != null && key != null && readMode != null && readMode != SQL_SUM;
+        assert readMode != SQL || codec != null;
+
+        boolean emulateLongQry = ThreadLocalRandom.current().nextBoolean();
+
+        switch (readMode) {
+            case GET:
+                return cache.get(key);
+
+            case SCAN:
+                ScanQuery scanQry = new ScanQuery(new IgniteBiPredicate() {
+                    @Override public boolean apply(Object k, Object v) {
+                        if (emulateLongQry)
+                            doSleep(ThreadLocalRandom.current().nextInt(50));
+
+                        return k.equals(key);
+                    }
+                });
+
+                List res = cache.query(scanQry).getAll();
+
+                assertTrue(res.size() <= 1);
+
+                return res.isEmpty() ? null : ((IgniteBiTuple)res.get(0)).getValue();
+
+            case SQL:
+                String qry = "SELECT * FROM " + codec.tableName() + " WHERE _key=" + key;
+
+                SqlFieldsQuery sqlFieldsQry =  new SqlFieldsQuery(qry);
+
+                if (emulateLongQry)
+                    sqlFieldsQry.setLazy(true).setPageSize(1);
+
+                List<List> rows;
+
+                if (emulateLongQry) {
+                    FieldsQueryCursor<List> cur = cache.query(sqlFieldsQry);
+
+                    rows = new ArrayList<>();
+
+                    for (List row : cur) {
+                        rows.add(row);
+
+                        doSleep(ThreadLocalRandom.current().nextInt(50));
+                    }
+                }
+                else
+                    rows = cache.query(sqlFieldsQry).getAll();
+
+                assertTrue(rows.size() <= 1);
+
+                return rows.isEmpty() ? null : codec.decode(rows.get(0));
+
+            default:
+                throw new AssertionError("Unsupported read mode: " + readMode);
+        }
+    }
+
+    /**
+     * Writes value into cache using given write mode.
+     *
+     * @param cache Cache.
+     * @param key Key.
+     * @param val Value.
+     * @param writeMode Write mode.
+     * @param codec Sql object codec.
+     */
+    @SuppressWarnings("unchecked")
+    protected void writeByMode(IgniteCache cache, final Object key, Object val, WriteMode writeMode, ObjectCodec codec) {
+        assert writeMode != DML || codec != null;
+        assert cache != null && key != null && writeMode != null && val != null;
+
+        switch (writeMode) {
+            case PUT:
+                cache.put(key, val);
+
+                return;
+
+            case DML:
+                String qry = "MERGE INTO " + codec.tableName() + " (" + codec.columnsNames() +  ") VALUES " +
+                    '(' + key + ", " + codec.encode(val) + ')';
+
+                List<List> rows = cache.query(new SqlFieldsQuery(qry)).getAll();
+
+                assertTrue(rows.size() <= 1);
+
+                return;
+
+            default:
+                throw new AssertionError("Unsupported write mode: " + writeMode);
+        }
+    }
+
+
+    /**
+     * Reads value from cache for the given key using given read mode.
+     *
+     * @param cache Cache.
+     * @param keys Key.
+     * @param readMode Read mode.
+     * @param codec Value codec.
+     * @return Value.
+     */
+    @SuppressWarnings("unchecked")
+    protected Map readAllByMode(IgniteCache cache, Set keys, ReadMode readMode, ObjectCodec codec) {
+        assert cache != null && keys != null && readMode != null;
+        assert readMode != SQL || codec != null;
+
+        boolean emulateLongQry = ThreadLocalRandom.current().nextBoolean();
+
+        switch (readMode) {
+            case GET:
+                return cache.getAll(keys);
+
+            case SCAN:
+                ScanQuery scanQry = new ScanQuery(new IgniteBiPredicate() {
+                    @Override public boolean apply(Object k, Object v) {
+                        if (emulateLongQry)
+                            doSleep(ThreadLocalRandom.current().nextInt(50));
+
+                        return keys.contains(k);
+                    }
+                });
+
+
+                Map res = (Map)cache.query(scanQry).getAll()
+                    .stream()
+                    .collect(Collectors.toMap(v -> ((IgniteBiTuple)v).getKey(), v -> ((IgniteBiTuple)v).getValue()));
+
+                assertTrue("res.size()=" + res.size() + ", keys.size()=" + keys.size(), res.size() <= keys.size());
+
+                return res;
+
+            case SQL:
+                StringBuilder b = new StringBuilder("SELECT " + codec.columnsNames() + " FROM " + codec.tableName() + " WHERE _key IN (");
+
+                boolean first = true;
+
+                for (Object key : keys) {
+                    if (first)
+                        first = false;
+                    else
+                        b.append(", ");
+
+                    b.append(key);
+                }
+
+                b.append(')');
+
+                String qry = b.toString();
+
+                SqlFieldsQuery sqlFieldsQry =  new SqlFieldsQuery(qry);
+
+                if (emulateLongQry)
+                    sqlFieldsQry.setLazy(true).setPageSize(1);
+
+                List<List> rows;
+
+                if (emulateLongQry) {
+                    FieldsQueryCursor<List> cur = cache.query(sqlFieldsQry);
+
+                    rows = new ArrayList<>();
+
+                    for (List row : cur) {
+                        rows.add(row);
+
+                        doSleep(ThreadLocalRandom.current().nextInt(50));
+                    }
+                }
+                else
+                    rows = cache.query(sqlFieldsQry).getAll();
+
+                if (rows.isEmpty())
+                    return Collections.EMPTY_MAP;
+
+                res = new HashMap();
+
+                for (List row : rows)
+                    res.put(row.get(0), codec.decode(row));
+
+                return res;
+
+            case SQL_SUM:
+                b = new StringBuilder("SELECT SUM(" + codec.aggregateColumnName() + ") FROM " + codec.tableName() + " WHERE _key IN (");
+
+                first = true;
+
+                for (Object key : keys) {
+                    if (first)
+                        first = false;
+                    else
+                        b.append(", ");
+
+                    b.append(key);
+                }
+
+                b.append(')');
+
+                qry = b.toString();
+
+                FieldsQueryCursor<List> cur = cache.query(new SqlFieldsQuery(qry));
+
+                rows = cur.getAll();
+
+                if (rows.isEmpty())
+                    return Collections.EMPTY_MAP;
+
+                res = new HashMap();
+
+                for (List row : rows)
+                    res.put(row.get(0), row.get(0));
+
+                return res;
+
+            default:
+                throw new AssertionError("Unsupported read mode: " + readMode);
+        }
+    }
+
+    /**
+     * Writes all entries using given write mode.
+     *
+     * @param cache Cache.
+     * @param entries Entries to write.
+     * @param writeMode Write mode.
+     * @param codec Entry codec.
+     */
+    @SuppressWarnings("unchecked")
+    protected void writeAllByMode(IgniteCache cache, final Map entries, WriteMode writeMode, ObjectCodec codec) {
+        assert cache != null && entries != null && writeMode != null;
+        assert writeMode != DML || codec != null;
+
+        switch (writeMode) {
+            case PUT:
+                cache.putAll(entries);
+
+                return;
+
+            case DML:
+                StringBuilder b = new StringBuilder("MERGE INTO " + codec.tableName() + " (" + codec.columnsNames() +  ") VALUES ");
+
+                boolean first = true;
+
+                for (Object entry : entries.entrySet()) {
+                    Map.Entry e = (Map.Entry)entry;
+                    if (first)
+                        first = false;
+                    else
+                        b.append(", ");
+
+                    b.append('(')
+                        .append(e.getKey())
+                        .append(", ")
+                        .append(codec.encode(e.getValue()))
+                        .append(')');
+                }
+
+                String qry = b.toString();
+
+                cache.query(new SqlFieldsQuery(qry)).getAll();
+
+                return;
+
+            default:
+                throw new AssertionError("Unsupported write mode: " + writeMode);
+        }
+    }
+
+    /**
+     * Object codec for SQL queries.
+     *
+     * @param <T> Type.
+     */
+    private interface ObjectCodec<T> {
+        /**
+         * Decodes object from SQL request result.
+         *
+         * @param row SQL request result.
+         * @return Decoded object.
+         */
+        T decode(List<?> row);
+
+        /**
+         * Encodes object into SQL string for INSERT clause.
+         *
+         * @param obj Object.
+         * @return Sql string.
+         */
+        String encode(T obj);
+
+        /**
+         * @return Table name.
+         */
+        String tableName();
+
+        /**
+         * @return Columns names.
+         */
+        String columnsNames();
+
+        /**
+         * @return Column for aggregate functions.
+         */
+        String aggregateColumnName();
+    }
+
+    /**
+     * Codec for {@code Integer} table.
+     */
+    private static class IntegerCodec implements ObjectCodec<Integer> {
+        /** {@inheritDoc} */
+        @Override public Integer decode(List<?> row) {
+            return (Integer)row.get(1);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String encode(Integer obj) {
+            return String.valueOf(obj);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String tableName() {
+            return "Integer";
+        }
+
+        /** {@inheritDoc} */
+        @Override public String columnsNames() {
+            return "_key, _val";
+        }
+
+        /** {@inheritDoc} */
+        @Override public String aggregateColumnName() {
+            return "_val";
+        }
+    }
+
+    /**
+     * Codec for {@code MvccTestAccount} table.
+     */
+    private static class AccountCodec implements ObjectCodec<MvccTestAccount> {
+        /** {@inheritDoc} */
+        @Override public MvccTestAccount decode(List<?> row) {
+            Integer val = (Integer)row.get(1);
+            Integer updateCnt = (Integer)row.get(2);
+
+            return new MvccTestAccount(val, updateCnt);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String encode(MvccTestAccount obj) {
+            return String.valueOf(obj.val) + ", " + String.valueOf(obj.updateCnt);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String tableName() {
+            return "MvccTestAccount";
+        }
+
+        /** {@inheritDoc} */
+        @Override public String columnsNames() {
+            return "_key, val, updateCnt";
+        }
+
+        /** {@inheritDoc} */
+        @Override public String aggregateColumnName() {
+            return "val";
+        }
+    }
+
+
+    /**
+     * @param caches Caches.
+     * @param rnd Random.
+     * @return Random cache.
+     */
+    static <K, V> TestCache<K, V> randomCache(
+        List<TestCache> caches,
+        ThreadLocalRandom rnd) {
+        synchronized (caches) {
+            if (caches.size() == 1) {
+                TestCache cache = caches.get(0);
+
+                assertTrue(cache.readLock());
+
+                return cache;
+            }
+
+            for (;;) {
+                int idx = rnd.nextInt(caches.size());
+
+                TestCache testCache = caches.get(idx);
+
+                if (testCache.readLock())
+                    return testCache;
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    static class MvccTestAccount {
+        /** */
+        @QuerySqlField(index = false)
+        final int val;
+
+        /** */
+        @QuerySqlField
+        final int updateCnt;
+
+        /**
+         * @param val Value.
+         * @param updateCnt Updates counter.
+         */
+        MvccTestAccount(int val, int updateCnt) {
+            assert updateCnt > 0;
+
+            this.val = val;
+            this.updateCnt = updateCnt;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "MvccTestAccount{" +
+                "val=" + val +
+                ", updateCnt=" + updateCnt +
+                '}';
+        }
+    }
+
+    /**
+     *
+     */
+    enum ReadMode {
+        /** */
+        GET,
+
+        /** */
+        SCAN,
+
+        /** */
+        SQL,
+
+        /** */
+        SQL_SUM
+    }
+
+    /**
+     *
+     */
+    enum WriteMode {
+        /** */
+        DML,
+
+        /** */
+        PUT
+    }
+
+    /**
+     *
+     */
+    enum RestartMode {
+        /**
+         * Dedicated coordinator node is restarted during test.
+         */
+        RESTART_CRD,
+
+        /** */
+        RESTART_RND_SRV
+    }
+
+    /**
+     *
+     */
+    static class CoordinatorNodeFilter implements IgnitePredicate<ClusterNode> {
+        /** {@inheritDoc} */
+        @Override public boolean apply(ClusterNode node) {
+            return node.attribute(CRD_ATTR) == null;
+        }
+    }
+
+    /**
+     *
+     */
+    static class CoordinatorAssignClosure implements IgniteClosure<Collection<ClusterNode>, ClusterNode> {
+        /** {@inheritDoc} */
+        @Override public ClusterNode apply(Collection<ClusterNode> clusterNodes) {
+            for (ClusterNode node : clusterNodes) {
+                if (node.attribute(CRD_ATTR) != null) {
+                    assert !node.isClient();
+
+                    return node;
+                }
+            }
+
+            return null;
+        }
+    }
+
+    /**
+     *
+     */
+    static class TestCache<K, V> {
+        /** */
+        final IgniteCache<K, V> cache;
+
+        /** Locks node to avoid node restart while test operation is in progress. */
+        final ReadWriteLock stopLock = new ReentrantReadWriteLock();
+
+        /**
+         * @param cache Cache.
+         */
+        TestCache(IgniteCache cache) {
+            this.cache = cache;
+        }
+
+        /**
+         * @return {@code True} if locked.
+         */
+        boolean readLock() {
+            return stopLock.readLock().tryLock();
+        }
+
+        /**
+         *
+         */
+        void readUnlock() {
+            stopLock.readLock().unlock();
+        }
+    }
+
+    /**
+     *
+     */
+    static class InitIndexing implements IgniteInClosure<CacheConfiguration> {
+        /** */
+        private final Class[] idxTypes;
+
+        /**
+         * @param idxTypes Indexed types.
+         */
+        InitIndexing(Class<?>... idxTypes) {
+            this.idxTypes = idxTypes;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void apply(CacheConfiguration cfg) {
+            cfg.setIndexedTypes(idxTypes);
+        }
+    }
+
+    /**
+     * Removed accounts tracker.
+     */
+    private static class RemovedAccountsTracker {
+        /** */
+        private final Map<Integer, Integer> rmvdKeys;
+
+        /**
+         * @param size Size.
+         */
+        RemovedAccountsTracker(int size) {
+            this.rmvdKeys = new HashMap<>(size);
+
+            for (int i = 0; i < size; i++)
+                rmvdKeys.put(i, 0);
+        }
+
+        /**
+         * @return Size.
+         */
+        public synchronized int size() {
+            int size = 0;
+
+            for (int i = 0; i < rmvdKeys.size(); i++) {
+                if (rmvdKeys.get(i) > 0)
+                    size++;
+            }
+
+            return size;
+        }
+
+        /**
+         * @param id Id.
+         * @return {@code True} if success.
+         */
+        synchronized boolean markRemoved(Integer id) {
+            Integer rmvdCntr = rmvdKeys.get(id);
+
+            Integer newCntr = rmvdCntr + 1;
+
+            rmvdKeys.put(id, newCntr);
+
+            return newCntr >= 0;
+        }
+
+        /**
+         * @param id Id.
+         * @return {@code True} if success.
+         */
+        synchronized boolean unmarkRemoved(Integer id) {
+            Integer rmvdCntr = rmvdKeys.get(id);
+
+            Integer newCntr = rmvdCntr - 1;
+
+            rmvdKeys.put(id, newCntr);
+
+            return newCntr >= 0;
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccClusterRestartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccClusterRestartTest.java
new file mode 100644
index 0000000..5cabffc
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccClusterRestartTest.java
@@ -0,0 +1,176 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ *
+ */
+public class CacheMvccClusterRestartTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setConsistentId(gridName);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        DataStorageConfiguration storageCfg = new DataStorageConfiguration();
+
+        storageCfg.setWalMode(WALMode.LOG_ONLY);
+        storageCfg.setPageSize(1024);
+
+        DataRegionConfiguration regionCfg = new DataRegionConfiguration();
+
+        regionCfg.setPersistenceEnabled(true);
+        regionCfg.setMaxSize(100 * 1024 * 1024);
+
+        storageCfg.setDefaultDataRegionConfiguration(regionCfg);
+
+        cfg.setDataStorageConfiguration(storageCfg);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9394");
+
+        cleanPersistenceDir();
+
+        super.beforeTest();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRestart1() throws Exception {
+       restart1(3, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRestart2() throws Exception {
+        restart1(1, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRestart3() throws Exception {
+        restart1(3, 1);
+    }
+
+    /**
+     * @param srvBefore Number of servers before restart.
+     * @param srvAfter Number of servers after restart.
+     * @throws Exception If failed.
+     */
+    private void restart1(int srvBefore, int srvAfter) throws Exception {
+        Ignite srv0 = startGridsMultiThreaded(srvBefore);
+
+        IgniteCache<Object, Object> cache = srv0.createCache(cacheConfiguration());
+
+        Set<Integer> keys = new HashSet<>(primaryKeys(cache, 1, 0));
+
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (Integer k : keys)
+                cache.put(k, k);
+
+            tx.commit();
+        }
+
+        stopAllGrids();
+
+        srv0 = startGridsMultiThreaded(srvAfter);
+
+        cache = srv0.cache(DEFAULT_CACHE_NAME);
+
+        Map<Object, Object> res = cache.getAll(keys);
+
+        assertEquals(keys.size(), res.size());
+
+        for (Integer k : keys)
+            assertEquals(k, cache.get(k));
+
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (Integer k : keys)
+                cache.put(k, k + 1);
+
+            tx.commit();
+        }
+
+        for (Integer k : keys)
+            assertEquals(k + 1, cache.get(k));
+    }
+
+    /**
+     * @return Cache configuration.
+     */
+    private CacheConfiguration<Object, Object> cacheConfiguration() {
+        CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
+
+        ccfg.setWriteSynchronizationMode(FULL_SYNC);
+        ccfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+        ccfg.setBackups(2);
+
+        return ccfg;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccConfigurationValidationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccConfigurationValidationTest.java
new file mode 100644
index 0000000..a10c6c7
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccConfigurationValidationTest.java
@@ -0,0 +1,424 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import javax.cache.CacheException;
+import javax.cache.configuration.Factory;
+import javax.cache.configuration.FactoryBuilder;
+import javax.cache.expiry.CreatedExpiryPolicy;
+import javax.cache.expiry.Duration;
+import javax.cache.expiry.ExpiryPolicy;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CacheInterceptorAdapter;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.store.CacheStore;
+import org.apache.ignite.cache.store.CacheStoreReadFromBackupTest;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.configvariations.ConfigVariations;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+
+/**
+ *
+ */
+@SuppressWarnings("unchecked")
+public class CacheMvccConfigurationValidationTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        super.afterTest();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccModeMismatchForGroup1() throws Exception {
+        final Ignite node = startGrid(0);
+
+        node.createCache(new CacheConfiguration("cache1").setGroupName("grp1").setAtomicityMode(ATOMIC));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.createCache(new CacheConfiguration("cache2").setGroupName("grp1").setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        node.createCache(new CacheConfiguration("cache2").setGroupName("grp1").setAtomicityMode(ATOMIC));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccModeMismatchForGroup2() throws Exception {
+        final Ignite node = startGrid(0);
+
+        node.createCache(new CacheConfiguration("cache1").setGroupName("grp1").setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.createCache(new CacheConfiguration("cache2").setGroupName("grp1").setAtomicityMode(ATOMIC));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        node.createCache(new CacheConfiguration("cache2").setGroupName("grp1").setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccLocalCacheDisabled() throws Exception {
+        final Ignite node1 = startGrid(1);
+        final Ignite node2 = startGrid(2);
+
+        IgniteCache cache1 = node1.createCache(new CacheConfiguration("cache1")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache1.put(1,1);
+        cache1.put(2,2);
+        cache1.put(2,2);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node1.createCache(new CacheConfiguration("cache2").setCacheMode(CacheMode.LOCAL)
+                    .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        IgniteCache cache3 = node2.createCache(new CacheConfiguration("cache3")
+            .setAtomicityMode(TRANSACTIONAL));
+
+        cache3.put(1, 1);
+        cache3.put(2, 2);
+        cache3.put(3, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccExpiredPolicyCacheDisabled() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-8640");
+
+        final Ignite node1 = startGrid(1);
+        final Ignite node2 = startGrid(2);
+
+        IgniteCache cache1 = node1.createCache(new CacheConfiguration("cache1")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache1.put(1,1);
+        cache1.put(2,2);
+        cache1.put(2,2);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node1.createCache(new CacheConfiguration("cache2")
+                    .setExpiryPolicyFactory(CreatedExpiryPolicy.factoryOf(new Duration(TimeUnit.MINUTES, 1)))
+                    .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        IgniteCache cache3 = node2.createCache(new CacheConfiguration("cache3")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache3.put(1, 1);
+        cache3.put(2, 2);
+        cache3.put(3, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccThirdPartyStoreCacheDisabled() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-8640");
+
+        final Ignite node1 = startGrid(1);
+        final Ignite node2 = startGrid(2);
+
+        IgniteCache cache1 = node1.createCache(new CacheConfiguration("cache1")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache1.put(1,1);
+        cache1.put(2,2);
+        cache1.put(2,2);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node1.createCache(new CacheConfiguration("cache2")
+                    .setCacheStoreFactory(FactoryBuilder.factoryOf(CacheStoreReadFromBackupTest.TestStore.class))
+                    .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        IgniteCache cache3 = node2.createCache(new CacheConfiguration("cache3")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache3.put(1, 1);
+        cache3.put(2, 2);
+        cache3.put(3, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccInterceptorCacheDisabled() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-8640");
+
+        final Ignite node1 = startGrid(1);
+        final Ignite node2 = startGrid(2);
+
+        IgniteCache cache1 = node1.createCache(new CacheConfiguration("cache1")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache1.put(1,1);
+        cache1.put(2,2);
+        cache1.put(2,2);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node1.createCache(new CacheConfiguration("cache2")
+                    .setInterceptor(new ConfigVariations.NoopInterceptor())
+                    .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+                return null;
+            }
+        }, CacheException.class, null);
+
+        IgniteCache cache3 = node2.createCache(new CacheConfiguration("cache3")
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT));
+
+        cache3.put(1, 1);
+        cache3.put(2, 2);
+        cache3.put(3, 3);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNodeRestartWithCacheModeChangedTxToMvcc() throws Exception {
+        cleanPersistenceDir();
+
+        //Enable persistence.
+        DataStorageConfiguration storageCfg = new DataStorageConfiguration();
+        DataRegionConfiguration regionCfg = new DataRegionConfiguration();
+        regionCfg.setPersistenceEnabled(true);
+        storageCfg.setDefaultDataRegionConfiguration(regionCfg);
+        IgniteConfiguration cfg = getConfiguration("testGrid");
+        cfg.setDataStorageConfiguration(storageCfg);
+        cfg.setConsistentId(cfg.getIgniteInstanceName());
+
+        Ignite node = startGrid(cfg);
+
+        node.cluster().active(true);
+
+        CacheConfiguration ccfg1 = new CacheConfiguration("test1").setAtomicityMode(TRANSACTIONAL);
+
+        IgniteCache cache = node.createCache(ccfg1);
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+        cache.put(2, 2);
+
+        stopGrid(cfg.getIgniteInstanceName());
+
+        CacheConfiguration ccfg2 = new CacheConfiguration().setName(ccfg1.getName())
+            .setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+
+        IgniteConfiguration cfg2 = getConfiguration("testGrid")
+            .setConsistentId(cfg.getIgniteInstanceName())
+            .setCacheConfiguration(ccfg2)
+            .setDataStorageConfiguration(storageCfg);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                startGrid(cfg2);
+
+                return null;
+            }
+        }, IgniteCheckedException.class, "Failed to start processor");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNodeRestartWithCacheModeChangedMvccToTx() throws Exception {
+        cleanPersistenceDir();
+
+        //Enable persistence.
+        DataStorageConfiguration storageCfg = new DataStorageConfiguration();
+        DataRegionConfiguration regionCfg = new DataRegionConfiguration();
+        regionCfg.setPersistenceEnabled(true);
+        storageCfg.setDefaultDataRegionConfiguration(regionCfg);
+        IgniteConfiguration cfg = getConfiguration("testGrid");
+        cfg.setDataStorageConfiguration(storageCfg);
+        cfg.setConsistentId(cfg.getIgniteInstanceName());
+
+        Ignite node = startGrid(cfg);
+
+        node.cluster().active(true);
+
+        CacheConfiguration ccfg1 = new CacheConfiguration("test1").setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+
+        IgniteCache cache = node.createCache(ccfg1);
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+        cache.put(2, 2);
+
+        stopGrid(cfg.getIgniteInstanceName());
+
+        CacheConfiguration ccfg2 = new CacheConfiguration().setName(ccfg1.getName())
+            .setAtomicityMode(TRANSACTIONAL);
+
+        IgniteConfiguration cfg2 = getConfiguration("testGrid")
+            .setConsistentId(cfg.getIgniteInstanceName())
+            .setCacheConfiguration(ccfg2)
+            .setDataStorageConfiguration(storageCfg);
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                startGrid(cfg2);
+
+                return null;
+            }
+        }, IgniteCheckedException.class, "Failed to start processor");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxCacheWithCacheStore() throws Exception {
+        checkTransactionalModeConflict("cacheStoreFactory", new TestFactory(),
+            "Transactional cache may not have a third party cache store when MVCC is enabled.");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxCacheWithExpiryPolicy() throws Exception {
+        checkTransactionalModeConflict("expiryPolicyFactory0", CreatedExpiryPolicy.factoryOf(Duration.FIVE_MINUTES),
+            "Transactional cache may not have expiry policy when MVCC is enabled.");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxCacheWithInterceptor() throws Exception {
+        checkTransactionalModeConflict("interceptor", new CacheInterceptorAdapter(),
+            "Transactional cache may not have an interceptor when MVCC is enabled.");
+    }
+
+    /**
+     * Check that setting specified property conflicts with transactional cache atomicity mode.
+     * @param propName Property name.
+     * @param obj Property value.
+     * @param errMsg Expected error message.
+     * @throws IgniteCheckedException if failed.
+     */
+    @SuppressWarnings("ThrowableNotThrown")
+    private void checkTransactionalModeConflict(String propName, Object obj, String errMsg)
+        throws Exception {
+        final String setterName = "set" + propName.substring(0, 1).toUpperCase() + propName.substring(1);
+
+        try (final Ignite node = startGrid(0)) {
+            final CacheConfiguration cfg = new TestConfiguration("cache");
+
+            cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+
+            U.invoke(TestConfiguration.class, cfg, setterName, obj);
+
+            GridTestUtils.assertThrows(log, new Callable<Void>() {
+                @SuppressWarnings("unchecked")
+                @Override public Void call() {
+                    node.getOrCreateCache(cfg);
+
+                    return null;
+                }
+            }, IgniteCheckedException.class, errMsg);
+        }
+    }
+
+    /**
+     * Dummy class to overcome ambiguous method name "setExpiryPolicyFactory".
+     */
+    private final static class TestConfiguration extends CacheConfiguration {
+        /**
+         *
+         */
+        TestConfiguration(String cacheName) {
+            super(cacheName);
+        }
+
+        /**
+         *
+         */
+        @SuppressWarnings("unused")
+        public void setExpiryPolicyFactory0(Factory<ExpiryPolicy> plcFactory) {
+            super.setExpiryPolicyFactory(plcFactory);
+        }
+    }
+
+    /**
+     *
+     */
+    private static class TestFactory implements Factory<CacheStore> {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 0L;
+
+        /** {@inheritDoc} */
+        @Override public CacheStore create() {
+            return null;
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentTransactionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentTransactionTest.java
new file mode 100644
index 0000000..90c5b6e
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentTransactionTest.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import javax.cache.Cache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.util.lang.IgniteClosure2X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ *
+ */
+public class CacheMvccIteratorWithConcurrentTransactionTest extends CacheMvccAbstractFeatureTest {
+    /**
+     * @throws Exception if failed.
+     */
+    public void testScanQuery() throws Exception {
+        doTestConsistency(clo);
+    }
+
+    /** Test closure. */
+    private final IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>> clo =
+        new IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>>() {
+        @Override public List<Person> applyx(CountDownLatch startLatch, CountDownLatch endLatch2)
+            throws IgniteCheckedException {
+            Iterator<Cache.Entry<Integer, Person>> it = cache().iterator();
+
+            List<Cache.Entry<Integer, Person>> pres = new ArrayList<>();
+
+            for (int i = 0; i < 50; i++)
+                pres.add(it.next());
+
+            if (startLatch != null)
+                startLatch.countDown();
+
+            while (it.hasNext())
+                pres.add(it.next());
+
+            if (endLatch2 != null)
+                U.await(endLatch2);
+
+            return entriesToPersons(pres);
+        }
+    };
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentTransactionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentTransactionTest.java
new file mode 100644
index 0000000..f4c9781
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentTransactionTest.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import javax.cache.Cache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.internal.util.lang.IgniteClosure2X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ *
+ */
+public class CacheMvccLocalEntriesWithConcurrentTransactionTest extends CacheMvccAbstractFeatureTest {
+    /**
+     * @throws Exception if failed.
+     */
+    public void testLocalEntries() throws Exception {
+        doTestConsistency(clo);
+    }
+
+    /** Test closure. */
+    private final IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>> clo =
+        new IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>>() {
+        @Override public List<Person> applyx(CountDownLatch startLatch, CountDownLatch endLatch2)
+            throws IgniteCheckedException {
+            Iterator<Cache.Entry<Integer, Person>> it = cache().localEntries(CachePeekMode.PRIMARY).iterator();
+
+            List<Cache.Entry<Integer, Person>> pres = new ArrayList<>();
+
+            for (int i = 0; i < 10; i++)
+                pres.add(it.next());
+
+            if (startLatch != null)
+                startLatch.countDown();
+
+            while (it.hasNext())
+                pres.add(it.next());
+
+            if (endLatch2 != null)
+                U.await(endLatch2);
+
+            return entriesToPersons(pres);
+        }
+    };
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccOperationChecksTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccOperationChecksTest.java
new file mode 100644
index 0000000..5aedf17
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccOperationChecksTest.java
@@ -0,0 +1,201 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import javax.cache.expiry.EternalExpiryPolicy;
+import javax.cache.expiry.ExpiryPolicy;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CachePeekMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/**
+ *
+ */
+public class CacheMvccOperationChecksTest extends CacheMvccAbstractTest {
+    /** Empty Class[]. */
+    private final static Class[] E = new Class[]{};
+
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testClearOperationsUnsupported() throws Exception {
+        checkOperationUnsupported("clear", m("Clear"), E);
+
+        checkOperationUnsupported("clearAsync", m("Clear"), E);
+
+        checkOperationUnsupported("clear", m("Clear"), t(Object.class), 1);
+
+        checkOperationUnsupported("clearAsync", m("Clear"), t(Object.class), 1);
+
+        checkOperationUnsupported("clearAll", m("Clear"), t(Set.class), Collections.singleton(1));
+
+        checkOperationUnsupported("clearAllAsync", m("Clear"), t(Set.class),
+            Collections.singleton(1));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testLoadOperationsUnsupported() throws Exception {
+        checkOperationUnsupported("loadCache", m("Load"), t(IgniteBiPredicate.class, Object[].class),
+            P, new Object[]{ 1 });
+
+        checkOperationUnsupported("loadCacheAsync", m("Load"), t(IgniteBiPredicate.class, Object[].class),
+            P, new Object[]{ 1 });
+
+        checkOperationUnsupported("localLoadCache", m("Load"), t(IgniteBiPredicate.class, Object[].class),
+            P, new Object[]{ 1 });
+
+        checkOperationUnsupported("localLoadCacheAsync", m("Load"), t(IgniteBiPredicate.class, Object[].class),
+            P, new Object[]{ 1 });
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testLockOperationsUnsupported() throws Exception {
+        checkOperationUnsupported("lock", m("Lock"), t(Object.class), 1);
+
+        checkOperationUnsupported("lockAll", m("Lock"), t(Collection.class), Collections.singleton(1));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testPeekOperationsUnsupported() throws Exception {
+        checkOperationUnsupported("localPeek", m("Peek"), t(Object.class, CachePeekMode[].class), 1,
+            new CachePeekMode[]{CachePeekMode.NEAR});
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testEvictOperationsUnsupported() throws Exception {
+        checkOperationUnsupported("localEvict", m("Evict"), t(Collection.class), Collections.singleton(1));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWithExpiryPolicyUnsupported() throws Exception {
+        checkOperationUnsupported("withExpiryPolicy", m("withExpiryPolicy"), t(ExpiryPolicy.class),
+            EternalExpiryPolicy.factoryOf().create());
+    }
+
+    /**
+     * @param opTypeName Operation type name.
+     * @return Typical error message from {@link GridCacheAdapter}.
+     */
+    private static String m(String opTypeName) {
+        return opTypeName + " operations are not supported on transactional caches when MVCC is enabled.";
+    }
+
+    /**
+     * @param types Parameter types.
+     * @return Types array.
+     */
+    private static Class[] t(Class... types) {
+        return types;
+    }
+
+    /**
+     * @param mtdName Method name.
+     * @param errMsg Expected error message.
+     * @param paramTypes Operation param types.
+     * @param args Operation arguments.
+     * @throws Exception if failed.
+     */
+    @SuppressWarnings("ThrowableNotThrown")
+    private void checkOperationUnsupported(String mtdName, String errMsg, Class[] paramTypes,
+        Object... args) throws Exception {
+        final boolean async = mtdName.endsWith("Async");
+
+        try (final Ignite node = startGrid(0)) {
+            final CacheConfiguration<Integer, String> cfg = new CacheConfiguration<>("cache");
+
+            cfg.setCacheMode(cacheMode());
+            cfg.setAtomicityMode(TRANSACTIONAL_SNAPSHOT);
+
+            try (IgniteCache<Integer, String> cache = node.createCache(cfg)) {
+                GridTestUtils.assertThrows(log, new Callable<Void>() {
+                    @SuppressWarnings("unchecked")
+                    @Override public Void call() throws Exception {
+                        try {
+                            Object o = U.invoke(null, cache, mtdName, paramTypes, args);
+
+                            if (async) {
+                                assertTrue(o instanceof IgniteFuture<?>);
+
+                                ((IgniteFuture)o).get();
+                            }
+                        }
+                        catch (Exception e) {
+                            if (e.getCause() == null)
+                                throw e;
+
+                            if (e.getCause().getCause() == null)
+                                throw e;
+
+                            throw (Exception)e.getCause().getCause();
+                        }
+
+                        return null;
+                    }
+                }, UnsupportedOperationException.class, errMsg);
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private final static IgniteBiPredicate<Object, Object> P = new IgniteBiPredicate<Object, Object>() {
+        @Override public boolean apply(Object o, Object o2) {
+            return false;
+        }
+    };
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedCoordinatorFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedCoordinatorFailoverTest.java
new file mode 100644
index 0000000..3ea1c5b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedCoordinatorFailoverTest.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.GET;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.PUT;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Coordinator failover test for partitioned caches.
+ */
+public class CacheMvccPartitionedCoordinatorFailoverTest extends CacheMvccAbstractCoordinatorFailoverTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGet_ClientServer_Backups2_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(4, 2, 2, DFLT_PARTITION_COUNT,
+            null, true, GET, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGet_Server_Backups1_CoordinatorFails() throws Exception {
+        accountsTxReadAll(2, 0, 1, DFLT_PARTITION_COUNT,
+            null, true, GET, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_ClientServer_Backups2_CoordinatorFails() throws Exception {
+        accountsTxReadAll(4, 2, 2, DFLT_PARTITION_COUNT,
+            null, true, SCAN, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_Server_Backups1_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(2, 0, 1, DFLT_PARTITION_COUNT,
+            null, true, SCAN, PUT, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups2_RestartCoordinator_GetPut() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD, 4, 2, 2, DFLT_PARTITION_COUNT,
+            null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_RestartCoordinator_GetPut_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD, 2, 1, 1, 64,
+            null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups1_PutGet_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        updateNObjectsTest(3, 5, 3, 1, DFLT_PARTITION_COUNT, DFLT_TEST_TIME,
+            null, GET, PUT, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups1__PutGet_CoordinatorFails() throws Exception {
+        updateNObjectsTest(10, 3, 2, 1, DFLT_PARTITION_COUNT, DFLT_TEST_TIME,
+            null, GET, PUT, RestartMode.RESTART_CRD);
+    }
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetReadInProgressCoordinatorFails() throws Exception {
+        readInProgressCoordinatorFails(false, false, PESSIMISTIC, REPEATABLE_READ, GET, PUT, null);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetReadInsideTxInProgressCoordinatorFails() throws Exception {
+        readInProgressCoordinatorFails(false, true, PESSIMISTIC, REPEATABLE_READ, GET, PUT, null);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetReadInProgressCoordinatorFails_ReadDelay() throws Exception {
+        readInProgressCoordinatorFails(true, false, PESSIMISTIC, REPEATABLE_READ, GET, PUT, null);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetReadInsideTxInProgressCoordinatorFails_ReadDelay() throws Exception {
+        readInProgressCoordinatorFails(true, true, PESSIMISTIC, REPEATABLE_READ, GET, PUT, null);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReadInProgressCoordinatorFailsSimple_FromServerPutGet() throws Exception {
+        readInProgressCoordinatorFailsSimple(false, null, GET, PUT);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorLazyStartTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorLazyStartTest.java
new file mode 100644
index 0000000..064e7bb
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorLazyStartTest.java
@@ -0,0 +1,175 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+
+/**
+ * Tests for a lazy MVCC processor start.
+ */
+@SuppressWarnings("unchecked")
+public class CacheMvccProcessorLazyStartTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPreconfiguredCacheMvccNotStarted() throws Exception {
+        CacheConfiguration ccfg = cacheConfiguration(CacheMode.PARTITIONED, CacheWriteSynchronizationMode.FULL_SYNC, 0, 1);
+        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
+
+        IgniteConfiguration cfg = getConfiguration();
+        cfg.setCacheConfiguration(ccfg);
+        IgniteConfiguration cfg2 = getConfiguration("node2");
+
+        IgniteEx node1 = startGrid(cfg);
+        IgniteEx node2 = startGrid(cfg2);
+
+        IgniteCache cache = node1.cache(ccfg.getName());
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertFalse(mvccEnabled(node1));
+        assertFalse(mvccEnabled(node2));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPreconfiguredCacheMvccStarted() throws Exception {
+        CacheConfiguration ccfg = cacheConfiguration(CacheMode.PARTITIONED, CacheWriteSynchronizationMode.FULL_SYNC, 0, 1);
+
+        IgniteConfiguration cfg1 = getConfiguration();
+        cfg1.setCacheConfiguration(ccfg);
+        IgniteConfiguration cfg2 = getConfiguration("node2");
+
+        IgniteEx node1 = startGrid(cfg1);
+        IgniteEx node2 = startGrid(cfg2);
+
+        IgniteCache cache = node1.cache(ccfg.getName());
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccRestartedWithDynamicCache() throws Exception {
+        persistence = true;
+
+        IgniteEx node1 = startGrid(1);
+        IgniteEx node2 = startGrid(2);
+
+        assertFalse(mvccEnabled(node1));
+        assertFalse(mvccEnabled(node2));
+
+        node1.cluster().active(true);
+
+        assertFalse(mvccEnabled(node1));
+        assertFalse(mvccEnabled(node2));
+
+        CacheConfiguration ccfg = cacheConfiguration(CacheMode.PARTITIONED, CacheWriteSynchronizationMode.FULL_SYNC, 0, 1);
+
+        IgniteCache cache = node1.createCache(ccfg);
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+
+        stopGrid(1);
+        stopGrid(2);
+
+        node1 = startGrid(1);
+        node2 = startGrid(2);
+
+        node1.cluster().active(true);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+
+        cache = node1.cache(ccfg.getName());
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccStartedWithDynamicCache() throws Exception {
+        IgniteEx node1 = startGrid(1);
+        IgniteEx node2 = startGrid(2);
+
+        assertFalse(mvccEnabled(node1));
+        assertFalse(mvccEnabled(node2));
+
+        CacheConfiguration ccfg = cacheConfiguration(CacheMode.PARTITIONED, CacheWriteSynchronizationMode.FULL_SYNC, 0, 1);
+
+        IgniteCache cache = node1.createCache(ccfg);
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+
+        stopGrid(1);
+        stopGrid(2);
+
+        node1 = startGrid(1);
+        node2 = startGrid(2);
+
+        // Should not be started because we do not have persistence enabled
+        assertFalse(mvccEnabled(node1));
+        assertFalse(mvccEnabled(node2));
+
+        cache = node1.createCache(ccfg);
+
+        cache.put(1, 1);
+        cache.put(1, 2);
+
+        assertTrue(mvccEnabled(node1));
+        assertTrue(mvccEnabled(node2));
+    }
+
+    /**
+     * @param node Node.
+     * @return {@code True} if {@link MvccProcessor} is started.
+     */
+    private boolean mvccEnabled(IgniteEx node) {
+        return node.context().coordinators().mvccEnabled();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorTest.java
new file mode 100644
index 0000000..dc902fc
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccProcessorTest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/**
+ *
+ */
+public class CacheMvccProcessorTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTreeWithPersistence() throws Exception {
+        persistence = true;
+
+        checkTreeOperations();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTreeWithoutPersistence() throws Exception {
+        persistence = true;
+
+        checkTreeOperations();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void checkTreeOperations() throws Exception {
+        IgniteEx grid = startGrid(0);
+
+        grid.cluster().active(true);
+
+        grid.createCache(new CacheConfiguration<>("test").setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT));
+
+        MvccProcessorImpl mvccProcessor = mvccProcessor(grid);
+
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 1, MvccUtils.MVCC_OP_COUNTER_NA)));
+
+        mvccProcessor.updateState(new MvccVersionImpl(1, 1, MvccUtils.MVCC_OP_COUNTER_NA), TxState.PREPARED);
+        mvccProcessor.updateState(new MvccVersionImpl(1, 2, MvccUtils.MVCC_OP_COUNTER_NA), TxState.PREPARED);
+        mvccProcessor.updateState(new MvccVersionImpl(1, 3, MvccUtils.MVCC_OP_COUNTER_NA), TxState.COMMITTED);
+        mvccProcessor.updateState(new MvccVersionImpl(1, 4, MvccUtils.MVCC_OP_COUNTER_NA), TxState.ABORTED);
+        mvccProcessor.updateState(new MvccVersionImpl(1, 5, MvccUtils.MVCC_OP_COUNTER_NA), TxState.ABORTED);
+        mvccProcessor.updateState(new MvccVersionImpl(1, 6, MvccUtils.MVCC_OP_COUNTER_NA), TxState.PREPARED);
+
+        if (persistence) {
+            stopGrid(0, false);
+            grid = startGrid(0);
+
+            grid.cluster().active(true);
+
+            mvccProcessor = mvccProcessor(grid);
+        }
+
+        assertEquals(TxState.PREPARED, mvccProcessor.state(new MvccVersionImpl(1, 1, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.PREPARED, mvccProcessor.state(new MvccVersionImpl(1, 2, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.COMMITTED, mvccProcessor.state(new MvccVersionImpl(1, 3, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.ABORTED, mvccProcessor.state(new MvccVersionImpl(1, 4, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.ABORTED, mvccProcessor.state(new MvccVersionImpl(1, 5, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.PREPARED, mvccProcessor.state(new MvccVersionImpl(1, 6, MvccUtils.MVCC_OP_COUNTER_NA)));
+
+        mvccProcessor.removeUntil(new MvccVersionImpl(1, 5, MvccUtils.MVCC_OP_COUNTER_NA));
+
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 1, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 2, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 3, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 4, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.NA, mvccProcessor.state(new MvccVersionImpl(1, 5, MvccUtils.MVCC_OP_COUNTER_NA)));
+        assertEquals(TxState.PREPARED, mvccProcessor.state(new MvccVersionImpl(1, 6, MvccUtils.MVCC_OP_COUNTER_NA)));
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedCoordinatorFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedCoordinatorFailoverTest.java
new file mode 100644
index 0000000..dc948cd
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedCoordinatorFailoverTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+/**
+ * Coordinator failover test for replicated caches.
+ */
+public class CacheMvccReplicatedCoordinatorFailoverTest extends CacheMvccAbstractCoordinatorFailoverTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.REPLICATED;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentTransactionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentTransactionTest.java
new file mode 100644
index 0000000..8af6a5b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentTransactionTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import javax.cache.Cache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.internal.util.lang.IgniteClosure2X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiPredicate;
+
+/**
+ *
+ */
+public class CacheMvccScanQueryWithConcurrentTransactionTest extends CacheMvccAbstractFeatureTest {
+    /**
+     * @throws Exception if failed.
+     */
+    public void testScanQuery() throws Exception {
+        doTestConsistency(clo);
+    }
+
+    /** Test closure. */
+    private final IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>> clo =
+        new IgniteClosure2X<CountDownLatch, CountDownLatch, List<Person>>() {
+        @Override public List<Person> applyx(CountDownLatch startLatch, CountDownLatch endLatch2)
+            throws IgniteCheckedException {
+            IgniteBiPredicate<Integer, Person> f = new IgniteBiPredicate<Integer, Person>() {
+                @Override public boolean apply(Integer k, Person v) {
+                    return k % 2 == 0;
+                }
+            };
+
+            try (QueryCursor<Cache.Entry<Integer, Person>> cur = cache().query(new ScanQuery<Integer, Person>()
+                .setFilter(f))) {
+                Iterator<Cache.Entry<Integer, Person>> it = cur.iterator();
+
+                List<Cache.Entry<Integer, Person>> pres = new ArrayList<>();
+
+                for (int i = 0; i < 50; i++)
+                    pres.add(it.next());
+
+                if (startLatch != null)
+                    startLatch.countDown();
+
+                while (it.hasNext())
+                    pres.add(it.next());
+
+                if (endLatch2 != null)
+                    U.await(endLatch2);
+
+                return entriesToPersons(pres);
+            }
+        }
+    };
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentTransactionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentTransactionTest.java
new file mode 100644
index 0000000..2b8b73e
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentTransactionTest.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.concurrent.CountDownLatch;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.util.lang.IgniteClosure2X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ *
+ */
+public class CacheMvccSizeWithConcurrentTransactionTest extends CacheMvccAbstractFeatureTest {
+    /**
+     * @throws Exception if failed.
+     */
+    public void testSize() throws Exception {
+        doTestConsistency(clo);
+    }
+
+    /** Test closure. */
+    private final IgniteClosure2X<CountDownLatch, CountDownLatch, Integer> clo =
+        new IgniteClosure2X<CountDownLatch, CountDownLatch, Integer>() {
+        @Override public Integer applyx(CountDownLatch startLatch, CountDownLatch endLatch2)
+            throws IgniteCheckedException {
+            if (startLatch != null)
+                startLatch.countDown();
+
+            int res = cache().size();
+
+            if (endLatch2 != null)
+                U.await(endLatch2);
+
+            return res;
+        }
+    };
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccTransactionsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccTransactionsTest.java
new file mode 100644
index 0000000..83bb81c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccTransactionsTest.java
@@ -0,0 +1,3689 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import javax.cache.Cache;
+import javax.cache.expiry.Duration;
+import javax.cache.expiry.TouchedExpiryPolicy;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteTransactions;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.cluster.ClusterTopologyException;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteKernal;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.GridCacheMapEntry;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.TestCacheNodeExcludingFilter;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishResponse;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestQueryCntr;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccAckRequestTx;
+import org.apache.ignite.internal.processors.cache.mvcc.msg.MvccSnapshotResponse;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.lang.GridInClosure3;
+import org.apache.ignite.internal.util.typedef.CI1;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.CU;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiInClosure;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.apache.ignite.transactions.TransactionOptimisticException;
+import org.jetbrains.annotations.Nullable;
+import org.junit.Assert;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.GET;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.PUT;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker.MVCC_TRACKER_ID_NA;
+import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE;
+
+/**
+ * TODO IGNITE-6739: tests reload
+ * TODO IGNITE-6739: extend tests to use single/mutiple nodes, all tx types.
+ * TODO IGNITE-6739: test with cache groups.
+ */
+@SuppressWarnings("unchecked")
+public class CacheMvccTransactionsTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPessimisticTx1() throws Exception {
+        checkTx1(PESSIMISTIC, REPEATABLE_READ);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticSerializableTx1() throws Exception {
+        checkTx1(OPTIMISTIC, SERIALIZABLE);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticRepeatableReadTx1() throws Exception {
+        checkTx1(OPTIMISTIC, REPEATABLE_READ);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticReadCommittedTx1() throws Exception {
+        checkTx1(OPTIMISTIC, READ_COMMITTED);
+    }
+
+    /**
+     * @param concurrency Transaction concurrency.
+     * @param isolation Transaction isolation.
+     * @throws Exception If failed.
+     */
+    private void checkTx1(final TransactionConcurrency concurrency, final TransactionIsolation isolation)
+        throws Exception {
+        checkTxWithAllCaches(new CI1<IgniteCache<Integer, Integer>>() {
+            @Override public void apply(IgniteCache<Integer, Integer> cache) {
+                try {
+                    IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                    List<Integer> keys = testKeys(cache);
+
+                    for (Integer key : keys) {
+                        log.info("Test key: " + key);
+
+                        try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                            Integer val = cache.get(key);
+
+                            assertNull(val);
+
+                            cache.put(key, key);
+
+                            val = (Integer)checkAndGet(true, cache, key, GET, SCAN);
+
+                            assertEquals(key, val);
+
+                            tx.commit();
+                        }
+
+                        Integer val = (Integer)checkAndGet(false, cache, key, SCAN, GET);
+
+                        assertEquals(key, val);
+                    }
+                }
+                catch (Exception e) {
+                    throw new IgniteException(e);
+                }
+            }
+        });
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPessimisticTx2() throws Exception {
+        checkTx2(PESSIMISTIC, REPEATABLE_READ);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticSerializableTx2() throws Exception {
+        checkTx2(OPTIMISTIC, SERIALIZABLE);
+    }
+
+    /**
+     * @param concurrency Transaction concurrency.
+     * @param isolation Transaction isolation.
+     * @throws Exception If failed.
+     */
+    private void checkTx2(final TransactionConcurrency concurrency, final TransactionIsolation isolation)
+        throws Exception {
+        checkTxWithAllCaches(new CI1<IgniteCache<Integer, Integer>>() {
+            @Override public void apply(IgniteCache<Integer, Integer> cache) {
+                try {
+                    IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                    List<Integer> keys = testKeys(cache);
+
+                    for (Integer key : keys) {
+                        log.info("Test key: " + key);
+
+                        try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                            cache.put(key, key);
+                            cache.put(key + 1, key + 1);
+
+                            assertEquals(key, checkAndGet(true, cache, key, GET, SCAN));
+                            assertEquals(key + 1, checkAndGet(true, cache, key + 1, GET, SCAN));
+
+                            tx.commit();
+                        }
+
+                        assertEquals(key, checkAndGet(false, cache, key, GET, SCAN));
+                        assertEquals(key + 1, checkAndGet(false, cache, key + 1, GET, SCAN));
+                    }
+                }
+                catch (Exception e) {
+                    throw new IgniteException(e);
+                }
+            }
+        });
+    }
+
+    /**
+     * @param c Closure to run.
+     * @throws Exception If failed.
+     */
+    private void checkTxWithAllCaches(IgniteInClosure<IgniteCache<Integer, Integer>> c) throws Exception {
+        client = false;
+
+        startGridsMultiThreaded(SRVS);
+
+        client = true;
+
+        startGrid(SRVS);
+
+        try {
+            for (CacheConfiguration<Object, Object> ccfg : cacheConfigurations()) {
+                logCacheInfo(ccfg);
+
+                ignite(0).createCache(ccfg);
+
+                try {
+                    Ignite node = ignite(0);
+
+                    IgniteCache<Integer, Integer> cache = node.cache(ccfg.getName());
+
+                    c.apply(cache);
+                }
+                finally {
+                    ignite(0).destroyCache(ccfg.getName());
+                }
+            }
+
+            verifyCoordinatorInternalState();
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testWithCacheGroups() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        List<CacheConfiguration> ccfgs = new ArrayList<>();
+
+        for (int c = 0; c < 3; c++) {
+            CacheConfiguration ccfg = cacheConfiguration(PARTITIONED, FULL_SYNC, 0, DFLT_PARTITION_COUNT);
+
+            ccfg.setName("cache-" + c);
+            ccfg.setGroupName("grp1");
+
+            ccfgs.add(ccfg);
+        }
+
+        srv0.createCaches(ccfgs);
+
+        final int PUTS = 5;
+
+        for (int i = 0; i < PUTS; i++) {
+            for (int c = 0; c < 3; c++) {
+                IgniteCache cache = srv0.cache("cache-" + c);
+
+                Map<Integer, Integer> vals = new HashMap<>();
+
+                for (int k = 0; k < 10; k++) {
+                    cache.put(k, i);
+
+                    vals.put(k, i);
+
+                    assertEquals(i, checkAndGet(false, cache, k, SCAN, GET));
+                }
+
+                assertEquals(vals, checkAndGetAll(false, cache, vals.keySet(), GET, SCAN));
+            }
+        }
+
+        for (int c = 0; c < 3; c++) {
+            IgniteCache cache = srv0.cache("cache-" + c);
+
+            Map<Integer, Integer> vals = new HashMap<>();
+
+            for (int k = 0; k < 10; k++) {
+                if (k % 2 == 0)
+                    vals.put(k, PUTS - 1);
+                else {
+                    cache.remove(k);
+
+                    assertNull(checkAndGet(false, cache, k, SCAN, GET));
+                }
+            }
+
+            assertEquals(vals, checkAndGetAll(false, cache, vals.keySet(), GET, SCAN));
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCacheRecreate() throws Exception {
+        cacheRecreate(null);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testActiveQueriesCleanup() throws Exception {
+        activeQueriesCleanup(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testActiveQueriesCleanupTx() throws Exception {
+        activeQueriesCleanup(true);
+    }
+
+    /**
+     * @param tx If {@code true} tests reads inside transaction.
+     * @throws Exception If failed.
+     */
+    private void activeQueriesCleanup(final boolean tx) throws Exception {
+        startGridsMultiThreaded(SRVS);
+
+        client = true;
+
+        Ignite client = startGrid(SRVS);
+
+        final int NODES = SRVS + 1;
+
+        CacheConfiguration ccfg = cacheConfiguration(PARTITIONED, FULL_SYNC, 1, 512);
+
+        client.createCache(ccfg);
+
+        final long stopTime = System.currentTimeMillis() + 5000;
+
+        GridTestUtils.runMultiThreaded(new IgniteInClosure<Integer>() {
+            @Override public void apply(Integer idx) {
+                ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                Ignite node = ignite(idx % NODES);
+
+                IgniteTransactions txs = node.transactions();
+
+                IgniteCache cache = node.cache(DEFAULT_CACHE_NAME);
+
+                while (System.currentTimeMillis() < stopTime) {
+                    int keyCnt = rnd.nextInt(10) + 1;
+
+                    Set<Integer> keys = new HashSet<>();
+
+                    for (int i = 0; i < keyCnt; i++)
+                        keys.add(rnd.nextInt());
+
+                    if (tx) {
+                        try (Transaction tx = txs.txStart(OPTIMISTIC, SERIALIZABLE)) {
+                            cache.getAll(keys);
+
+                            if (rnd.nextBoolean())
+                                tx.commit();
+                            else
+                                tx.rollback();
+                        }
+                    }
+                    else
+                        cache.getAll(keys);
+                }
+            }
+        }, NODES * 2, "get-thread");
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxReadIsolationSimple() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-7764");
+
+        Ignite srv0 = startGrids(4);
+
+        client = true;
+
+        startGrid(4);
+
+        for (CacheConfiguration ccfg : cacheConfigurations()) {
+            IgniteCache<Object, Object> cache0 = srv0.createCache(ccfg);
+
+            final Map<Integer, Integer> startVals = new HashMap<>();
+
+            final int KEYS = 10;
+
+            for (int i = 0; i < KEYS; i++)
+                startVals.put(i, 0);
+
+            for (final TransactionIsolation isolation : TransactionIsolation.values()) {
+                for (final Ignite node : G.allGrids()) {
+                    info("Run test [node=" + node.name() + ", isolation=" + isolation + ']');
+
+                    try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        cache0.putAll(startVals);
+
+                        tx.commit();
+                    }
+
+                    final CountDownLatch readStart = new CountDownLatch(1);
+
+                    final CountDownLatch readProceed = new CountDownLatch(1);
+
+                    IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable<Void>() {
+                        @Override public Void call() throws Exception {
+                            IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+                            try (Transaction tx = node.transactions().txStart(OPTIMISTIC, isolation)) {
+                                assertEquals(0, checkAndGet(false, cache, 0, SCAN, GET));
+
+                                readStart.countDown();
+
+                                assertTrue(readProceed.await(5, TimeUnit.SECONDS));
+
+                                if (isolation == READ_COMMITTED) {
+                                    assertNull(checkAndGet(false, cache, 1, SCAN, GET));
+
+                                    assertEquals(1, checkAndGet(false, cache, 2, SCAN, GET));
+
+                                    Map<Object, Object> res = checkAndGetAll(false, cache, startVals.keySet(), SCAN, GET);
+
+                                    assertEquals(startVals.size() / 2, res.size());
+
+                                    for (Map.Entry<Object, Object> e : res.entrySet())
+                                        assertEquals("Invalid value for key: " + e.getKey(), 1, e.getValue());
+                                }
+                                else {
+                                    assertEquals(0, checkAndGet(true, cache, 1, GET, SCAN));
+
+                                    assertEquals(0, checkAndGet(true, cache, 2, GET, SCAN));
+
+                                    Map<Object, Object> res = checkAndGetAll(true, cache, startVals.keySet(), GET, SCAN);
+
+                                    assertEquals(startVals.size(), res.size());
+
+                                    for (Map.Entry<Object, Object> e : res.entrySet())
+                                        assertEquals("Invalid value for key: " + e.getKey(), 0, e.getValue());
+                                }
+
+                                tx.rollback();
+                            }
+
+                            return null;
+                        }
+                    });
+
+                    assertTrue(readStart.await(5, TimeUnit.SECONDS));
+
+                    for (int i = 0; i < KEYS; i++) {
+                        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                            if (i % 2 == 0)
+                                cache0.put(i, 1);
+                            else
+                                cache0.remove(i);
+
+                            tx.commit();
+                        }
+                    }
+
+                    readProceed.countDown();
+
+                    fut.get();
+                }
+            }
+
+            srv0.destroyCache(cache0.getName());
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutGetAllSimple() throws Exception {
+        Ignite node = startGrid(0);
+
+        IgniteTransactions txs = node.transactions();
+
+        final IgniteCache<Object, Object> cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
+
+        final int KEYS = 10_000;
+
+        Set<Integer> keys = new HashSet<>();
+
+        for (int k = 0; k < KEYS; k++)
+            keys.add(k);
+
+        Map<Object, Object> map = checkAndGetAll(false, cache, keys, SCAN, GET);
+
+        assertTrue(map.isEmpty());
+
+        for (int v = 0; v < 3; v++) {
+            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                for (int k = 0; k < KEYS; k++) {
+                    if (k % 2 == 0)
+                        cache.put(k, v);
+                }
+
+                tx.commit();
+            }
+
+            map = checkAndGetAll(false, cache, keys, SCAN, GET);
+
+            for (int k = 0; k < KEYS; k++) {
+                if (k % 2 == 0)
+                    assertEquals(v, map.get(k));
+                else
+                    assertNull(map.get(k));
+            }
+
+            assertEquals(KEYS / 2, map.size());
+
+            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                map = checkAndGetAll(true, cache, keys, SCAN, GET);
+
+                for (int k = 0; k < KEYS; k++) {
+                    if (k % 2 == 0)
+                        assertEquals(v, map.get(k));
+                    else
+                        assertNull(map.get(k));
+                }
+
+                assertEquals(KEYS / 2, map.size());
+
+                tx.commit();
+            }
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutRemoveSimple() throws Exception {
+        putRemoveSimple(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutRemoveSimple_LargeKeys() throws Exception {
+        putRemoveSimple(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     * @param largeKeys {@code True} to use large keys (not fitting in single page).
+     */
+    private void putRemoveSimple(boolean largeKeys) throws Exception {
+        Ignite node = startGrid(0);
+
+        IgniteTransactions txs = node.transactions();
+
+        final IgniteCache<Object, Object> cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
+
+        final int KEYS = 100;
+
+        checkValues(new HashMap<>(), cache);
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 0; k < KEYS; k++)
+                cache.remove(testKey(largeKeys, k));
+
+            tx.commit();
+        }
+
+        checkValues(new HashMap<>(), cache);
+
+        Map<Object, Object> expVals = new HashMap<>();
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 0; k < KEYS; k++) {
+                Object key = testKey(largeKeys, k);
+
+                expVals.put(key, k);
+
+                cache.put(key, k);
+            }
+
+            tx.commit();
+        }
+
+        checkValues(expVals, cache);
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 0; k < KEYS; k++) {
+                if (k % 2 == 0) {
+                    Object key = testKey(largeKeys, k);
+
+                    cache.remove(key);
+
+                    expVals.remove(key);
+                }
+            }
+
+            tx.commit();
+        }
+
+        checkValues(expVals, cache);
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        Object key = testKey(largeKeys, 0);
+
+        for (int i = 0; i < 500; i++) {
+            boolean rmvd;
+
+            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                if (rnd.nextBoolean()) {
+                    cache.remove(key);
+
+                    rmvd = true;
+                }
+                else {
+                    cache.put(key, i);
+
+                    rmvd = false;
+                }
+
+                tx.commit();
+            }
+
+            if (rmvd) {
+                assertNull(checkAndGet(false, cache, key, SCAN, GET));
+                assertTrue(checkAndGetAll(false, cache, F.asSet(key), SCAN, GET).isEmpty());
+            }
+            else {
+                assertEquals(i, checkAndGet(false, cache, key, SCAN, GET));
+
+                Map<Object, Object> res = checkAndGetAll(false, cache, F.asSet(key), SCAN, GET);
+
+                assertEquals(i, res.get(key));
+            }
+        }
+    }
+
+    /**
+     * @param largeKeys {@code True} to use large keys (not fitting in single page).
+     * @param idx Index.
+     * @return Key instance.
+     */
+    private static Object testKey(boolean largeKeys, int idx) {
+        if (largeKeys) {
+            int payloadSize = PAGE_SIZE + ThreadLocalRandom.current().nextInt(PAGE_SIZE * 10);
+
+            return new TestKey(idx, payloadSize);
+        }
+        else
+            return idx;
+    }
+
+    /**
+     * @param expVals Expected values.
+     * @param cache Cache.
+     */
+    private void checkValues(Map<Object, Object> expVals, IgniteCache<Object, Object> cache) {
+        for (Map.Entry<Object, Object> e : expVals.entrySet())
+            assertEquals(e.getValue(), checkAndGet(false, cache, e.getKey(), SCAN, GET));
+
+        Map<Object, Object> res = checkAndGetAll(false, cache, expVals.keySet(), SCAN, GET);
+
+        assertEquals(expVals, res);
+
+        res = new HashMap<>();
+
+        for (IgniteCache.Entry<Object, Object> e : cache)
+            res.put(e.getKey(), e.getValue());
+
+        assertEquals(expVals, res);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testThreadUpdatesAreVisibleForThisThread() throws Exception {
+        final Ignite ignite = startGrid(0);
+
+        final IgniteCache<Object, Object> cache = ignite.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
+
+        final int THREADS = Runtime.getRuntime().availableProcessors() * 2;
+
+        final int KEYS = 10;
+
+        final CyclicBarrier b = new CyclicBarrier(THREADS);
+
+        GridTestUtils.runMultiThreaded(new IgniteInClosure<Integer>() {
+            @Override public void apply(Integer idx) {
+                try {
+                    int min = idx * KEYS;
+                    int max = min + KEYS;
+
+                    Set<Integer> keys = new HashSet<>();
+
+                    for (int k = min; k < max; k++)
+                        keys.add(k);
+
+                    b.await();
+
+                    for (int i = 0; i < 100; i++) {
+                        try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                            for (int k = min; k < max; k++)
+                                cache.put(k, i);
+
+                            tx.commit();
+                        }
+
+                        Map<Object, Object> res = checkAndGetAll(false, cache, keys, SCAN, GET);
+
+                        for (Integer key : keys)
+                            assertEquals(i, res.get(key));
+
+                        assertEquals(KEYS, res.size());
+                    }
+                }
+                catch (Exception e) {
+                    error("Unexpected error: " + e, e);
+
+                    fail("Unexpected error: " + e);
+                }
+            }
+        }, THREADS, "test-thread");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testWaitPreviousTxAck() throws Exception {
+        testSpi = true;
+
+        startGrid(0);
+
+        client = true;
+
+        final Ignite ignite = startGrid(1);
+
+        final IgniteCache<Object, Object> cache =
+            ignite.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 16));
+
+        try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            cache.put(1, 1);
+            cache.put(2, 1);
+            cache.put(3, 1);
+
+            tx.commit();
+        }
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(ignite);
+
+        clientSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            /** */
+            boolean block = true;
+
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                if (block && msg instanceof MvccAckRequestTx) {
+                    block = false;
+
+                    return true;
+                }
+
+                return false;
+            }
+        });
+
+        IgniteInternalFuture<?> txFut1 = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(2, 2);
+                    cache.put(3, 2);
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        });
+
+        IgniteInternalFuture<?> txFut2 = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(1, 3);
+                    cache.put(2, 3);
+
+                    tx.commit();
+                }
+
+                // Should see changes mady by both tx1 and tx2.
+                Map<Object, Object> res = checkAndGetAll(false, cache, F.asSet(1, 2, 3), SCAN, GET);
+
+                assertEquals(3, res.get(1));
+                assertEquals(3, res.get(2));
+                assertEquals(2, res.get(3));
+
+                return null;
+            }
+        });
+
+        clientSpi.waitForBlocked();
+
+        Thread.sleep(1000);
+
+        clientSpi.stopBlock(true);
+
+        txFut1.get();
+        txFut2.get();
+
+        Map<Object, Object> res = checkAndGetAll(false, cache, F.asSet(1, 2, 3), SCAN, GET);
+
+        assertEquals(3, res.get(1));
+        assertEquals(3, res.get(2));
+        assertEquals(2, res.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPartialCommitResultNoVisible() throws Exception {
+        testSpi = true;
+
+        startGrids(2);
+
+        client = true;
+
+        final Ignite ignite = startGrid(2);
+
+        awaitPartitionMapExchange();
+
+        final IgniteCache<Object, Object> cache =
+            ignite.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 16));
+
+        final Integer key1 = primaryKey(ignite(0).cache(cache.getName()));
+        final Integer key2 = primaryKey(ignite(1).cache(cache.getName()));
+
+        info("Test keys [key1=" + key1 + ", key2=" + key2 + ']');
+
+        try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            cache.put(key1, 1);
+            cache.put(key2, 1);
+
+            tx.commit();
+        }
+
+        Integer val = 1;
+
+        // Allow finish update for key1 and block update for key2.
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(ignite);
+        TestRecordingCommunicationSpi srvSpi = TestRecordingCommunicationSpi.spi(ignite(0));
+
+        for (int i = 0; i < 10; i++) {
+            info("Iteration: " + i);
+
+            clientSpi.blockMessages(GridNearTxFinishRequest.class, getTestIgniteInstanceName(1));
+
+            srvSpi.record(GridNearTxFinishResponse.class);
+
+            final Integer newVal = val + 1;
+
+            IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        cache.put(key1, newVal);
+                        cache.put(key2, newVal);
+
+                        tx.commit();
+                    }
+
+                    return null;
+                }
+            });
+
+            try {
+                srvSpi.waitForRecorded();
+
+                srvSpi.recordedMessages(true);
+
+                assertFalse(fut.isDone());
+
+                if (i % 2 == 1) {
+                    // Execute one more update to increase counter.
+                    try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        cache.put(primaryKeys(jcache(0), 1, 100_000).get(0), 1);
+
+                        tx.commit();
+                    }
+                }
+
+                Set<Integer> keys = new HashSet<>();
+
+                keys.add(key1);
+                keys.add(key2);
+
+                Map<Object, Object> res;
+
+                res = checkAndGetAll(false, cache, keys, SCAN, GET);
+
+                assertEquals(val, res.get(key1));
+                assertEquals(val, res.get(key2));
+
+                clientSpi.stopBlock(true);
+
+                fut.get();
+
+                res = checkAndGetAll(false, cache, keys, SCAN, GET);
+
+                assertEquals(newVal, res.get(key1));
+                assertEquals(newVal, res.get(key2));
+
+                val = newVal;
+            }
+            finally {
+                clientSpi.stopBlock(true);
+            }
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCleanupWaitsForGet1() throws Exception {
+        boolean vals[] = {true, false};
+
+        for (boolean otherPuts : vals) {
+            for (boolean putOnStart : vals) {
+                for (boolean inTx : vals) {
+                    cleanupWaitsForGet1(otherPuts, putOnStart, inTx);
+
+                    afterTest();
+                }
+            }
+        }
+    }
+
+    /**
+     * @param otherPuts {@code True} to update unrelated keys to increment mvcc counter.
+     * @param putOnStart {@code True} to put data in cache before getAll.
+     * @param inTx {@code True} to read inside transaction.
+     * @throws Exception If failed.
+     */
+    private void cleanupWaitsForGet1(boolean otherPuts, final boolean putOnStart, final boolean inTx) throws Exception {
+        info("cleanupWaitsForGet [otherPuts=" + otherPuts +
+            ", putOnStart=" + putOnStart +
+            ", inTx=" + inTx + "]");
+
+        testSpi = true;
+
+        client = false;
+
+        final Ignite srv = startGrid(0);
+
+        client = true;
+
+        final Ignite client = startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        final IgniteCache<Object, Object> srvCache =
+            srv.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 16));
+
+        final Integer key1 = 1;
+        final Integer key2 = 2;
+
+        if (putOnStart) {
+            try (Transaction tx = srv.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                srvCache.put(key1, 0);
+                srvCache.put(key2, 0);
+
+                tx.commit();
+            }
+        }
+
+        if (otherPuts) {
+            for (int i = 0; i < 3; i++) {
+                try (Transaction tx = srv.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    srvCache.put(1_000_000 + i, 99);
+
+                    tx.commit();
+                }
+            }
+        }
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(client);
+
+        clientSpi.blockMessages(GridNearGetRequest.class, getTestIgniteInstanceName(0));
+
+        IgniteInternalFuture<?> getFut = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                IgniteCache<Integer, Integer> cache = client.cache(srvCache.getName());
+
+
+                Map<Integer, Integer> vals;
+
+                if (inTx) {
+                    try (Transaction tx = client.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) {
+                        vals = checkAndGetAll(false, cache, F.asSet(key1, key2), SCAN, GET);
+
+                        tx.rollback();
+                    }
+                }
+                else
+                    vals = checkAndGetAll(false, cache, F.asSet(key1, key2), SCAN, GET);
+
+                if (putOnStart) {
+                    assertEquals(2, vals.size());
+                    assertEquals(0, (Object)vals.get(key1));
+                    assertEquals(0, (Object)vals.get(key2));
+                }
+                else
+                    assertEquals(0, vals.size());
+
+                return null;
+            }
+        }, "get-thread");
+
+        clientSpi.waitForBlocked();
+
+        for (int i = 0; i < 5; i++) {
+            try (Transaction tx = srv.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                srvCache.put(key1, i + 1);
+                srvCache.put(key2, i + 1);
+
+                tx.commit();
+            }
+        }
+
+        clientSpi.stopBlock(true);
+
+        getFut.get();
+
+        IgniteCache<Integer, Integer> cache = client.cache(srvCache.getName());
+
+        Map<Integer, Integer> vals = checkAndGetAll(false, cache, F.asSet(key1, key2), SCAN, GET);
+
+        assertEquals(2, vals.size());
+        assertEquals(5, (Object)vals.get(key1));
+        assertEquals(5, (Object)vals.get(key2));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCleanupWaitsForGet2() throws Exception {
+        /*
+        Simulate case when there are two active transactions modifying the same key
+        (it is possible if key lock is released but ack message is delayed), and at this moment
+        query is started.
+         */
+        testSpi = true;
+
+        client = false;
+
+        startGrids(2);
+
+        client = true;
+
+        final Ignite client = startGrid(2);
+
+        awaitPartitionMapExchange();
+
+        final IgniteCache<Object, Object> cache = client.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 16).
+            setNodeFilter(new TestCacheNodeExcludingFilter(ignite(0).name())));
+
+        final Integer key1 = 1;
+        final Integer key2 = 2;
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            cache.put(key1, 0);
+            cache.put(key2, 0);
+
+            tx.commit();
+        }
+
+        TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(grid(0));
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(client);
+
+        final CountDownLatch getLatch = new CountDownLatch(1);
+
+        clientSpi.closure(new IgniteBiInClosure<ClusterNode, Message>() {
+            @Override public void apply(ClusterNode node, Message msg) {
+                if (msg instanceof MvccAckRequestTx)
+                    doSleep(2000);
+            }
+        });
+
+        crdSpi.closure(new IgniteBiInClosure<ClusterNode, Message>() {
+            /** */
+            private AtomicInteger cntr = new AtomicInteger();
+
+            @Override public void apply(ClusterNode node, Message msg) {
+                if (msg instanceof MvccSnapshotResponse) {
+                    if (cntr.incrementAndGet() == 2) {
+                        getLatch.countDown();
+
+                        doSleep(1000);
+                    }
+                }
+            }
+        });
+
+        final IgniteInternalFuture<?> putFut1 = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(key1, 1);
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, "put1");
+
+        final IgniteInternalFuture<?> putFut2 = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(key1, 2);
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, "put2");
+
+        IgniteInternalFuture<?> getFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                U.await(getLatch);
+
+                while (!putFut1.isDone() || !putFut2.isDone()) {
+                    Map<Object, Object> vals1 = checkAndGetAll(false, cache, F.asSet(key1, key2), SCAN);
+                    Map<Object, Object> vals2 = checkAndGetAll(false, cache, F.asSet(key1, key2), GET);
+
+                    assertEquals(2, vals1.size());
+                    assertEquals(2, vals2.size());
+                }
+
+                return null;
+            }
+        }, 4, "get-thread");
+
+        putFut1.get();
+        putFut2.get();
+        getFut.get();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCleanupWaitsForGet3() throws Exception {
+        for (int i = 0; i < 4; i++) {
+            cleanupWaitsForGet3(i + 1);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @param updates Number of updates.
+     * @throws Exception If failed.
+     */
+    private void cleanupWaitsForGet3(int updates) throws Exception {
+        /*
+        Simulate case when coordinator assigned query version has active transaction,
+        query is delayed, after this active transaction finish and the same key is
+        updated several more times before query starts.
+         */
+        testSpi = true;
+
+        client = false;
+
+        startGrids(1);
+
+        client = true;
+
+        final Ignite client = startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        final IgniteCache<Object, Object> cache = client.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 16));
+
+        final Integer key1 = 1;
+        final Integer key2 = 2;
+
+        for (int i = 0; i < updates; i++) {
+            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                cache.put(key1, i);
+                cache.put(key2, i);
+
+                tx.commit();
+            }
+        }
+
+        TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(grid(0));
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(client);
+
+        clientSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            /** */
+            private boolean blocked;
+
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                if (!blocked && (msg instanceof MvccAckRequestTx)) {
+                    blocked = true;
+
+                    return true;
+                }
+                return false;
+            }
+        });
+
+        final IgniteInternalFuture<?> putFut = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(key2, 3);
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, "put");
+
+        clientSpi.waitForBlocked();
+
+        for (int i = 0; i < updates; i++) {
+            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                cache.put(key1, i + 3);
+
+                tx.commit();
+            }
+        }
+
+        // Delay version for getAll.
+        crdSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            /** */
+            private boolean blocked;
+
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                if (!blocked && (msg instanceof MvccSnapshotResponse)) {
+                    blocked = true;
+
+                    return true;
+                }
+                return false;
+            }
+        });
+
+        final IgniteInternalFuture<?> getFut = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                final Map<Object, Object> res1 = checkAndGetAll(false, cache, F.asSet(key1, key2), SCAN);
+                final Map<Object, Object> res2 = checkAndGetAll(false, cache, F.asSet(key1, key2), GET);
+
+                assertEquals(2, res1.size());
+                assertEquals(2, res2.size());
+
+                return null;
+            }
+        }, "get");
+
+        crdSpi.waitForBlocked();
+
+        clientSpi.stopBlock(true);
+
+        putFut.get();
+
+        for (int i = 0; i < updates; i++) {
+            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                cache.put(key2, i + 4);
+
+                tx.commit();
+            }
+        }
+
+        crdSpi.stopBlock(true);
+
+        getFut.get();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_GetAll() throws Exception {
+        putAllGetAll(null, 1, 0, 0, 64, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_SinglePartition_GetAll() throws Exception {
+        putAllGetAll(null, 1, 0, 0, 1, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_GetAll() throws Exception {
+        putAllGetAll(null, 4, 2, 0, 64, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_Persistence_GetAll() throws Exception {
+        persistence = true;
+
+        testPutAllGetAll_ClientServer_Backups0_GetAll();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_GetAll() throws Exception {
+        putAllGetAll(null, 4, 2, 1, 64, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups2_GetAll() throws Exception {
+        putAllGetAll(null, 4, 2, 2, 64, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_RestartCoordinator_GetAll() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD, 4, 2, 1, 64, null, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_Scan() throws Exception {
+        putAllGetAll(null, 1, 0, 0, 64, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_SinglePartition_Scan() throws Exception {
+        putAllGetAll(null, 1, 0, 0, 1, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_Scan() throws Exception {
+        putAllGetAll(null, 4, 2, 0, 64, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_Persistence_Scan() throws Exception {
+        persistence = true;
+
+        testPutAllGetAll_ClientServer_Backups0_Scan();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_Scan() throws Exception {
+        putAllGetAll(null, 4, 2, 1, 64, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups2_Scan() throws Exception {
+        putAllGetAll(null, 4, 2, 2, 64, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_RestartCoordinator_Scan() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD, 4, 2, 1, 64, null, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_Restart_Scan() throws Exception {
+        putAllGetAll(RestartMode.RESTART_RND_SRV, 4, 2, 1, 64, null, SCAN, PUT);
+    }
+
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64, null, false, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_WithRemoves_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64, null, true, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1, null, false, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_WithRemoves_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1, null, true, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64, null, false, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_WithRemoves_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64, null, true, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(4, 2, 1, 64, null, false, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_WithRemoves_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(4, 2, 1, 64, null, true, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64, null, false, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxGetAll_WithRemoves_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64, null, true, GET, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1, null, false, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_WithRemoves_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1, null, true, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64, null, false, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_WithRemoves_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64, null, true, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64, null, false, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_WithRemoves_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64, null, true, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(4, 2, 1, 64, null, false, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_WithRemoves_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(4, 2, 1, 64, null, true, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64, null, false, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxScan_WithRemoves_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64, null, true, SCAN, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPessimisticTxGetAllReadsSnapshot_SingleNode_SinglePartition() throws Exception {
+        txReadsSnapshot(1, 0, 0, 1, true, GET);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPessimisticTxGetAllReadsSnapshot_ClientServer() throws Exception {
+        txReadsSnapshot(4, 2, 1, 64, true, GET);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticTxGetAllReadsSnapshot_SingleNode() throws Exception {
+        txReadsSnapshot(1, 0, 0, 64, false, GET);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticTxGetAllReadsSnapshot_SingleNode_SinglePartition() throws Exception {
+        txReadsSnapshot(1, 0, 0, 1, false, GET);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOptimisticTxGetAllReadsSnapshot_ClientServer() throws Exception {
+        txReadsSnapshot(4, 2, 1, 64, false, GET);
+    }
+
+//    TODO: IGNITE-7371
+//    /**
+//     * @throws Exception If failed.
+//     */
+//    public void testPessimisticTxScanReadsSnapshot_SingleNode_SinglePartition() throws Exception {
+//        txReadsSnapshot(1, 0, 0, 1, true, SCAN);
+//    }
+//
+//    /**
+//     * @throws Exception If failed.
+//     */
+//    public void testPessimisticTxScanReadsSnapshot_ClientServer() throws Exception {
+//        txReadsSnapshot(4, 2, 1, 64, true, SCAN);
+//    }
+//
+//    /**
+//     * @throws Exception If failed.
+//     */
+//    public void testOptimisticTxScanReadsSnapshot_SingleNode() throws Exception {
+//        txReadsSnapshot(1, 0, 0, 64, false, SCAN);
+//    }
+//
+//    /**
+//     * @throws Exception If failed.
+//     */
+//    public void testOptimisticTxScanReadsSnapshot_SingleNode_SinglePartition() throws Exception {
+//        txReadsSnapshot(1, 0, 0, 1, false, SCAN);
+//    }
+//
+//    /**
+//     * @throws Exception If failed.
+//     */
+//    public void testOptimisticTxScanReadsSnapshot_ClientServer() throws Exception {
+//        txReadsSnapshot(4, 2, 1, 64, false, SCAN);
+//    }
+
+    /**
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param pessimistic If {@code true} uses pessimistic tx, otherwise optimistic.
+     * @param readMode Read mode.
+     * @throws Exception If failed.
+     */
+    private void txReadsSnapshot(
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        final boolean pessimistic,
+        ReadMode readMode
+    ) throws Exception {
+        final int ACCOUNTS = 20;
+
+        final int ACCOUNT_START_VAL = 1000;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final TransactionConcurrency concurrency;
+        final TransactionIsolation isolation;
+
+        if (pessimistic) {
+            concurrency = PESSIMISTIC;
+            isolation = REPEATABLE_READ;
+        }
+        else {
+            concurrency = OPTIMISTIC;
+            isolation = SERIALIZABLE;
+        }
+
+        final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
+            @Override public void apply(IgniteCache<Object, Object> cache) {
+                final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                Map<Integer, MvccTestAccount> accounts = new HashMap<>();
+
+                for (int i = 0; i < ACCOUNTS; i++)
+                    accounts.put(i, new MvccTestAccount(ACCOUNT_START_VAL, 1));
+
+                try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                    cache.putAll(accounts);
+
+                    tx.commit();
+                }
+            }
+        };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+
+                        try {
+                            IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                            cnt++;
+
+                            Integer id1 = rnd.nextInt(ACCOUNTS);
+                            Integer id2 = rnd.nextInt(ACCOUNTS);
+
+                            while (id1.equals(id2))
+                                id2 = rnd.nextInt(ACCOUNTS);
+
+                            TreeSet<Integer> keys = new TreeSet<>();
+
+                            keys.add(id1);
+                            keys.add(id2);
+
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                MvccTestAccount a1;
+                                MvccTestAccount a2;
+
+                                Map<Integer, MvccTestAccount> accounts = checkAndGetAll(false, cache.cache, keys, readMode);
+
+                                a1 = accounts.get(id1);
+                                a2 = accounts.get(id2);
+
+                                assertNotNull(a1);
+                                assertNotNull(a2);
+
+                                cache.cache.put(id1, new MvccTestAccount(a1.val + 1, 1));
+                                cache.cache.put(id2, new MvccTestAccount(a2.val - 1, 1));
+
+                                tx.commit();
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                        Map<Integer, MvccTestAccount> accounts = new HashMap<>();
+
+                        if (pessimistic) {
+                            try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                                int remaining = ACCOUNTS;
+
+                                do {
+                                    int readCnt = rnd.nextInt(remaining) + 1;
+
+                                    Set<Integer> readKeys = new TreeSet<>();
+
+                                    for (int i = 0; i < readCnt; i++)
+                                        readKeys.add(accounts.size() + i);
+
+                                    Map<Integer, MvccTestAccount> readRes =
+                                        checkAndGetAll(false, cache.cache, readKeys, readMode);
+
+                                    assertEquals(readCnt, readRes.size());
+
+                                    accounts.putAll(readRes);
+
+                                    remaining = ACCOUNTS - accounts.size();
+                                }
+                                while (remaining > 0);
+
+                                validateSum(accounts);
+
+                                tx.commit();
+
+                                cnt++;
+                            }
+                            finally {
+                                cache.readUnlock();
+                            }
+                        }
+                        else {
+                            try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                                int remaining = ACCOUNTS;
+
+                                do {
+                                    int readCnt = rnd.nextInt(remaining) + 1;
+
+                                    if (rnd.nextInt(3) == 0) {
+                                        for (int i = 0; i < readCnt; i++) {
+                                            Integer key = rnd.nextInt(ACCOUNTS);
+
+                                            MvccTestAccount account =
+                                                (MvccTestAccount)checkAndGet(false, cache.cache, key, readMode);
+
+                                            assertNotNull(account);
+
+                                            accounts.put(key, account);
+                                        }
+                                    }
+                                    else {
+                                        Set<Integer> readKeys = new LinkedHashSet<>();
+
+                                        for (int i = 0; i < readCnt; i++)
+                                            readKeys.add(rnd.nextInt(ACCOUNTS));
+
+                                        Map<Integer, MvccTestAccount> readRes =
+                                            checkAndGetAll(false, cache.cache, readKeys, readMode);
+
+                                        assertEquals(readKeys.size(), readRes.size());
+
+                                        accounts.putAll(readRes);
+                                    }
+
+                                    remaining = ACCOUNTS - accounts.size();
+                                }
+                                while (remaining > 0);
+
+                                validateSum(accounts);
+
+                                cnt++;
+
+                                tx.commit();
+                            }
+                            catch (TransactionOptimisticException ignore) {
+                                // No-op.
+                            }
+                            finally {
+                                cache.readUnlock();
+                            }
+                        }
+                    }
+
+                    info("Reader finished, txs: " + cnt);
+                }
+
+                /**
+                 * @param accounts Read accounts.
+                 */
+                private void validateSum(Map<Integer, MvccTestAccount> accounts) {
+                    int sum = 0;
+
+                    for (int i = 0; i < ACCOUNTS; i++) {
+                        MvccTestAccount account = accounts.get(i);
+
+                        assertNotNull(account);
+
+                        sum += account.val;
+                    }
+
+                    assertEquals(ACCOUNTS * ACCOUNT_START_VAL, sum);
+                }
+            };
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            null,
+            init,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceScanConsistency_SingleNode_SinglePartition() throws Exception {
+        operationsSequenceConsistency(1, 0, 0, 1, SCAN);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceScanConsistency_SingleNode() throws Exception {
+        operationsSequenceConsistency(1, 0, 0, 64, SCAN);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceScanConsistency_ClientServer_Backups0() throws Exception {
+        operationsSequenceConsistency(4, 2, 0, 64, SCAN);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceScanConsistency_ClientServer_Backups1() throws Exception {
+        operationsSequenceConsistency(4, 2, 1, 64, SCAN);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceGetConsistency_SingleNode_SinglePartition() throws Exception {
+        operationsSequenceConsistency(1, 0, 0, 1, GET);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceGetConsistency_SingleNode() throws Exception {
+        operationsSequenceConsistency(1, 0, 0, 64, GET);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceGetConsistency_ClientServer_Backups0() throws Exception {
+        operationsSequenceConsistency(4, 2, 0, 64, GET);
+    }
+
+    /**
+     * @throws Exception If failed
+     */
+    public void testOperationsSequenceGetConsistency_ClientServer_Backups1() throws Exception {
+        operationsSequenceConsistency(4, 2, 1, 64, GET);
+    }
+
+    /**
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param readMode Read mode.
+     * @throws Exception If failed.
+     */
+    private void operationsSequenceConsistency(
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        ReadMode readMode
+    )
+        throws Exception
+    {
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final long time = 10_000;
+
+        final AtomicInteger keyCntr = new AtomicInteger();
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, Value> cache = randomCache(caches, rnd);
+
+                        try {
+                            IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                            Integer key = keyCntr.incrementAndGet();
+
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                cache.cache.put(key, new Value(idx, cnt++));
+
+                                tx.commit();
+                            }
+
+                            if (key > 100_000)
+                                break;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Set keys = new HashSet();
+
+                    while (!stop.get()) {
+                        TestCache<Integer, Value> cache = randomCache(caches, rnd);
+
+                        try {
+                            Map<Integer, TreeSet<Integer>> vals = new HashMap<>();
+
+                            switch (readMode) {
+                                case SCAN:
+                                    for (Cache.Entry<Integer, Value> e : cache.cache) {
+                                        Value val = e.getValue();
+
+                                        assertNotNull(val);
+
+                                        TreeSet<Integer> cntrs = vals.get(val.key);
+
+                                        if (cntrs == null)
+                                            vals.put(val.key, cntrs = new TreeSet<>());
+
+                                        boolean add = cntrs.add(val.cnt);
+
+                                        assertTrue(add);
+                                    }
+
+                                    break;
+
+                                case GET:
+                                    for (int i = keys.size(); i < keyCntr.get(); i++)
+                                        keys.add(i);
+
+                                    Iterable<Map.Entry<Integer, Value>> entries = cache.cache.getAll(keys).entrySet();
+
+                                    for (Map.Entry<Integer, Value> e : entries) {
+                                        Value val = e.getValue();
+
+                                        assertNotNull(val);
+
+                                        TreeSet<Integer> cntrs = vals.get(val.key);
+
+                                        if (cntrs == null)
+                                            vals.put(val.key, cntrs = new TreeSet<>());
+
+                                        boolean add = cntrs.add(val.cnt);
+
+                                        assertTrue(add);
+                                    }
+
+                                    break;
+
+                                default:
+                                    fail("Unsupported read mode: " + readMode.name() + '.');
+                            }
+
+                            for (TreeSet<Integer> readCntrs : vals.values()) {
+                                for (int i = 0; i < readCntrs.size(); i++)
+                                    assertTrue(readCntrs.contains(i));
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+                }
+            };
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            time,
+            null,
+            null,
+            writer,
+            reader);
+    }
+
+    /**
+     * TODO IGNITE-5935 enable when recovery is implemented.
+     *
+     * @throws Exception If failed.
+     */
+    public void _testNodesRestartNoHang() throws Exception {
+        final int srvs = 4;
+        final int clients = 4;
+        final int writers = 6;
+        final int readers = 2;
+
+        final int KEYS = 100_000;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Map<Integer, Integer> map = new TreeMap<>();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        int keys = rnd.nextInt(32) + 1;
+
+                        while (map.size() < keys)
+                            map.put(rnd.nextInt(KEYS), cnt);
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        try {
+                            IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                            TransactionConcurrency concurrency;
+                            TransactionIsolation isolation;
+
+                            switch (rnd.nextInt(3)) {
+                                case 0: {
+                                    concurrency = PESSIMISTIC;
+                                    isolation = REPEATABLE_READ;
+
+                                    break;
+                                }
+                                case 1: {
+                                    concurrency = OPTIMISTIC;
+                                    isolation = REPEATABLE_READ;
+
+                                    break;
+                                }
+                                case 2: {
+                                    concurrency = OPTIMISTIC;
+                                    isolation = SERIALIZABLE;
+
+                                    break;
+                                }
+                                default: {
+                                    fail();
+
+                                    return;
+                                }
+                            }
+
+                            try (Transaction tx = txs.txStart(concurrency, isolation)) {
+                                if (rnd.nextBoolean()) {
+                                    Map<Integer, Integer> res = checkAndGetAll(false, cache.cache, map.keySet(),
+                                        rnd.nextBoolean() ? GET : SCAN);
+
+                                    assertNotNull(res);
+                                }
+
+                                cache.cache.putAll(map);
+
+                                tx.commit();
+                            }
+                            catch (TransactionOptimisticException e) {
+                                assertEquals(SERIALIZABLE, isolation);
+                            }
+                            catch (Exception e) {
+                                Assert.assertTrue("Unexpected error: " + e, X.hasCause(e, ClusterTopologyException.class));
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        map.clear();
+
+                        cnt++;
+                    }
+
+                    info("Writer done, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    Set<Integer> keys = new LinkedHashSet<>();
+
+                    while (!stop.get()) {
+                        int keyCnt = rnd.nextInt(64) + 1;
+
+                        while (keys.size() < keyCnt)
+                            keys.add(rnd.nextInt(KEYS));
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        Map<Integer, Integer> map;
+
+                        try {
+                            map = checkAndGetAll(false, cache.cache, keys, rnd.nextBoolean() ? GET : SCAN);
+
+                            assertNotNull(map);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        keys.clear();
+                    }
+                }
+            };
+
+        readWriteTest(
+            RestartMode.RESTART_RND_SRV,
+            srvs,
+            clients,
+            1,
+            256,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            null,
+            null,
+            writer,
+            reader);
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testActiveQueryCleanupOnNodeFailure() throws Exception {
+        testSpi = true;
+
+        final Ignite srv = startGrid(0);
+
+        srv.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1024));
+
+        client = true;
+
+        final Ignite client = startGrid(1);
+
+        TestRecordingCommunicationSpi srvSpi = TestRecordingCommunicationSpi.spi(srv);
+
+        srvSpi.blockMessages(GridNearGetResponse.class, getTestIgniteInstanceName(1));
+
+        TestRecordingCommunicationSpi.spi(client).blockMessages(MvccAckRequestQueryCntr.class,
+            getTestIgniteInstanceName(0));
+
+        IgniteInternalFuture<?> fut = GridTestUtils.runAsync(new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                IgniteCache cache = client.cache(DEFAULT_CACHE_NAME);
+
+                cache.getAll(F.asSet(1, 2, 3));
+
+                return null;
+            }
+        });
+
+        srvSpi.waitForBlocked();
+
+        assertFalse(fut.isDone());
+
+        stopGrid(1);
+
+        checkActiveQueriesCleanup(ignite(0));
+
+        try {
+            fut.get();
+        }
+        catch (Exception ignore) {
+            // No-op.
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebalanceSimple() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        Ignite srv0 = startGrid(0);
+
+        IgniteCache<Integer, Integer> cache =  (IgniteCache)srv0.createCache(
+            cacheConfiguration(PARTITIONED, FULL_SYNC, 0, DFLT_PARTITION_COUNT));
+
+        Map<Integer, Integer> map;
+        Map<Integer, Integer> resMap;
+
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            map = new HashMap<>();
+
+            for (int i = 0; i < DFLT_PARTITION_COUNT * 3; i++)
+                map.put(i, i);
+
+            cache.putAll(map);
+
+            tx.commit();
+        }
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        resMap = checkAndGetAll(false, cache, map.keySet(), GET, SCAN);
+
+        assertEquals(map.size(), resMap.size());
+
+        for (int i = 0; i < map.size(); i++)
+            assertEquals(i, (Object)resMap.get(i));
+
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int i = 0; i < DFLT_PARTITION_COUNT * 3; i++)
+                map.put(i, i + 1);
+
+            cache.putAll(map);
+
+            tx.commit();
+        }
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int i = 0; i < DFLT_PARTITION_COUNT * 3; i++)
+                map.put(i, i + 2);
+
+            cache.putAll(map);
+
+            tx.commit();
+        }
+
+        startGrid(2);
+
+        awaitPartitionMapExchange();
+
+        resMap = checkAndGetAll(false, cache, map.keySet(), GET, SCAN);
+
+        assertEquals(map.size(), map.size());
+
+        for (int i = 0; i < map.size(); i++)
+            assertEquals(i + 2, (Object)resMap.get(i));
+
+        // Run fake transaction
+        try (Transaction tx = srv0.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            Integer val = cache.get(0);
+
+            cache.put(0, val);
+
+            tx.commit();
+        }
+
+        resMap = checkAndGetAll(false, cache, map.keySet(), GET, SCAN);
+
+        assertEquals(map.size(), map.size());
+
+        for (int i = 0; i < map.size(); i++)
+            assertEquals(i + 2, (Object)resMap.get(i));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebalanceWithRemovedValuesSimple() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        Ignite node = startGrid(0);
+
+        IgniteTransactions txs = node.transactions();
+
+        final IgniteCache<Object, Object> cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 1, 64));
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 0; k < 100; k++)
+                cache.remove(k);
+
+            tx.commit();
+        }
+
+        Map<Object, Object> expVals = new HashMap<>();
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 100; k < 200; k++) {
+                cache.put(k, k);
+
+                expVals.put(k, k);
+            }
+
+            tx.commit();
+        }
+
+        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            for (int k = 100; k < 200; k++) {
+                if (k % 2 == 0) {
+                    cache.remove(k);
+
+                    expVals.remove(k);
+                }
+            }
+
+            tx.commit();
+        }
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        checkValues(expVals, jcache(1));
+
+        stopGrid(0);
+
+        checkValues(expVals, jcache(1));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxPrepareFailureSimplePessimisticTx() throws Exception {
+        txPrepareFailureSimple(PESSIMISTIC, REPEATABLE_READ);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxPrepareFailureSimpleSerializableTx() throws Exception {
+        txPrepareFailureSimple(OPTIMISTIC, SERIALIZABLE);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxPrepareFailureSimpleOptimisticTx() throws Exception {
+        txPrepareFailureSimple(OPTIMISTIC, REPEATABLE_READ);
+    }
+
+    /**
+     * @param concurrency Transaction concurrency.
+     * @param isolation Transaction isolation.
+     * @throws Exception If failed.
+     */
+    private void txPrepareFailureSimple(
+        final TransactionConcurrency concurrency,
+        final TransactionIsolation isolation
+    ) throws Exception {
+        testSpi = true;
+
+        startGrids(3);
+
+        client = true;
+
+        final Ignite client = startGrid(3);
+
+        final IgniteCache cache = client.createCache(
+            cacheConfiguration(PARTITIONED, FULL_SYNC, 0, DFLT_PARTITION_COUNT));
+
+        final Integer key1 = primaryKey(jcache(1));
+        final Integer key2 = primaryKey(jcache(2));
+
+        TestRecordingCommunicationSpi srv1Spi = TestRecordingCommunicationSpi.spi(ignite(1));
+
+        srv1Spi.blockMessages(GridNearTxPrepareResponse.class, client.name());
+
+        IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable() {
+            @Override public Object call() throws Exception {
+                try {
+                    try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
+                        cache.put(key1, 1);
+                        cache.put(key2, 2);
+
+                        tx.commit();
+                    }
+
+                    fail();
+                }
+                catch (ClusterTopologyException e) {
+                    info("Expected exception: " + e);
+                }
+
+                return null;
+            }
+        }, "tx-thread");
+
+        srv1Spi.waitForBlocked();
+
+        assertFalse(fut.isDone());
+
+        stopGrid(1);
+
+        fut.get();
+
+        assertNull(cache.get(key1));
+        assertNull(cache.get(key2));
+
+        try (Transaction tx = client.transactions().txStart(concurrency, isolation)) {
+            cache.put(key1, 1);
+            cache.put(key2, 2);
+
+            tx.commit();
+        }
+
+        assertEquals(1, checkAndGet(false, cache, key1, GET, SCAN));
+        assertEquals(2, checkAndGet(false, cache, key2, GET, SCAN));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSerializableTxRemap() throws Exception {
+        testSpi = true;
+
+        startGrids(2);
+
+        client = true;
+
+        final Ignite client = startGrid(2);
+
+        final IgniteCache cache = client.createCache(
+            cacheConfiguration(PARTITIONED, FULL_SYNC, 0, DFLT_PARTITION_COUNT));
+
+        final Map<Object, Object> vals = new HashMap<>();
+
+        for (int i = 0; i < 100; i++)
+            vals.put(i, i);
+
+        TestRecordingCommunicationSpi clientSpi = TestRecordingCommunicationSpi.spi(ignite(2));
+
+        clientSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+            @Override public boolean apply(ClusterNode node, Message msg) {
+                return msg instanceof GridNearTxPrepareRequest;
+            }
+        });
+
+        IgniteInternalFuture fut = GridTestUtils.runAsync(new Callable() {
+            @Override public Object call() throws Exception {
+                try (Transaction tx = client.transactions().txStart(OPTIMISTIC, SERIALIZABLE)) {
+                    cache.putAll(vals);
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, "tx-thread");
+
+        clientSpi.waitForBlocked(2);
+
+        this.client = false;
+
+        startGrid(3);
+
+        assertFalse(fut.isDone());
+
+        clientSpi.stopBlock();
+
+        fut.get();
+
+        for (Ignite node : G.allGrids())
+            checkValues(vals, node.cache(cache.getName()));
+    }
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccCoordinatorChangeSimple() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        final List<String> cacheNames = new ArrayList<>();
+
+        for (CacheConfiguration ccfg : cacheConfigurations()) {
+            ccfg.setName("cache-" + cacheNames.size());
+
+            cacheNames.add(ccfg.getName());
+
+            srv0.createCache(ccfg);
+        }
+
+        checkPutGet(cacheNames);
+
+        for (int i = 0; i < 3; i++) {
+            startGrid(i + 1);
+
+            checkPutGet(cacheNames);
+
+            checkCoordinatorsConsistency(null);
+        }
+
+        client = true;
+
+        for (int i = 0; i < 3; i++) {
+            Ignite node = startGrid(i + 4);
+
+            // Init client caches outside of transactions.
+            for (String cacheName : cacheNames)
+                node.cache(cacheName);
+
+            checkPutGet(cacheNames);
+
+            checkCoordinatorsConsistency(null);
+        }
+
+        for (int i = 0; i < 3; i++) {
+            stopGrid(i);
+
+            awaitPartitionMapExchange();
+
+            checkPutGet(cacheNames);
+
+            checkCoordinatorsConsistency(null);
+        }
+    }
+
+    /**
+     * @param cacheNames Cache names.
+     */
+    private void checkPutGet(List<String> cacheNames) {
+        List<Ignite> nodes = G.allGrids();
+
+        assertFalse(nodes.isEmpty());
+
+        Ignite putNode = nodes.get(ThreadLocalRandom.current().nextInt(nodes.size()));
+
+        Map<Integer, Integer> vals = new HashMap();
+
+        Integer val = ThreadLocalRandom.current().nextInt();
+
+        for (int i = 0; i < 10; i++)
+            vals.put(i, val);
+
+        TransactionConcurrency concurrency;
+        TransactionIsolation isolation;
+
+        if (ThreadLocalRandom.current().nextBoolean()) {
+            concurrency = PESSIMISTIC;
+            isolation = REPEATABLE_READ;
+        }
+        else {
+            concurrency = OPTIMISTIC;
+            isolation = SERIALIZABLE;
+        }
+
+        try (Transaction tx = putNode.transactions().txStart(concurrency, isolation)) {
+            for (String cacheName : cacheNames)
+                putNode.cache(cacheName).putAll(vals);
+
+            tx.commit();
+        }
+
+        for (Ignite node : nodes) {
+            for (String cacheName : cacheNames) {
+                Map<Object, Object> res = checkAndGetAll(false, node.cache(cacheName), vals.keySet(), SCAN, GET);
+
+                assertEquals(vals, res);
+            }
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccCoordinatorInfoConsistency() throws Exception {
+        for (int i = 0; i < 4; i++) {
+            startGrid(i);
+
+            if (persistence && i == 0)
+                ignite(i).active(true);
+
+            checkCoordinatorsConsistency(i + 1);
+        }
+
+        client = true;
+
+        startGrid(4);
+
+        checkCoordinatorsConsistency(5);
+
+        startGrid(5);
+
+        checkCoordinatorsConsistency(6);
+
+        client = false;
+
+        stopGrid(0);
+
+        awaitPartitionMapExchange();
+
+        checkCoordinatorsConsistency(5);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMvccCoordinatorInfoConsistency_Persistence() throws Exception {
+        persistence = true;
+
+        testMvccCoordinatorInfoConsistency();
+    }
+
+    /**
+     * @param expNodes Expected nodes number.
+     */
+    private void checkCoordinatorsConsistency(@Nullable Integer expNodes) {
+        List<Ignite> nodes = G.allGrids();
+
+        if (expNodes != null)
+            assertEquals(expNodes, (Integer)nodes.size());
+
+        MvccCoordinator crd = null;
+
+        for (Ignite node : G.allGrids()) {
+            MvccCoordinator crd0 = mvccProcessor(node).currentCoordinator();
+
+            if (crd != null)
+                assertEquals(crd, crd0);
+            else
+                crd = crd0;
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testGetVersionRequestFailover() throws Exception {
+        final int NODES = 5;
+
+        testSpi = true;
+
+        startGridsMultiThreaded(NODES - 1);
+
+        client = true;
+
+        Ignite client = startGrid(NODES - 1);
+
+        final List<String> cacheNames = new ArrayList<>();
+
+        final Map<Integer, Integer> vals = new HashMap<>();
+
+        for (int i = 0; i < 100; i++)
+            vals.put(i, i);
+
+        for (CacheConfiguration ccfg : cacheConfigurations()) {
+            ccfg.setName("cache-" + cacheNames.size());
+
+            ccfg.setNodeFilter(new TestCacheNodeExcludingFilter(getTestIgniteInstanceName(0)));
+
+            cacheNames.add(ccfg.getName());
+
+            IgniteCache cache = client.createCache(ccfg);
+
+            try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                writeAllByMode(cache, vals, PUT, INTEGER_CODEC);
+
+                tx.commit();
+            }
+        }
+
+        final AtomicInteger nodeIdx = new AtomicInteger(1);
+
+        final AtomicBoolean done = new AtomicBoolean();
+
+        try {
+            IgniteInternalFuture getFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    Ignite node = ignite(nodeIdx.getAndIncrement());
+
+                    int cnt = 0;
+
+                    while (!done.get()) {
+                        for (String cacheName : cacheNames) {
+                            // TODO IGNITE-6754 add SQL and SCAN support.
+                            Map<Integer, Integer> res = readAllByMode(node.cache(cacheName), vals.keySet(), GET, INTEGER_CODEC);
+
+                            assertEquals(vals, res);
+                        }
+
+                        cnt++;
+                    }
+
+                    log.info("Finished [node=" + node.name() + ", cnt=" + cnt + ']');
+
+                    return null;
+                }
+            }, NODES - 1, "get-thread");
+
+            doSleep(1000);
+
+            TestRecordingCommunicationSpi crdSpi = TestRecordingCommunicationSpi.spi(ignite(0));
+
+            crdSpi.blockMessages(new IgniteBiPredicate<ClusterNode, Message>() {
+                @Override public boolean apply(ClusterNode node, Message msg) {
+                    return msg instanceof MvccSnapshotResponse;
+                }
+            });
+
+            crdSpi.waitForBlocked();
+
+            stopGrid(0);
+
+            doSleep(1000);
+
+            done.set(true);
+
+            getFut.get();
+        }
+        finally {
+            done.set(true);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testLoadWithStreamer() throws Exception {
+        startGridsMultiThreaded(5);
+
+        client = true;
+
+        startGrid(5);
+
+        Ignite node = ignite(0);
+
+        IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 2, 64));
+
+        final int KEYS = 1_000;
+
+        Map<Object, Object> data = new HashMap<>();
+
+        try (IgniteDataStreamer<Integer, Integer> streamer = node.dataStreamer(cache.getName())) {
+            for (int i = 0; i < KEYS; i++) {
+                streamer.addData(i, i);
+
+                data.put(i, i);
+            }
+        }
+
+        checkValues(data, cache);
+
+        checkCacheData(data, cache.getName());
+
+        checkPutGet(F.asList(cache.getName()));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode_SinglePartition_Get() throws Exception {
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 1, 0, 0, 1, 10_000, null, GET, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode_Get() throws Exception {
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 1, 0, 0, 64, 10_000, null, GET, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode_SinglePartition_Scan() throws Exception {
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 1, 0, 0, 1, 10_000, null, SCAN, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode_Scan() throws Exception {
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 1, 0, 0, 64, 10_000, null, SCAN, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups2_Get() throws Exception {
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 4, 2, 2, DFLT_PARTITION_COUNT, 10_000, null, GET, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups1_Scan() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-7764");
+
+        int[] nValues = {3, 5, 10};
+
+        for (int n : nValues) {
+            updateNObjectsTest(n, 2, 1, 1, DFLT_PARTITION_COUNT, 10_000, null, SCAN, PUT, null);
+
+            afterTest();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testImplicitPartsScan_SingleNode_SinglePartition() throws Exception {
+        doImplicitPartsScanTest(1, 0, 0, 1, 10_000);
+    }
+
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testImplicitPartsScan_SingleNode() throws Exception {
+        doImplicitPartsScanTest(1, 0, 0, 64, 10_000);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testImplicitPartsScan_ClientServer_Backups0() throws Exception {
+        doImplicitPartsScanTest(4, 2, 0, 64, 10_000);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testImplicitPartsScan_ClientServer_Backups1() throws Exception {
+        doImplicitPartsScanTest(4, 2, 1, 64, 10_000);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testImplicitPartsScan_ClientServer_Backups2() throws Exception {
+        doImplicitPartsScanTest(4, 2, 2, 64, 10_000);
+    }
+
+    /**
+     * @param srvs Number of server nodes.
+     * @param clients Number of client nodes.
+     * @param cacheBackups Number of cache backups.
+     * @param cacheParts Number of cache partitions.
+     * @param time Test time.
+     * @throws Exception If failed.
+     */
+    private void doImplicitPartsScanTest(
+        final int srvs,
+        final int clients,
+        int cacheBackups,
+        int cacheParts,
+        long time) throws Exception {
+        final int KEYS_PER_PART = 20;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        Map<Integer, List<Integer>> keysByParts = new HashMap<>();
+
+        final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
+            @Override public void apply(IgniteCache<Object, Object> cache) {
+                final IgniteTransactions txs = cache.unwrap(Ignite.class).transactions();
+
+                for (int i = 0; i < cacheParts; i++) {
+                    List<Integer> keys = new ArrayList<>();
+
+                    keysByParts.put(i, keys);
+                }
+
+                Affinity aff = affinity(cache);
+
+                int cntr = 0;
+                int key = 0;
+
+                while (cntr < KEYS_PER_PART * cacheParts) {
+                    int part = aff.partition(key);
+
+                    List<Integer> keys = keysByParts.get(part);
+
+                    if (keys.size() < KEYS_PER_PART) {
+                        keys.add(key);
+
+                        cntr++;
+                    }
+
+                    key++;
+                }
+
+                try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    for (List<Integer> keys : keysByParts.values())
+                        for (Integer k : keys)
+                            cache.put(k, new MvccTestAccount(0, 1));
+
+                    tx.commit();
+                }
+            }
+        };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    while (!stop.get()) {
+                        int part = rnd.nextInt(cacheParts);
+
+                        List<Integer> partKeys = keysByParts.get(part);
+
+                        TestCache<Integer, MvccTestAccount> cache = randomCache(caches, rnd);
+                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                        Integer k1 = partKeys.get(rnd.nextInt(KEYS_PER_PART));
+                        Integer k2 = partKeys.get(rnd.nextInt(KEYS_PER_PART));
+
+                        while (k1.equals(k2))
+                            k2 = partKeys.get(rnd.nextInt(KEYS_PER_PART));
+
+                        TreeSet<Integer> keys = new TreeSet<>();
+
+                        keys.add(k1);
+                        keys.add(k2);
+
+                        try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                            Map<Integer, MvccTestAccount> accs = cache.cache.getAll(keys);
+
+                            MvccTestAccount acc1 = accs.get(k1);
+                            MvccTestAccount acc2 = accs.get(k2);
+
+                            assertNotNull(acc1);
+                            assertNotNull(acc2);
+
+                            cache.cache.put(k1, new MvccTestAccount(acc1.val + 1, acc1.updateCnt + 1));
+                            cache.cache.put(k2, new MvccTestAccount(acc2.val - 1, acc2.updateCnt + 1));
+
+                            tx.commit();
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    while (!stop.get()) {
+                        int part = rnd.nextInt(cacheParts);
+
+                        TestCache<Integer, Integer> cache = randomCache(caches, rnd);
+
+                        try {
+                            Affinity aff = affinity(cache.cache);
+
+                            ScanQuery<Integer, MvccTestAccount> qry = new ScanQuery<>(part);
+
+                            List<Cache.Entry<Integer, MvccTestAccount>> res =  cache.cache.query(qry).getAll();
+
+                            int sum = 0;
+
+                            for (Cache.Entry<Integer, MvccTestAccount> entry : res) {
+                                Integer key = entry.getKey();
+                                MvccTestAccount acc = entry.getValue();
+
+                                assertEquals(part, aff.partition(key));
+
+                                sum += acc.val;
+                            }
+
+                            assertEquals(0, sum);
+
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        if (idx == 0) {
+                            cache = randomCache(caches, rnd);
+
+                            try {
+                                ScanQuery<Integer, MvccTestAccount> qry = new ScanQuery<>();
+
+                                List<Cache.Entry<Integer, MvccTestAccount>> res =  cache.cache.query(qry).getAll();
+
+                                int sum = 0;
+
+                                for (Cache.Entry<Integer, MvccTestAccount> entry : res) {
+                                    Integer key = entry.getKey();
+                                    MvccTestAccount acc = entry.getValue();
+
+                                    sum += acc.val;
+                                }
+
+                                assertEquals(0, sum);
+                            }
+                            finally {
+                                cache.readUnlock();
+                            }
+                        }
+                    }
+                }
+            };
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            cacheBackups,
+            cacheParts,
+            writers,
+            readers,
+            time,
+            null,
+            init,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testSize() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        Ignite node = startGrid(0);
+
+        IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
+
+        assertEquals(cache.size(), 0);
+
+        final int KEYS = 10;
+
+        for (int i = 0; i < KEYS; i++) {
+            final Integer key = i;
+
+            try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                cache.put(key, i);
+
+                tx.commit();
+            }
+
+            assertEquals(i + 1, cache.size());
+        }
+
+        for (int i = 0; i < KEYS; i++) {
+            final Integer key = i;
+
+            try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                cache.put(key, i);
+
+                tx.commit();
+            }
+
+            assertEquals(KEYS, cache.size());
+        }
+
+        int size = KEYS;
+
+        for (int i = 0; i < KEYS; i++) {
+            if (i % 2 == 0) {
+                final Integer key = i;
+
+                try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.remove(key);
+
+                    tx.commit();
+                }
+
+                size--;
+
+                assertEquals(size, cache.size());
+            }
+        }
+
+        // Check size does not change if remove already removed keys.
+        for (int i = 0; i < KEYS; i++) {
+            if (i % 2 == 0) {
+                final Integer key = i;
+
+                try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.remove(key);
+
+                    tx.commit();
+                }
+
+                assertEquals(size, cache.size());
+            }
+        }
+
+        for (int i = 0; i < KEYS; i++) {
+            if (i % 2 == 0) {
+                final Integer key = i;
+
+                try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(key, i);
+
+                    tx.commit();
+                }
+
+                size++;
+
+                assertEquals(size, cache.size());
+            }
+        }
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
+    public void testInternalApi() throws Exception {
+        Ignite node = startGrid(0);
+
+        IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
+
+        GridCacheContext cctx =
+            ((IgniteKernal)node).context().cache().context().cacheContext(CU.cacheId(cache.getName()));
+
+        MvccProcessorImpl crd = mvccProcessor(node);
+
+        // Start query to prevent cleanup.
+        IgniteInternalFuture<MvccSnapshot> fut = crd.requestSnapshotAsync();
+
+        fut.get();
+
+        final int KEYS = 1000;
+
+        for (int i = 0; i < 10; i++) {
+            for (int k = 0; k < KEYS; k++) {
+                final Integer key = k;
+
+                try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    cache.put(key, i);
+
+                    tx.commit();
+                }
+            }
+        }
+
+        for (int k = 0; k < KEYS; k++) {
+            final Integer key = k;
+
+            KeyCacheObject key0 = cctx.toCacheKeyObject(key);
+
+            List<IgniteBiTuple<Object, MvccVersion>> vers = cctx.offheap().mvccAllVersions(cctx, key0);
+
+            assertEquals(10, vers.size());
+
+            CacheDataRow row = cctx.offheap().read(cctx, key0);
+
+            Object val = ((CacheObject)vers.get(0).get1()).value(cctx.cacheObjectContext(), false);
+
+            checkRow(cctx, row, key0, val);
+
+            for (IgniteBiTuple<Object, MvccVersion> ver : vers) {
+                MvccVersion cntr = ver.get2();
+
+                MvccSnapshot readVer =
+                    new MvccSnapshotWithoutTxs(cntr.coordinatorVersion(), cntr.counter(), Integer.MAX_VALUE, 0);
+
+                row = cctx.offheap().mvccRead(cctx, key0, readVer);
+
+                Object verVal = ((CacheObject)ver.get1()).value(cctx.cacheObjectContext(), false);
+
+                checkRow(cctx, row, key0, verVal);
+            }
+
+            checkRow(cctx,
+                cctx.offheap().mvccRead(cctx, key0, version(vers.get(0).get2().coordinatorVersion() + 1, 1)),
+                key0,
+                val);
+
+            checkRow(cctx,
+                cctx.offheap().mvccRead(cctx, key0, version(vers.get(0).get2().coordinatorVersion(), vers.get(0).get2().counter() + 1)),
+                key0,
+                val);
+
+            MvccSnapshotResponse ver = version(vers.get(0).get2().coordinatorVersion(), 100000);
+
+            for (int v = 0; v < vers.size(); v++) {
+                MvccVersion cntr = vers.get(v).get2();
+
+                ver.addTx(cntr.counter());
+
+                row = cctx.offheap().mvccRead(cctx, key0, ver);
+
+                if (v == vers.size() - 1)
+                    assertNull(row);
+                else {
+                    Object nextVal = ((CacheObject)vers.get(v + 1).get1()).value(cctx.cacheObjectContext(), false);
+
+                    checkRow(cctx, row, key0, nextVal);
+                }
+            }
+        }
+
+        KeyCacheObject key = cctx.toCacheKeyObject(KEYS);
+
+        cache.put(key, 0);
+
+        cache.remove(key);
+
+        cctx.offheap().mvccRemoveAll((GridCacheMapEntry)cctx.cache().entryEx(key));
+
+        crd.ackQueryDone(fut.get(), MVCC_TRACKER_ID_NA);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testExpiration() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-7311");
+
+        final IgniteEx node = startGrid(0);
+
+        IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 1, 64));
+
+        final IgniteCache expiryCache =
+            cache.withExpiryPolicy(new TouchedExpiryPolicy(new Duration(TimeUnit.SECONDS, 1)));
+
+        for (int i = 0; i < 10; i++)
+            expiryCache.put(1, i);
+
+        assertTrue("Failed to wait for expiration", GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return expiryCache.localPeek(1) == null;
+            }
+        }, 5000));
+
+        for (int i = 0; i < 11; i++) {
+            if (i % 2 == 0)
+                expiryCache.put(1, i);
+            else
+                expiryCache.remove(1);
+        }
+
+        assertTrue("Failed to wait for expiration", GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                return expiryCache.localPeek(1) == null;
+            }
+        }, 5000));
+
+        expiryCache.put(1, 1);
+
+        assertTrue("Failed to wait for expiration", GridTestUtils.waitForCondition(new GridAbsPredicate() {
+            @Override public boolean apply() {
+                try {
+                    GridCacheContext cctx = node.context().cache().context().cacheContext(CU.cacheId(DEFAULT_CACHE_NAME));
+
+                    KeyCacheObject key = cctx.toCacheKeyObject(1);
+
+                    return cctx.offheap().read(cctx, key) == null;
+                }
+                catch (Exception e) {
+                    fail();
+
+                    return false;
+                }
+            }
+        }, 5000));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testChangeExpireTime() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-7311");
+
+        final IgniteEx node = startGrid(0);
+
+        IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 1, 64));
+
+        cache.put(1, 1);
+
+        final IgniteCache expiryCache =
+            cache.withExpiryPolicy(new TouchedExpiryPolicy(new Duration(TimeUnit.SECONDS, 1)));
+
+        expiryCache.get(1);
+    }
+
+    /**
+     * @param cctx Context.
+     * @param row Row.
+     * @param expKey Expected row key.
+     * @param expVal Expected row value.
+     */
+    private void checkRow(GridCacheContext cctx, CacheDataRow row, KeyCacheObject expKey, Object expVal) {
+        assertNotNull(row);
+        assertEquals(expKey, row.key());
+        assertEquals(expVal, row.value().value(cctx.cacheObjectContext(), false));
+    }
+
+    /**
+     * @param crdVer Coordinator version.
+     * @param cntr Counter.
+     * @return Version.
+     */
+    private MvccSnapshotResponse version(long crdVer, long cntr) {
+        MvccSnapshotResponse res = new MvccSnapshotResponse();
+
+        res.init(0, crdVer, cntr, MvccUtils.MVCC_START_OP_CNTR, MvccUtils.MVCC_COUNTER_NA, 0);
+
+        return res;
+    }
+
+    /**
+     * @param ccfg Cache configuration.
+     */
+    private void logCacheInfo(CacheConfiguration<?, ?> ccfg) {
+        log.info("Test cache [mode=" + ccfg.getCacheMode() +
+            ", sync=" + ccfg.getWriteSynchronizationMode() +
+            ", backups=" + ccfg.getBackups() +
+            ", near=" + (ccfg.getNearConfiguration() != null) +
+            ']');
+    }
+
+    /**
+     * @param cache Cache.
+     * @return Test keys.
+     * @throws Exception If failed.
+     */
+    private List<Integer> testKeys(IgniteCache<Integer, Integer> cache) throws Exception {
+        CacheConfiguration ccfg = cache.getConfiguration(CacheConfiguration.class);
+
+        List<Integer> keys = new ArrayList<>();
+
+        if (ccfg.getCacheMode() == PARTITIONED)
+            keys.add(nearKey(cache));
+
+        keys.add(primaryKey(cache));
+
+        if (ccfg.getBackups() != 0)
+            keys.add(backupKey(cache));
+
+        return keys;
+    }
+
+    /**
+     * Checks values obtained with different read modes.
+     * And returns value in case of it's equality for all read modes.
+     * Do not use in tests with writers contention.
+     *
+     * // TODO remove inTx flag in IGNITE-6938
+     * @param inTx Flag whether current read is inside transaction.
+     * This is because reads can't see writes made in current transaction.
+     * @param cache Cache.
+     * @param key Key.
+     * @param readModes Read modes to check.
+     * @return Value.
+     */
+    private Object checkAndGet(boolean inTx, IgniteCache cache, Object key, ReadMode ... readModes) {
+        assert readModes != null && readModes.length > 0;
+
+        if (inTx)
+            return getByReadMode(inTx, cache, key, GET);
+
+        Object prevVal = null;
+
+        for (int i = 0; i < readModes.length; i++) {
+            ReadMode readMode = readModes[i];
+
+            Object curVal = getByReadMode(inTx, cache, key, readMode);
+
+            if (i == 0)
+                prevVal = curVal;
+            else {
+                assertEquals("Different results on " + readModes[i - 1].name() + " and " +
+                    readMode.name() + " read modes.", prevVal, curVal);
+
+                prevVal = curVal;
+            }
+        }
+
+        return prevVal;
+    }
+
+    /**
+     * Reads value from cache for the given key using given read mode.
+     *
+     * // TODO IGNITE-6938 remove inTx flag
+     * // TODO IGNITE-6739 add SQL-get support "select _key, _val from cache where _key = key"
+     * @param inTx Flag whether current read is inside transaction.
+     * This is because reads can't see writes made in current transaction.
+     * @param cache Cache.
+     * @param key Key.
+     * @param readMode Read mode.
+     * @return Value.
+     */
+    private Object getByReadMode(boolean inTx, IgniteCache cache, final Object key, ReadMode readMode) {
+
+        // TODO Remove in IGNITE-6938
+        if (inTx)
+            readMode = GET;
+
+        switch (readMode) {
+            case GET:
+                return cache.get(key);
+
+            case SCAN:
+                List res = cache.query(new ScanQuery(new IgniteBiPredicate() {
+                    @Override public boolean apply(Object k, Object v) {
+                        return k.equals(key);
+                    }
+                })).getAll();
+
+                assertTrue(res.size() <= 1);
+
+                return res.isEmpty() ? null : ((IgniteBiTuple)res.get(0)).getValue();
+
+            default:
+                throw new IgniteException("Unsupported read mode: " + readMode);
+        }
+    }
+
+
+    /**
+     * Checks values obtained with different read modes.
+     * And returns value in case of it's equality for all read modes.
+     * Do not use in tests with writers contention.
+     *
+     * // TODO remove inTx flag in IGNITE-7764
+     * @param inTx Flag whether current read is inside transaction.
+     * This is because reads can't see writes made in current transaction.
+     * @param cache Cache.
+     * @param keys Key.
+     * @param readModes Read modes to check.
+     * @return Value.
+     */
+    private Map checkAndGetAll(boolean inTx, IgniteCache cache, Set keys, ReadMode ... readModes) {
+        assert readModes != null && readModes.length > 0;
+
+        if (inTx)
+            return getAllByReadMode(inTx, cache, keys, GET);
+
+        Map prevVal = null;
+
+        for (int i = 0; i < readModes.length; i++) {
+            ReadMode readMode = readModes[i];
+
+            Map curVal = getAllByReadMode(inTx, cache, keys, readMode);
+
+            if (i == 0)
+                prevVal = curVal;
+            else {
+                assertEquals("Different results on read modes " + readModes[i - 1] + " and " +
+                    readMode.name(), prevVal, curVal);
+
+                prevVal = curVal;
+            }
+        }
+
+        return prevVal;
+    }
+
+
+    /**
+     * Reads value from cache for the given key using given read mode.
+     *
+     * // TODO IGNITE-7764 remove inTx flag
+     * // TODO IGNITE-6739 add SQL-get support "select _key, _val from cache where _key in ... keySet"
+     * @param inTx Flag whether current read is inside transaction.
+     * This is because reads can't see writes made in current transaction.
+     * @param cache Cache.
+     * @param keys Key.
+     * @param readMode Read mode.
+     * @return Value.
+     */
+    private Map getAllByReadMode(boolean inTx, IgniteCache cache, Set keys, ReadMode readMode) {
+
+        // TODO Remove in IGNITE-6938
+        if (inTx)
+            readMode = GET;
+
+        switch (readMode) {
+            case GET:
+                return cache.getAll(keys);
+
+            case SCAN:
+                Map res = (Map)cache.query(new ScanQuery(new IgniteBiPredicate() {
+                    @Override public boolean apply(Object k, Object v) {
+                        return keys.contains(k);
+                    }
+                })).getAll()
+                    .stream()
+                    .collect(Collectors.toMap(v -> ((IgniteBiTuple)v).getKey(), v -> ((IgniteBiTuple)v).getValue()));
+
+                assertTrue(res.size() <= keys.size());
+
+                return res;
+
+            default:
+                throw new IgniteException("Unsupported read mode: " + readMode);
+        }
+    }
+
+    /**
+     *
+     */
+    static class Value {
+        /** */
+        int key;
+
+        /** */
+        int cnt;
+
+        /**
+         * @param key Key.
+         * @param cnt Update count.
+         */
+        Value(int key, int cnt) {
+            this.key = key;
+            this.cnt = cnt;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(Value.class, this);
+        }
+    }
+
+    /**
+     *
+     */
+    static class TestKey implements Serializable {
+        /** */
+        private final int key;
+
+        /** */
+        private final byte[] payload;
+
+        /**
+         * @param key Key.
+         * @param payloadSize Payload size.
+         */
+        public TestKey(int key, int payloadSize) {
+            this.key = key;
+            this.payload = new byte[payloadSize];
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (o == null || getClass() != o.getClass())
+                return false;
+
+            TestKey testKey = (TestKey)o;
+
+            if (key != testKey.key)
+                return false;
+
+            return Arrays.equals(payload, testKey.payload);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            int res = key;
+
+            res = 31 * res + Arrays.hashCode(payload);
+
+            return res;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "TestKey [k=" + key + ", payloadLen=" + payload.length + ']';
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccVacuumTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccVacuumTest.java
new file mode 100644
index 0000000..8c96b2e
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccVacuumTest.java
@@ -0,0 +1,184 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.List;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.worker.GridWorker;
+import org.apache.ignite.testframework.GridTestUtils;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/**
+ * Vacuum test.
+ */
+public class CacheMvccVacuumTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStartStopVacuumInMemory() throws Exception {
+        Ignite node0 = startGrid(0);
+        Ignite node1 = startGrid(1);
+
+        node1.createCache(new CacheConfiguration<>("test1")
+            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL));
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node1.createCache(new CacheConfiguration<>("test2")
+            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT));
+
+        ensureVacuum(node0);
+        ensureVacuum(node1);
+
+        stopGrid(0);
+
+        ensureNoVacuum(node0);
+        ensureVacuum(node1);
+
+        stopGrid(1);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStartStopVacuumPersistence() throws Exception {
+        persistence = true;
+
+        Ignite node0 = startGrid(0);
+        Ignite node1 = startGrid(1);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node1.cluster().active(true);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node1.createCache(new CacheConfiguration<>("test2")
+            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT));
+
+        ensureVacuum(node0);
+        ensureVacuum(node1);
+
+        node1.cluster().active(false);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node1.cluster().active(true);
+
+        ensureVacuum(node0);
+        ensureVacuum(node1);
+
+        stopGrid(0);
+
+        ensureNoVacuum(node0);
+        ensureVacuum(node1);
+
+        stopGrid(1);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node0 = startGrid(0);
+        node1 = startGrid(1);
+
+        ensureNoVacuum(node0);
+        ensureNoVacuum(node1);
+
+        node1.cluster().active(true);
+
+        ensureVacuum(node0);
+        ensureVacuum(node1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testVacuumNotStartedWithoutMvcc() throws Exception {
+        IgniteConfiguration cfg = getConfiguration("grid1");
+
+        Ignite node = startGrid(cfg);
+
+        ensureNoVacuum(node);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testVacuumNotStartedWithoutMvccPersistence() throws Exception {
+        persistence = true;
+
+        IgniteConfiguration cfg = getConfiguration("grid1");
+
+        Ignite node = startGrid(cfg);
+
+        ensureNoVacuum(node);
+
+        node.cluster().active(true);
+
+        ensureNoVacuum(node);
+    }
+
+    /**
+     * Ensures vacuum is running on the given node.
+     *
+     * @param node Node.
+     */
+    private void ensureVacuum(Ignite node) {
+        MvccProcessorImpl crd = mvccProcessor(node);
+
+        assertNotNull(crd);
+
+        List<GridWorker> vacuumWorkers = GridTestUtils.getFieldValue(crd, "vacuumWorkers");
+
+        assertNotNull(vacuumWorkers);
+        assertFalse(vacuumWorkers.isEmpty());
+
+        for (GridWorker w : vacuumWorkers) {
+            assertFalse(w.isCancelled());
+            assertFalse(w.isDone());
+        }
+    }
+
+    /**
+     * Ensures vacuum is stopped on the given node.
+     *
+     * @param node Node.
+     */
+    private void ensureNoVacuum(Ignite node) {
+        MvccProcessorImpl crd = mvccProcessor(node);
+
+        assertNull(GridTestUtils.<List<GridWorker>>getFieldValue(crd, "vacuumWorkers"));
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest.java
new file mode 100644
index 0000000..279d3c8
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence;
+
+import java.util.Arrays;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.binary.BinaryObject;
+import org.apache.ignite.binary.BinaryObjectBuilder;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.StopNodeFailureHandler;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        if ("client".equals(igniteInstanceName))
+            cfg.setClientMode(true).setFailureHandler(new StopNodeFailureHandler());
+
+        return cfg.setDiscoverySpi(new TcpDiscoverySpi()
+                .setIpFinder(IP_FINDER))
+                .setDataStorageConfiguration(new DataStorageConfiguration()
+                        .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
+                                .setPersistenceEnabled(true)));
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * Tests that joining node metadata correctly handled on client.
+     * @throws Exception If fails.
+     */
+    public void testJoiningNodeBinaryMetaOnClient() throws Exception {
+        IgniteEx ig0 = (IgniteEx)startGrids(2);
+
+        ig0.cluster().active(true);
+
+        addBinaryType(ig0, "test_1", new IgniteBiTuple<>("name", String.class));
+
+        stopGrid(0);
+
+        Ignite ig1 = grid(1);
+
+        // Modify existing type.
+        addBinaryType(ig1, "test_1", new IgniteBiTuple<>("id", Integer.class));
+
+        // Add new type.
+        addBinaryType(ig1, "test_2", new IgniteBiTuple<>("name", String.class));
+
+        stopGrid(1);
+
+        startGrid(0);
+
+        IgniteEx client = startGrid(getConfiguration("client"));
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        // Check that new metadata from grid_1 was handled without NPE on client.
+        assertNull(client.context().failure().failureContext());
+
+        // Check that metadata from grid_1 correctly loaded on client.
+        assertTrue(client.binary().type("test_1").fieldNames().containsAll(Arrays.asList("id", "name")));
+        assertTrue(client.binary().type("test_2").fieldNames().contains("name"));
+    }
+
+    /**
+     * @param ig Ig.
+     * @param typeName Type name.
+     * @param fields Fields.
+     */
+    @SafeVarargs
+    private final BinaryObject addBinaryType(Ignite ig, String typeName, IgniteBiTuple<String, Class<?>>... fields) {
+        BinaryObjectBuilder builder = ig.binary().builder(typeName);
+
+        if (fields != null) {
+            for (IgniteBiTuple<String,Class<?>> field: fields)
+                builder.setField(field.get1(), field.get2());
+        }
+
+        return builder.build();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCorruptedStoreTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCorruptedStoreTest.java
index fc2a7d6..40025f6 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCorruptedStoreTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsCorruptedStoreTest.java
@@ -38,7 +38,6 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.pagemem.PageIdUtils;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
 import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator;
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java
index d42b788..7318c25 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePdsTaskCancelingTest.java
@@ -200,11 +200,7 @@
         DataStorageConfiguration dbCfg = getDataStorageConfiguration();
 
         FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, file, factory, dbCfg,
-            new AllocatedPageTracker() {
-                @Override public void updateTotalAllocatedPages(long delta) {
-                    // No-op.
-                }
-            });
+            AllocatedPageTracker.NO_OP);
 
         int pageSize = dbCfg.getPageSize();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java
index 53e014e..dc4e17e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/IgnitePersistentStoreDataStructuresTest.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache.persistence;
 
+import java.util.concurrent.TimeUnit;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteAtomicLong;
 import org.apache.ignite.IgniteAtomicSequence;
@@ -30,6 +31,8 @@
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
@@ -43,6 +46,9 @@
     /** */
     private static final TcpDiscoveryIpFinder ipFinder = new TcpDiscoveryVmIpFinder(true);
 
+    /** */
+    private static volatile boolean autoActivationEnabled = false;
+
     /** {@inheritDoc} */
     @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
@@ -56,7 +62,7 @@
 
         cfg.setDataStorageConfiguration(memCfg);
 
-        cfg.setAutoActivationEnabled(false);
+        cfg.setAutoActivationEnabled(autoActivationEnabled);
 
         return cfg;
     }
@@ -71,6 +77,8 @@
         super.beforeTest();
 
         cleanPersistenceDir();
+
+        autoActivationEnabled = false;
     }
 
     /** {@inheritDoc} */
@@ -162,9 +170,42 @@
     /**
      * @throws Exception If failed.
      */
-    public void testSet() throws Exception {
-        fail("https://issues.apache.org/jira/browse/IGNITE-5553");
+    public void testSequenceAfterAutoactivation() throws Exception {
+        final String seqName = "testSequence";
 
+        autoActivationEnabled = true;
+
+        Ignite ignite = startGrids(2);
+
+        ignite.cluster().active(true);
+
+        ignite.atomicSequence(seqName, 0, true);
+
+        stopAllGrids(true);
+
+        final Ignite node = startGrids(2);
+
+        IgniteInternalFuture fut = GridTestUtils.runAsync(new Runnable() {
+            @Override public void run() {
+                // Should not hang.
+                node.atomicSequence(seqName, 0, false);
+            }
+        });
+
+        try {
+            fut.get(10, TimeUnit.SECONDS);
+        }
+        catch (IgniteFutureTimeoutCheckedException e) {
+            fut.cancel();
+
+            fail("Ignite was stuck on getting the atomic sequence after autoactivation.");
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSet() throws Exception {
         Ignite ignite = startGrids(4);
 
         ignite.active(true);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
index 774fcc6..fac021e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/MemoryPolicyInitializationTest.java
@@ -75,7 +75,7 @@
 
         Collection<DataRegion> allMemPlcs = ignite.context().cache().context().database().dataRegions();
 
-        assertTrue(allMemPlcs.size() == 2);
+        assertEquals(3, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
     }
@@ -91,7 +91,7 @@
 
         Collection<DataRegion> allMemPlcs = ignite.context().cache().context().database().dataRegions();
 
-        assertTrue(allMemPlcs.size() == 3);
+        assertEquals(4, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
@@ -112,13 +112,13 @@
 
         Collection<DataRegion> allMemPlcs = dbMgr.dataRegions();
 
-        assertTrue(allMemPlcs.size() == 2);
+        assertEquals(3, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
         DataRegion dfltMemPlc = U.field(dbMgr, "dfltDataRegion");
 
-        assertTrue(dfltMemPlc.config().getMaxSize() == USER_DEFAULT_MEM_PLC_SIZE);
+        assertEquals(dfltMemPlc.config().getMaxSize(), USER_DEFAULT_MEM_PLC_SIZE);
     }
 
     /**
@@ -136,13 +136,13 @@
 
         Collection<DataRegion> allMemPlcs = dbMgr.dataRegions();
 
-        assertTrue(allMemPlcs.size() == 3);
+        assertEquals(4, allMemPlcs.size());
 
         verifyDefaultAndSystemMemoryPolicies(allMemPlcs);
 
         DataRegion dfltMemPlc = U.field(dbMgr, "dfltDataRegion");
 
-        assertTrue(dfltMemPlc.config().getMaxSize() == USER_CUSTOM_MEM_PLC_SIZE);
+        assertEquals(dfltMemPlc.config().getMaxSize(), USER_CUSTOM_MEM_PLC_SIZE);
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java
index 268d2fb..4a22a2b 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsDataRegionMetricsTest.java
@@ -35,8 +35,10 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.pagemem.PageIdAllocator;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore;
 import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.T2;
@@ -82,13 +84,13 @@
         ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
 
         DataStorageConfiguration memCfg = new DataStorageConfiguration()
-                .setDefaultDataRegionConfiguration(
-                        new DataRegionConfiguration()
-                                .setInitialSize(INIT_REGION_SIZE)
-                                .setMaxSize(MAX_REGION_SIZE)
-                                .setPersistenceEnabled(true)
-                                .setMetricsEnabled(true))
-                .setCheckpointFrequency(1000);
+            .setDefaultDataRegionConfiguration(
+                new DataRegionConfiguration()
+                    .setInitialSize(INIT_REGION_SIZE)
+                    .setMaxSize(MAX_REGION_SIZE)
+                    .setPersistenceEnabled(true)
+                    .setMetricsEnabled(true))
+            .setCheckpointFrequency(1000);
 
         cfg.setDataStorageConfiguration(memCfg);
 
@@ -219,7 +221,7 @@
         ig.cluster().active(true);
 
         DataRegionMetricsImpl regionMetrics = ig.cachex(DEFAULT_CACHE_NAME)
-                .context().group().dataRegion().memoryMetrics();
+            .context().group().dataRegion().memoryMetrics();
 
         Assert.assertTrue(regionMetrics.getCheckpointBufferSize() != 0);
         Assert.assertTrue(regionMetrics.getCheckpointBufferSize() <= MAX_REGION_SIZE);
@@ -236,14 +238,14 @@
         ig.cluster().active(true);
 
         final DataRegionMetricsImpl regionMetrics = ig.cachex(DEFAULT_CACHE_NAME)
-                .context().group().dataRegion().memoryMetrics();
+            .context().group().dataRegion().memoryMetrics();
 
         Assert.assertEquals(0, regionMetrics.getUsedCheckpointBufferPages());
         Assert.assertEquals(0, regionMetrics.getUsedCheckpointBufferSize());
 
         load(ig);
 
-        GridCacheDatabaseSharedManager psMgr = (GridCacheDatabaseSharedManager) ig.context().cache().context().database();
+        GridCacheDatabaseSharedManager psMgr = (GridCacheDatabaseSharedManager)ig.context().cache().context().database();
 
         GridFutureAdapter<T2<Long, Long>> metricsResult = new GridFutureAdapter<>();
 
@@ -267,7 +269,7 @@
     /**
      * @param ig Ignite.
      */
-    private void load(Ignite ig){
+    private void load(Ignite ig) {
         IgniteCache<Integer, byte[]> cache = ig.cache(DEFAULT_CACHE_NAME);
 
         Random rnd = new Random();
@@ -292,18 +294,26 @@
 
     /** */
     private void checkMetricsConsistency(final IgniteEx node, String cacheName) throws Exception {
-        FilePageStoreManager pageStoreManager = (FilePageStoreManager)node.context().cache().context().pageStore();
+        FilePageStoreManager pageStoreMgr = (FilePageStoreManager)node.context().cache().context().pageStore();
 
-        long totalPersistanceSize = 0;
-        File cacheWorkDir = pageStoreManager.cacheWorkDir(
+        assert pageStoreMgr != null : "Persistence is not enabled";
+
+        File cacheWorkDir = pageStoreMgr.cacheWorkDir(
             node.getOrCreateCache(cacheName).getConfiguration(CacheConfiguration.class)
         );
 
+        long totalPersistenceSize = 0;
+
         try (DirectoryStream<Path> files = newDirectoryStream(
             cacheWorkDir.toPath(), entry -> entry.toFile().getName().endsWith(".bin"))
         ) {
-            for (Path path : files)
-                totalPersistanceSize += path.toFile().length();
+            for (Path path : files) {
+                File file = path.toFile();
+
+                FilePageStore store = (FilePageStore)pageStoreMgr.getStore(CU.cacheId(cacheName), partId(file));
+
+                totalPersistenceSize += path.toFile().length() - store.headerSize();
+            }
         }
 
         long totalAllocatedPagesFromMetrics = node.context().cache().context()
@@ -313,6 +323,21 @@
             .memoryMetrics()
             .getTotalAllocatedPages();
 
-        assertEquals(totalPersistanceSize / pageStoreManager.pageSize(), totalAllocatedPagesFromMetrics);
+        assertEquals(totalPersistenceSize / pageStoreMgr.pageSize(), totalAllocatedPagesFromMetrics);
+    }
+
+    /**
+     * @param partFile Partition file.
+     */
+    private static int partId(File partFile) {
+        String name = partFile.getName();
+
+        if (name.equals(FilePageStoreManager.INDEX_FILE_NAME))
+            return PageIdAllocator.INDEX_PARTITION;
+
+        if (name.startsWith(FilePageStoreManager.PART_FILE_PREFIX))
+            return Integer.parseInt(name.substring(FilePageStoreManager.PART_FILE_PREFIX.length(), name.indexOf('.')));
+
+        throw new IllegalStateException("Illegal partition file name: " + name);
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java
new file mode 100644
index 0000000..5885b7a
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsReserveWalSegmentsTest.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db;
+
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE;
+
+/**
+ * Test correctness of truncating unused WAL segments.
+ */
+public class IgnitePdsReserveWalSegmentsTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        System.setProperty(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, "2");
+
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setConsistentId(gridName);
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
+
+        CacheConfiguration<Integer, Object> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
+
+        ccfg.setAffinity(new RendezvousAffinityFunction(false, 32));
+
+        cfg.setCacheConfiguration(ccfg);
+
+        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
+
+        dbCfg.setPageSize(4 * 1024);
+
+        cfg.setDataStorageConfiguration(dbCfg);
+
+        dbCfg.setWalSegmentSize(1024 * 1024)
+            .setMaxWalArchiveSize(Long.MAX_VALUE)
+            .setWalSegments(10)
+            .setWalMode(WALMode.LOG_ONLY)
+            .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
+                .setMaxSize(100 * 1024 * 1024)
+                .setPersistenceEnabled(true));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * Tests that range reserved method return correct number of reserved WAL segments.
+     *
+     * @throws Exception if failed.
+     */
+    public void testWalManagerRangeReservation() throws Exception {
+        IgniteEx ig0 = prepareGrid(4);
+
+        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ig0.context().cache().context()
+            .database();
+
+        IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal();
+
+        long resIdx = getReservedWalSegmentIndex(dbMgr);
+
+        assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0);
+
+            FileWALPointer lowPtr = (FileWALPointer)dbMgr.checkpointHistory().firstCheckpointPointer();
+
+        assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx);
+
+        // Reserve previous WAL segment.
+        wal.reserve(new FileWALPointer(resIdx - 1, 0, 0));
+
+        int resCnt = wal.reserved(new FileWALPointer(resIdx - 1, 0, 0), new FileWALPointer(resIdx, 0, 0));
+
+        assertTrue("Expected resCnt is 2, real is " + resCnt, resCnt == 2);
+    }
+
+    /**
+     * Tests that grid cache manager correctly truncates unused WAL segments;
+     *
+     * @throws Exception if failed.
+     */
+    public void testWalDoesNotTruncatedWhenSegmentReserved() throws Exception {
+        IgniteEx ig0 = prepareGrid(4);
+
+        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ig0.context().cache().context()
+            .database();
+
+        IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal();
+
+        long resIdx = getReservedWalSegmentIndex(dbMgr);
+
+        assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0);
+
+            FileWALPointer lowPtr = (FileWALPointer) dbMgr.checkpointHistory().firstCheckpointPointer();
+
+        assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx);
+
+        // Reserve previous WAL segment.
+        wal.reserve(new FileWALPointer(resIdx - 1, 0, 0));
+
+        int numDel = wal.truncate(null, lowPtr);
+
+        int expNumDel = (int)resIdx - 1;
+
+        assertTrue("Expected del segments is " + expNumDel + ", real is " + numDel, expNumDel == numDel);
+    }
+
+    /**
+     * Starts grid and populates test data.
+     *
+     * @param cnt Grid count.
+     * @return First started grid.
+     * @throws Exception If failed.
+     */
+    private IgniteEx prepareGrid(int cnt) throws Exception {
+        IgniteEx ig0 = (IgniteEx)startGrids(cnt);
+
+        ig0.cluster().active(true);
+
+        IgniteCache<Object, Object> cache = ig0.cache(DEFAULT_CACHE_NAME);
+
+        for (int k = 0; k < 1_000; k++) {
+            cache.put(k, new byte[1024]);
+
+            if (k % 100 == 0)
+                forceCheckpoint();
+        }
+
+        return ig0;
+    }
+
+    /**
+     * Get index of reserved WAL segment by checkpointer.
+     *
+     * @param dbMgr Database shared manager.
+     */
+    private long getReservedWalSegmentIndex(GridCacheDatabaseSharedManager dbMgr) {
+        CheckpointHistory cpHist = dbMgr.checkpointHistory();
+
+        return ((FileWALPointer) cpHist.firstCheckpointPointer()).index();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsUnusedWalSegmentsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsUnusedWalSegmentsTest.java
deleted file mode 100644
index 06a9ec2..0000000
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/IgnitePdsUnusedWalSegmentsTest.java
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.cache.persistence.db;
-
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.DataRegionConfiguration;
-import org.apache.ignite.configuration.DataStorageConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.configuration.WALMode;
-import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
-import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory;
-import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE;
-
-/**
- * Test correctness of truncating unused WAL segments.
- */
-public class IgnitePdsUnusedWalSegmentsTest extends GridCommonAbstractTest {
-    /** */
-    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        System.setProperty(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, "2");
-
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        cfg.setConsistentId(gridName);
-
-        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
-
-        CacheConfiguration<Integer, Object> ccfg = new CacheConfiguration<>(DEFAULT_CACHE_NAME);
-
-        ccfg.setAffinity(new RendezvousAffinityFunction(false, 32));
-
-        cfg.setCacheConfiguration(ccfg);
-
-        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
-
-        dbCfg.setPageSize(4 * 1024);
-
-        cfg.setDataStorageConfiguration(dbCfg);
-
-        dbCfg.setWalSegmentSize(1024 * 1024)
-                .setWalHistorySize(Integer.MAX_VALUE)
-                .setWalSegments(10)
-                .setWalMode(WALMode.LOG_ONLY)
-                .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
-                        .setMaxSize(100 * 1024 * 1024)
-                        .setPersistenceEnabled(true));
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        cleanPersistenceDir();
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        cleanPersistenceDir();
-    }
-
-    /**
-     * Tests that range reserved method return correct number of reserved WAL segments.
-     *
-     * @throws Exception if failed.
-     */
-    public void testWalManagerRangeReservation() throws Exception {
-        try{
-            IgniteEx ig0 = prepareGrid(4);
-
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) ig0.context().cache().context()
-                    .database();
-
-            IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal();
-
-            long resIdx = getReservedWalSegmentIndex(dbMgr);
-
-            assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0);
-
-            FileWALPointer lowPtr = (FileWALPointer)dbMgr.checkpointHistory().firstCheckpointPointer();
-
-            assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx);
-
-            // Reserve previous WAL segment.
-            wal.reserve(new FileWALPointer(resIdx - 1, 0, 0));
-
-            int resCnt = wal.reserved(new FileWALPointer(resIdx - 1, 0, 0), new FileWALPointer(resIdx, 0, 0));
-
-            assertTrue("Expected resCnt is 2, real is " + resCnt, resCnt == 2);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * Tests that grid cache manager correctly truncates unused WAL segments;
-     *
-     * @throws Exception if failed.
-     */
-    public void testUnusedWalTruncate() throws Exception {
-        try{
-            IgniteEx ig0 = prepareGrid(4);
-
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) ig0.context().cache().context()
-                    .database();
-
-            IgniteWriteAheadLogManager wal = ig0.context().cache().context().wal();
-
-            long resIdx = getReservedWalSegmentIndex(dbMgr);
-
-            assertTrue("Expected that at least resIdx greater than 0, real is " + resIdx, resIdx > 0);
-
-            FileWALPointer lowPtr = (FileWALPointer) dbMgr.checkpointHistory().firstCheckpointPointer();
-
-            assertTrue("Expected that dbMbr returns valid resIdx", lowPtr.index() == resIdx);
-
-            // Reserve previous WAL segment.
-            wal.reserve(new FileWALPointer(resIdx - 1, 0, 0));
-
-            int numDel = wal.truncate(null, lowPtr);
-
-            int expNumDel = (int)resIdx - 1;
-
-            assertTrue("Expected del segments is " + expNumDel + ", real is " + numDel, expNumDel == numDel);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * Starts grid and populates test data.
-     *
-     * @param cnt Grid count.
-     * @return First started grid.
-     * @throws Exception If failed.
-     */
-    private IgniteEx prepareGrid(int cnt) throws Exception {
-        IgniteEx ig0 = (IgniteEx)startGrids(cnt);
-
-        ig0.cluster().active(true);
-
-        IgniteCache<Object, Object> cache = ig0.cache(DEFAULT_CACHE_NAME);
-
-        for (int k = 0; k < 10_000; k++)
-            cache.put(k, new byte[1024]);
-
-        forceCheckpoint();
-
-        for (int k = 0; k < 1_000; k++)
-            cache.put(k, new byte[1024]);
-
-        forceCheckpoint();
-
-        return ig0;
-    }
-
-
-    /**
-     * Get index of reserved WAL segment by checkpointer.
-     *
-     * @param dbMgr Database shared manager.
-     */
-    private long getReservedWalSegmentIndex(GridCacheDatabaseSharedManager dbMgr) {
-        CheckpointHistory cpHist = dbMgr.checkpointHistory();
-
-        return ((FileWALPointer) cpHist.firstCheckpointPointer()).index();
-    }
-}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsDiskErrorsRecoveringTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsDiskErrorsRecoveringTest.java
index 7efe29b..bd30696 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsDiskErrorsRecoveringTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/file/IgnitePdsDiskErrorsRecoveringTest.java
@@ -44,6 +44,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator;
 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIO;
 import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
 import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager;
@@ -158,7 +159,7 @@
      */
     public void testRecoveringOnNodeStartMarkerWriteFail() throws Exception {
         // Fail to write node start marker tmp file at the second checkpoint. Pass only initial checkpoint.
-        ioFactory = new FilteringFileIOFactory("started.bin" + GridCacheDatabaseSharedManager.FILE_TMP_SUFFIX, new LimitedSizeFileIOFactory(new RandomAccessFileIOFactory(), 20));
+        ioFactory = new FilteringFileIOFactory("started.bin" + FilePageStoreManager.TMP_SUFFIX, new LimitedSizeFileIOFactory(new RandomAccessFileIOFactory(), 20));
 
         IgniteEx grid = startGrid(0);
         grid.cluster().active(true);
@@ -213,7 +214,7 @@
      */
     public void testRecoveringOnCheckpointBeginFail() throws Exception {
         // Fail to write checkpoint start marker tmp file at the second checkpoint. Pass only initial checkpoint.
-        ioFactory = new FilteringFileIOFactory("START.bin" + GridCacheDatabaseSharedManager.FILE_TMP_SUFFIX, new LimitedSizeFileIOFactory(new RandomAccessFileIOFactory(), 20));
+        ioFactory = new FilteringFileIOFactory("START.bin" + FilePageStoreManager.TMP_SUFFIX, new LimitedSizeFileIOFactory(new RandomAccessFileIOFactory(), 20));
 
         final IgniteEx grid = startGrid(0);
         grid.cluster().active(true);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteNodeStoppedDuringDisableWALTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteNodeStoppedDuringDisableWALTest.java
index 80198e8..a744ab1 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteNodeStoppedDuringDisableWALTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteNodeStoppedDuringDisableWALTest.java
@@ -37,6 +37,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.WalStateManager.WALDisableContext;
 import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFoldersResolver;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
@@ -47,7 +48,7 @@
 import static java.nio.file.FileVisitResult.CONTINUE;
 import static java.nio.file.Files.walkFileTree;
 import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.CP_FILE_NAME_PATTERN;
-import static org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager.FILE_TMP_SUFFIX;
+
 import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_NAME;
 import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.META_STORAGE_NAME;
 import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_PREFIX;
@@ -208,7 +209,7 @@
 
                     boolean failed = false;
 
-                    if (name.endsWith(FILE_TMP_SUFFIX))
+                    if (name.endsWith(FilePageStoreManager.TMP_SUFFIX))
                         failed = true;
 
                     if (CP_FILE_NAME_PATTERN.matcher(name).matches())
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWALTailIsReachedDuringIterationOverArchiveTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWALTailIsReachedDuringIterationOverArchiveTest.java
new file mode 100644
index 0000000..e3c2c6c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWALTailIsReachedDuringIterationOverArchiveTest.java
@@ -0,0 +1,246 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
+import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
+import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory.IteratorParametersBuilder;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Assert;
+
+import static java.nio.ByteBuffer.allocate;
+import static java.nio.file.StandardOpenOption.WRITE;
+import static java.util.concurrent.ThreadLocalRandom.current;
+
+/**
+ *
+ */
+public class IgniteWALTailIsReachedDuringIterationOverArchiveTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** WAL segment size. */
+    private static final int WAL_SEGMENT_SIZE = 10 * 1024 * 1024;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String name) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(name);
+
+        cfg.setDataStorageConfiguration(
+            new DataStorageConfiguration()
+                .setWalSegmentSize(WAL_SEGMENT_SIZE)
+                .setWalSegments(2)
+                .setDefaultDataRegionConfiguration(
+                    new DataRegionConfiguration()
+                        .setPersistenceEnabled(true)
+                )
+        );
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
+
+        cfg.setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cleanPersistenceDir();
+
+        Ignite ig = startGrid();
+
+        ig.cluster().active(true);
+
+        try (IgniteDataStreamer<Integer, byte[]> st = ig.dataStreamer(DEFAULT_CACHE_NAME)){
+            st.allowOverwrite(true);
+
+            byte[] payload = new byte[1024];
+
+            // Generate WAL segment files.
+            for (int i = 0; i < 100 * 1024; i++)
+                st.addData(i, payload);
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStandAloneIterator() throws Exception {
+        IgniteEx ig = grid();
+
+        IgniteWriteAheadLogManager wal = ig.context().cache().context().wal();
+
+        File walArchiveDir = U.field(wal, "walArchiveDir");
+
+        IgniteWalIteratorFactory iteratorFactory = new IgniteWalIteratorFactory();
+
+        doTest(wal, iteratorFactory.iterator(walArchiveDir));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testWALManagerIterator() throws Exception {
+        IgniteEx ig = grid();
+
+        IgniteWriteAheadLogManager wal = ig.context().cache().context().wal();
+
+        doTest(wal, wal.replay(null));
+    }
+
+    /**
+     *
+     * @param walMgr WAL manager.
+     * @param it WAL iterator.
+     * @throws IOException If IO exception.
+     * @throws IgniteCheckedException If WAL iterator failed.
+     */
+    private void doTest(IgniteWriteAheadLogManager walMgr, WALIterator it) throws IOException, IgniteCheckedException {
+        File walArchiveDir = U.field(walMgr, "walArchiveDir");
+
+        IgniteWalIteratorFactory iteratorFactory = new IgniteWalIteratorFactory();
+
+        List<FileDescriptor> descs = iteratorFactory.resolveWalFiles(
+            new IteratorParametersBuilder()
+                .filesOrDirs(walArchiveDir)
+        );
+
+        int maxIndex = descs.size() - 1;
+        int minIndex = 1;
+
+        int corruptedIdx = current().nextInt(minIndex, maxIndex);
+
+        log.info("Corrupted segment with idx:" + corruptedIdx);
+
+        FileWALPointer corruptedPtr = corruptedWAlSegmentFile(
+            descs.get(corruptedIdx),
+            new RandomAccessFileIOFactory(),
+            iteratorFactory
+        );
+
+        log.info("Should fail on ptr " + corruptedPtr);
+
+        FileWALPointer lastReadPtr = null;
+
+        boolean exception = false;
+
+        try (WALIterator it0 = it) {
+            while (it0.hasNextX()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it0.nextX();
+
+                lastReadPtr = (FileWALPointer)tup.get1();
+            }
+        }
+        catch (IgniteCheckedException e) {
+            if (e.getMessage().contains("WAL tail reached in archive directory, WAL segment file is corrupted")
+                || e.getMessage().contains("WAL tail reached not in the last available segment"))
+                exception = true;
+        }
+
+        Assert.assertNotNull(lastReadPtr);
+
+        if (!exception) {
+            fail("Last read ptr=" + lastReadPtr + ", corruptedPtr=" + corruptedPtr);
+        }
+    }
+
+    /**
+     *
+     * @param desc WAL segment descriptor.
+     * @param ioFactory IO factory.
+     * @param iteratorFactory Iterator factory.
+     * @return Corrupted position/
+     * @throws IOException If IO exception.
+     * @throws IgniteCheckedException If iterator failed.
+     */
+    private FileWALPointer corruptedWAlSegmentFile(
+        FileDescriptor desc,
+        FileIOFactory ioFactory,
+        IgniteWalIteratorFactory iteratorFactory
+    ) throws IOException, IgniteCheckedException {
+        LinkedList<FileWALPointer> pointers = new LinkedList<>();
+
+        try (WALIterator it = iteratorFactory.iterator(desc.file())) {
+            while (it.hasNext()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                pointers.add((FileWALPointer)tup.get1());
+            }
+        }
+
+        int pointToCorrupt = current().nextInt(pointers.size());
+
+        FileWALPointer ptr = pointers.get(pointToCorrupt);
+
+        int offset = ptr.fileOffset();
+
+        // 20 pointer size, 8 idx, 4 offset, 4 length.
+        ByteBuffer buf = allocate(20);
+
+        Random r = new Random();
+
+        // Corrupt record pointer.
+        r.nextBytes(buf.array());
+
+        try (FileIO io = ioFactory.create(desc.file(), WRITE)) {
+            io.write(buf, offset + 1);
+
+            io.force(true);
+        }
+
+        return ptr;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFormatFileFailoverTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFormatFileFailoverTest.java
index 379b8c3..5a1a6fa 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFormatFileFailoverTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalFormatFileFailoverTest.java
@@ -32,7 +32,7 @@
 import org.apache.ignite.failure.FailureHandler;

 import org.apache.ignite.failure.TestFailureHandler;

 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;

-import org.apache.ignite.internal.pagemem.wal.StorageException;

+import org.apache.ignite.internal.processors.cache.persistence.StorageException;

 import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;

 import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator;

 import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;

diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java
index 00ed6f1..9dbef5d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalIteratorSwitchSegmentTest.java
@@ -142,6 +142,7 @@
                 null,
                 null,
                 null,
+                null,
 
                 null)
         ).createSerializer(serVer);
@@ -370,6 +371,7 @@
             new GridCacheIoManager(),
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRebalanceTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRebalanceTest.java
index d4f6f0c..57565bf 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRebalanceTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRebalanceTest.java
@@ -17,6 +17,10 @@
 
 package org.apache.ignite.internal.processors.cache.persistence.db.wal;
 
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.OpenOption;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -38,18 +42,31 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.WALMode;
 import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
 import org.apache.ignite.internal.managers.communication.GridIoMessage;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.GridCachePreloader;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemandMessage;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemander;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIODecorator;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager;
 import org.apache.ignite.internal.util.typedef.G;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteBiPredicate;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.spi.IgniteSpiException;
-import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.junit.Assert;
 
+import static java.nio.file.StandardOpenOption.CREATE;
+import static java.nio.file.StandardOpenOption.READ;
+import static java.nio.file.StandardOpenOption.WRITE;
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD;
 
 /**
@@ -62,6 +79,9 @@
     /** Partitions count. */
     private static final int PARTS_CNT = 32;
 
+    /** Block message predicate to set to Communication SPI in node configuration. */
+    private IgniteBiPredicate<ClusterNode, Message> blockMessagePredicate;
+
     /** {@inheritDoc} */
     @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
         System.setProperty(IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "0"); //to make all rebalance wal-based
@@ -92,6 +112,12 @@
 
         cfg.setCommunicationSpi(new WalRebalanceCheckingCommunicationSpi());
 
+        if (blockMessagePredicate != null) {
+            TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) cfg.getCommunicationSpi();
+
+            spi.blockMessages(blockMessagePredicate);
+        }
+
         return cfg;
     }
 
@@ -227,6 +253,8 @@
                 cache.put(k, new IndexedObject(k - 1));
         }
 
+        forceCheckpoint();
+
         stopAllGrids();
 
         IgniteEx ig0 = (IgniteEx) startGrids(2);
@@ -240,6 +268,8 @@
         for (int k = 0; k < entryCnt; k++)
             cache.put(k, new IndexedObject(k));
 
+        forceCheckpoint();
+
         // This node should rebalance data from other nodes and shouldn't have WAL history.
         Ignite ignite = startGrid(2);
 
@@ -258,6 +288,8 @@
                 cache.remove(k);
         }
 
+        forceCheckpoint();
+
         // Stop grids which have actual WAL history.
         stopGrid(0);
 
@@ -309,6 +341,8 @@
                 cache.put(k, new IndexedObject(k - 1));
         }
 
+        forceCheckpoint();
+
         stopAllGrids();
 
         // Rewrite data with globally disabled WAL.
@@ -325,6 +359,8 @@
         for (int k = 0; k < entryCnt; k++)
             cache.put(k, new IndexedObject(k));
 
+        forceCheckpoint();
+
         crd.cluster().enableWal(CACHE_NAME);
 
         // This node shouldn't rebalance data using WAL, because it was disabled on other nodes.
@@ -365,6 +401,100 @@
     }
 
     /**
+     * Tests that cache rebalance is cancelled if supplyer node got exception during iteration over WAL.
+     *
+     * @throws Exception If failed.
+     */
+    public void testRebalanceCancelOnSupplyError() throws Exception {
+        // Prepare some data.
+        IgniteEx crd = (IgniteEx) startGrids(3);
+
+        crd.cluster().active(true);
+
+        final int entryCnt = PARTS_CNT * 10;
+
+        {
+            IgniteCache<Object, Object> cache = crd.cache(CACHE_NAME);
+
+            for (int k = 0; k < entryCnt; k++)
+                cache.put(k, new IndexedObject(k - 1));
+        }
+
+        forceCheckpoint();
+
+        stopAllGrids();
+
+        // Rewrite data to trigger further rebalance.
+        IgniteEx supplierNode = (IgniteEx) startGrid(0);
+
+        supplierNode.cluster().active(true);
+
+        IgniteCache<Object, Object> cache = supplierNode.cache(CACHE_NAME);
+
+        for (int k = 0; k < entryCnt; k++)
+            cache.put(k, new IndexedObject(k));
+
+        forceCheckpoint();
+
+        final int groupId = supplierNode.cachex(CACHE_NAME).context().groupId();
+
+        // Delay rebalance process for specified group.
+        blockMessagePredicate = (node, msg) -> {
+            if (msg instanceof GridDhtPartitionDemandMessage)
+                return ((GridDhtPartitionDemandMessage) msg).groupId() == groupId;
+
+            return false;
+        };
+
+        IgniteEx demanderNode = startGrid(2);
+
+        AffinityTopologyVersion curTopVer = demanderNode.context().discovery().topologyVersionEx();
+
+        // Wait for rebalance process start on demander node.
+        final GridCachePreloader preloader = demanderNode.cachex(CACHE_NAME).context().group().preloader();
+
+        GridTestUtils.waitForCondition(() ->
+            ((GridDhtPartitionDemander.RebalanceFuture) preloader.rebalanceFuture()).topologyVersion().equals(curTopVer),
+            getTestTimeout()
+        );
+
+        // Inject I/O factory which can throw exception during WAL read on supplier node.
+        FailingIOFactory ioFactory = new FailingIOFactory(new RandomAccessFileIOFactory());
+
+        ((FileWriteAheadLogManager) supplierNode.cachex(CACHE_NAME).context().shared().wal()).setFileIOFactory(ioFactory);
+
+        ioFactory.throwExceptionOnWalRead();
+
+        // Resume rebalance process.
+        TestRecordingCommunicationSpi spi = (TestRecordingCommunicationSpi) demanderNode.configuration().getCommunicationSpi();
+
+        spi.stopBlock();
+
+        // Wait till rebalance will be failed and cancelled.
+        Boolean result = preloader.rebalanceFuture().get();
+
+        Assert.assertEquals("Rebalance should be cancelled on demander node: " + preloader.rebalanceFuture(), false, result);
+
+        // Stop blocking messages and fail WAL during read.
+        blockMessagePredicate = null;
+
+        ioFactory.reset();
+
+        // Start last grid and wait for rebalance.
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        // Check data consistency.
+        for (Ignite ig : G.allGrids()) {
+            IgniteCache<Object, Object> cache1 = ig.cache(CACHE_NAME);
+
+            for (int k = 0; k < entryCnt; k++)
+                assertEquals(new IndexedObject(k), cache1.get(k));
+        }
+    }
+
+    /**
      *
      */
     private static class IndexedObject {
@@ -409,7 +539,7 @@
     /**
      * Wrapper of communication spi to detect on what topology versions WAL rebalance has happened.
      */
-    public static class WalRebalanceCheckingCommunicationSpi extends TcpCommunicationSpi {
+    public static class WalRebalanceCheckingCommunicationSpi extends TestRecordingCommunicationSpi {
         /** (Group ID, Set of topology versions). */
         private static final Map<Integer, Set<Long>> topVers = new HashMap<>();
 
@@ -464,4 +594,55 @@
             super.sendMessage(node, msg, ackC);
         }
     }
+
+    /**
+     *
+     */
+    static class FailingIOFactory implements FileIOFactory {
+        /** Fail read operations. */
+        private volatile boolean failRead;
+
+        /** Delegate. */
+        private final FileIOFactory delegate;
+
+        /**
+         * @param delegate Delegate.
+         */
+        FailingIOFactory(FileIOFactory delegate) {
+            this.delegate = delegate;
+        }
+
+        /** {@inheritDoc} */
+        @Override public FileIO create(File file) throws IOException {
+            return create(file, CREATE, WRITE, READ);
+        }
+
+        /** {@inheritDoc} */
+        @Override public FileIO create(File file, OpenOption... modes) throws IOException {
+            FileIO delegateIO = delegate.create(file, modes);
+
+            if (file.getName().endsWith(".wal") && failRead)
+                return new FileIODecorator(delegateIO) {
+                    @Override public int read(ByteBuffer destBuf) throws IOException {
+                        throw new IgniteException("Test exception.");
+                    }
+                };
+
+            return delegateIO;
+        }
+
+        /**
+         *
+         */
+        public void throwExceptionOnWalRead() {
+            failRead = true;
+        }
+
+        /**
+         *
+         */
+        public void reset() {
+            failRead = false;
+        }
+    }
 }
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java
deleted file mode 100644
index 116f174..0000000
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java
+++ /dev/null
@@ -1,1906 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.internal.processors.cache.persistence.db.wal;
-
-import java.io.File;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Random;
-import java.util.Set;
-import java.util.UUID;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.TimeUnit;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteCompute;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.IgniteSystemProperties;
-import org.apache.ignite.cache.CacheAtomicityMode;
-import org.apache.ignite.cache.CacheMode;
-import org.apache.ignite.cache.CacheRebalanceMode;
-import org.apache.ignite.cache.CacheWriteSynchronizationMode;
-import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
-import org.apache.ignite.cache.query.SqlFieldsQuery;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.configuration.BinaryConfiguration;
-import org.apache.ignite.configuration.CacheConfiguration;
-import org.apache.ignite.configuration.DataRegionConfiguration;
-import org.apache.ignite.configuration.DataStorageConfiguration;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.configuration.WALMode;
-import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
-import org.apache.ignite.internal.pagemem.FullPageId;
-import org.apache.ignite.internal.pagemem.PageUtils;
-import org.apache.ignite.internal.pagemem.wal.WALIterator;
-import org.apache.ignite.internal.pagemem.wal.WALPointer;
-import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
-import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
-import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord;
-import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;
-import org.apache.ignite.internal.pagemem.wal.record.TxRecord;
-import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
-import org.apache.ignite.internal.pagemem.wal.record.delta.PageDeltaRecord;
-import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
-import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
-import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor;
-import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
-import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.TrackingPageIO;
-import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
-import org.apache.ignite.internal.util.GridUnsafe;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.internal.util.typedef.PA;
-import org.apache.ignite.internal.util.typedef.PAX;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.lang.IgniteBiTuple;
-import org.apache.ignite.lang.IgniteCallable;
-import org.apache.ignite.lang.IgnitePredicate;
-import org.apache.ignite.lang.IgniteRunnable;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.apache.ignite.testframework.GridTestUtils;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy;
-import org.apache.ignite.transactions.Transaction;
-import org.apache.ignite.transactions.TransactionConcurrency;
-import org.apache.ignite.transactions.TransactionIsolation;
-import org.junit.Assert;
-
-import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR;
-
-/**
- *
- */
-public class IgniteWalRecoveryTest extends GridCommonAbstractTest {
-    /** */
-    private static final String HAS_CACHE = "HAS_CACHE";
-
-    /** */
-    private static final int LARGE_ARR_SIZE = 1025;
-
-    /** */
-    private boolean fork;
-
-    /** */
-    private String cacheName;
-
-    /** */
-    private int walSegmentSize;
-
-    /** Log only. */
-    private boolean logOnly;
-
-    /** {@inheritDoc} */
-    @Override protected boolean isMultiJvm() {
-        return fork;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(gridName);
-
-        CacheConfiguration<Integer, IndexedObject> ccfg = new CacheConfiguration<>(cacheName);
-
-        ccfg.setAtomicityMode(CacheAtomicityMode.ATOMIC);
-        ccfg.setRebalanceMode(CacheRebalanceMode.SYNC);
-        ccfg.setAffinity(new RendezvousAffinityFunction(false, 32));
-        ccfg.setNodeFilter(new RemoteNodeFilter());
-        ccfg.setIndexedTypes(Integer.class, IndexedObject.class);
-
-        cfg.setCacheConfiguration(ccfg);
-
-        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
-
-        dbCfg.setPageSize(4 * 1024);
-
-        DataRegionConfiguration memPlcCfg = new DataRegionConfiguration();
-
-        memPlcCfg.setName("dfltDataRegion");
-        memPlcCfg.setInitialSize(1024L * 1024 * 1024);
-        memPlcCfg.setMaxSize(1024L * 1024 * 1024);
-        memPlcCfg.setPersistenceEnabled(true);
-
-        dbCfg.setDefaultDataRegionConfiguration(memPlcCfg);
-
-        dbCfg.setWalRecordIteratorBufferSize(1024 * 1024);
-
-        dbCfg.setWalHistorySize(2);
-
-        if (logOnly)
-            dbCfg.setWalMode(WALMode.LOG_ONLY);
-
-        if (walSegmentSize != 0)
-            dbCfg.setWalSegmentSize(walSegmentSize);
-
-        cfg.setDataStorageConfiguration(dbCfg);
-
-        cfg.setMarshaller(null);
-
-        BinaryConfiguration binCfg = new BinaryConfiguration();
-
-        binCfg.setCompactFooter(false);
-
-        cfg.setBinaryConfiguration(binCfg);
-
-        if (!getTestIgniteInstanceName(0).equals(gridName))
-            cfg.setUserAttributes(F.asMap(HAS_CACHE, true));
-
-        return cfg;
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        stopAllGrids();
-
-        cleanPersistenceDir();
-
-        cacheName = "partitioned";
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTest() throws Exception {
-        stopAllGrids();
-
-        logOnly = false;
-
-        cleanPersistenceDir();
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalBig() throws Exception {
-        IgniteEx ignite = startGrid(1);
-
-        ignite.active(true);
-
-        IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-        Random rnd = new Random();
-
-        Map<Integer, IndexedObject> map = new HashMap<>();
-
-        for (int i = 0; i < 10_000; i++) {
-            if (i % 1000 == 0)
-                X.println(" >> " + i);
-
-            int k = rnd.nextInt(300_000);
-            IndexedObject v = new IndexedObject(rnd.nextInt(10_000));
-
-            cache.put(k, v);
-            map.put(k, v);
-        }
-
-        // Check.
-        for (Integer k : map.keySet())
-            assertEquals(map.get(k), cache.get(k));
-
-        stopGrid(1);
-
-        ignite = startGrid(1);
-
-        ignite.active(true);
-
-        cache = ignite.cache("partitioned");
-
-        // Check.
-        for (Integer k : map.keySet())
-            assertEquals(map.get(k), cache.get(k));
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalBigObjectNodeCancel() throws Exception {
-        final int MAX_SIZE_POWER = 21;
-
-        IgniteEx ignite = startGrid(1);
-
-        ignite.active(true);
-
-        IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-        for (int i = 0; i < MAX_SIZE_POWER; ++i) {
-            int size = 1 << i;
-
-            cache.put("key_" + i, createTestData(size));
-        }
-
-        stopGrid(1, true);
-
-        ignite = startGrid(1);
-
-        ignite.active(true);
-
-        cache = ignite.cache("partitioned");
-
-        // Check.
-        for (int i = 0; i < MAX_SIZE_POWER; ++i) {
-            int size = 1 << i;
-
-            int[] data = createTestData(size);
-
-            int[] val = (int[])cache.get("key_" + i);
-
-            assertTrue("Invalid data. [key=key_" + i + ']', Arrays.equals(data, val));
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testSwitchClassLoader() throws Exception {
-        try {
-            final IgniteEx igniteEx = startGrid(1);
-
-            // CustomDiscoveryMessage will trigger service tasks
-            startGrid(2);
-
-            igniteEx.active(true);
-
-            IgniteCache<Integer, EnumVal> cache = igniteEx.cache("partitioned");
-
-            // Creates LoadCacheJobV2
-//            cache.loadCache(null);
-
-            final ClassLoader oldCl = Thread.currentThread().getContextClassLoader();
-            final ClassLoader newCl = getExternalClassLoader();
-
-            Thread.currentThread().setContextClassLoader(newCl);
-
-            for (int i = 0; i < 10; i++)
-                cache.put(i, i % 2 == 0 ? EnumVal.VAL1 : EnumVal.VAL2);
-
-            for (int i = 0; i < 10; i++)
-                assert cache.containsKey(i);
-
-            // Invokes ClearTask with new class loader
-            cache.clear();
-
-            Thread.currentThread().setContextClassLoader(oldCl);
-
-            for (int i = 0; i < 10; i++)
-                cache.put(i, i % 2 == 0 ? EnumVal.VAL1 : EnumVal.VAL2);
-
-            for (int i = 0; i < 10; i++)
-                assert cache.containsKey(i);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalSimple() throws Exception {
-        try {
-            IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            info(" --> step1");
-
-            for (int i = 0; i < 10_000; i += 2) {
-//                X.println(" -> put: " + i);
-
-                cache.put(i, new IndexedObject(i));
-            }
-
-            info(" --> step2");
-
-            for (int i = 0; i < 10_000; i += 3)
-                cache.put(i, new IndexedObject(i * 2));
-
-            info(" --> step3");
-
-            for (int i = 0; i < 10_000; i += 7)
-                cache.put(i, new IndexedObject(i * 3));
-
-            info(" --> check1");
-
-            // Check.
-            for (int i = 0; i < 10_000; i++) {
-                IndexedObject o;
-
-                if (i % 7 == 0)
-                    o = new IndexedObject(i * 3);
-                else if (i % 3 == 0)
-                    o = new IndexedObject(i * 2);
-                else if (i % 2 == 0)
-                    o = new IndexedObject(i);
-                else
-                    o = null;
-
-                assertEquals(o, cache.get(i));
-            }
-
-            stopGrid(1);
-
-            ignite = startGrid(1);
-
-            ignite.active(true);
-
-            cache = ignite.cache("partitioned");
-
-            info(" --> check2");
-
-            // Check.
-            for (int i = 0; i < 10_000; i++) {
-                IndexedObject o;
-
-                if (i % 7 == 0)
-                    o = new IndexedObject(i * 3);
-                else if (i % 3 == 0)
-                    o = new IndexedObject(i * 2);
-                else if (i % 2 == 0)
-                    o = new IndexedObject(i);
-                else
-                    o = null;
-
-                assertEquals(o, cache.get(i));
-            }
-
-            info(" --> ok");
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testWalLargeValue() throws Exception {
-        try {
-            IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 10_000; i++) {
-                final byte[] data = new byte[i];
-
-                Arrays.fill(data, (byte)i);
-
-                cache.put(i, data);
-
-                if (i % 1000 == 0)
-                    X.println(" ---> put: " + i);
-
-//                Assert.assertArrayEquals(data, (byte[])cache.get(i));
-            }
-
-//            info(" --> check1");
-//
-//            for (int i = 0; i < 25_000; i++) {
-//                final byte[] data = new byte[i];
-//
-//                Arrays.fill(data, (byte)i);
-//
-//                final byte[] loaded = (byte[]) cache.get(i);
-//
-//                Assert.assertArrayEquals(data, loaded);
-//            }
-
-            stopGrid(1);
-
-            ignite = startGrid(1);
-
-            ignite.active(true);
-
-            cache = ignite.cache("partitioned");
-
-            info(" --> check2");
-
-            for (int i = 0; i < 10_000; i++) {
-                final byte[] data = new byte[i];
-
-                Arrays.fill(data, (byte)i);
-
-                final byte[] loaded = (byte[]) cache.get(i);
-
-                Assert.assertArrayEquals(data, loaded);
-
-                if (i % 1000 == 0)
-                    X.println(" ---> get: " + i);
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalRolloverMultithreadedDefault() throws Exception {
-        logOnly = false;
-
-        checkWalRolloverMultithreaded();
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testWalRolloverMultithreadedLogOnly() throws Exception {
-        logOnly = true;
-
-        checkWalRolloverMultithreaded();
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testHugeCheckpointRecord() throws Exception {
-        try {
-            final IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            for (int i = 0; i < 50; i++) {
-                CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>("cache-" + i);
-
-                // We can get 'too many open files' with default number of partitions.
-                ccfg.setAffinity(new RendezvousAffinityFunction(false, 128));
-
-                IgniteCache<Object, Object> cache = ignite.getOrCreateCache(ccfg);
-
-                cache.put(i, i);
-            }
-
-            final long endTime = System.currentTimeMillis() + 30_000;
-
-            IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
-                @Override public Void call() throws Exception {
-                    Random rnd = ThreadLocalRandom.current();
-
-                    while (U.currentTimeMillis() < endTime) {
-                        IgniteCache<Object, Object> cache = ignite.cache("cache-" + rnd.nextInt(50));
-
-                        cache.put(rnd.nextInt(50_000), rnd.nextInt());
-                    }
-
-                    return null;
-                }
-            }, 16, "put-thread");
-
-            while (System.currentTimeMillis() < endTime) {
-                ignite.context().cache().context().database().wakeupForCheckpoint("test").get();
-
-                U.sleep(500);
-            }
-
-            fut.get();
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    private void checkWalRolloverMultithreaded() throws Exception {
-        walSegmentSize = 2 * 1024 * 1024;
-
-        final long endTime = System.currentTimeMillis() + 2 * 60 * 1000;
-
-        try {
-            IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            final IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            GridTestUtils.runMultiThreaded(new Callable<Void>() {
-                @Override public Void call() throws Exception {
-                    Random rnd = ThreadLocalRandom.current();
-
-                    while (U.currentTimeMillis() < endTime)
-                        cache.put(rnd.nextInt(50_000), rnd.nextInt());
-
-                    return null;
-                }
-            }, 16, "put-thread");
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testWalRenameDirSimple() throws Exception {
-        try {
-            IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 100; i++)
-                cache.put(i, new IndexedObject(i));
-
-            final Object consistentId = ignite.cluster().localNode().consistentId();
-
-            stopGrid(1);
-
-            final File cacheDir = cacheDir("partitioned", consistentId.toString());
-
-            final boolean renamed = cacheDir.renameTo(new File(cacheDir.getParent(), "cache-partitioned0"));
-
-            assert renamed;
-
-            cacheName = "partitioned0";
-
-            ignite = startGrid(1);
-
-            ignite.active(true);
-
-            cache = ignite.cache(cacheName);
-
-            for (int i = 0; i < 100; i++)
-                assertEquals(new IndexedObject(i), cache.get(i));
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @param cacheName Cache name.
-     * @param consId Consistent ID.
-     * @return Cache dir.
-     * @throws IgniteCheckedException If fail.
-     */
-    private File cacheDir(final String cacheName, final String consId) throws IgniteCheckedException {
-        final String subfolderName
-            = PdsConsistentIdProcessor.genNewStyleSubfolderName(0, UUID.fromString(consId));
-
-        final File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false);
-
-        assert dbDir.exists();
-
-        final File consIdDir = new File(dbDir.getAbsolutePath(), subfolderName);
-
-        assert consIdDir.exists();
-
-        final File cacheDir = new File(consIdDir.getAbsolutePath(), "cache-" + cacheName);
-
-        assert cacheDir.exists();
-
-        return cacheDir;
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testRecoveryNoCheckpoint() throws Exception {
-        try {
-            IgniteEx ctrlGrid = startGrid(0);
-
-            fork = true;
-
-            IgniteEx cacheGrid = startGrid(1);
-
-            ctrlGrid.active(true);
-
-            ctrlGrid.compute(ctrlGrid.cluster().forRemotes()).run(new LoadRunnable(false));
-
-            info("Killing remote process...");
-
-            ((IgniteProcessProxy)cacheGrid).kill();
-
-            final IgniteEx g0 = ctrlGrid;
-
-            GridTestUtils.waitForCondition(new PA() {
-                /** {@inheritDoc} */
-                @Override public boolean apply() {
-                    return g0.cluster().nodes().size() == 1;
-                }
-            }, getTestTimeout());
-
-            fork = false;
-
-            // Now start the grid and verify that updates were restored from WAL.
-            cacheGrid = startGrid(1);
-
-            IgniteCache<Object, Object> cache = cacheGrid.cache("partitioned");
-
-            for (int i = 0; i < 10_000; i++)
-                assertEquals(new IndexedObject(i), cache.get(i));
-
-            List<List<?>> res = cache.query(new SqlFieldsQuery("select count(iVal) from IndexedObject")).getAll();
-
-            assertEquals(1, res.size());
-            assertEquals(10_000L, res.get(0).get(0));
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testRecoveryLargeNoCheckpoint() throws Exception {
-        try {
-            IgniteEx ctrlGrid = startGrid(0);
-
-            fork = true;
-
-            IgniteEx cacheGrid = startGrid(1);
-
-            ctrlGrid.active(true);
-
-            ctrlGrid.compute(ctrlGrid.cluster().forRemotes()).run(new LargeLoadRunnable(false));
-
-            info("Killing remote process...");
-
-            ((IgniteProcessProxy)cacheGrid).kill();
-
-            final IgniteEx g0 = ctrlGrid;
-
-            GridTestUtils.waitForCondition(new PA() {
-                /** {@inheritDoc} */
-                @Override public boolean apply() {
-                    return g0.cluster().nodes().size() == 1;
-                }
-            }, getTestTimeout());
-
-            fork = false;
-
-            // Now start the grid and verify that updates were restored from WAL.
-            cacheGrid = startGrid(1);
-
-            IgniteCache<Object, Object> cache = cacheGrid.cache("partitioned");
-
-            for (int i = 0; i < 1000; i++) {
-                final long[] data = new long[LARGE_ARR_SIZE];
-
-                Arrays.fill(data, i);
-
-                final long[] loaded = (long[]) cache.get(i);
-
-                Assert.assertArrayEquals(data, loaded);
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override protected long getTestTimeout() {
-        return TimeUnit.MINUTES.toMillis(20);
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testRandomCrash() throws Exception {
-        try {
-            IgniteEx ctrlGrid = startGrid(0);
-
-            fork = true;
-
-            IgniteEx cacheGrid = startGrid(1);
-
-            ctrlGrid.active(true);
-
-            IgniteCompute rmt = ctrlGrid.compute(ctrlGrid.cluster().forRemotes());
-
-            rmt.run(new LoadRunnable(false));
-
-            info(">>> Finished cache population.");
-
-            rmt.run(new AsyncLoadRunnable());
-
-            Thread.sleep(20_000);
-
-            info(">>> Killing remote process...");
-
-            ((IgniteProcessProxy)cacheGrid).kill();
-
-            startGrid(1);
-
-            Boolean res = rmt.call(new VerifyCallable());
-
-            assertTrue(res);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testLargeRandomCrash() throws Exception {
-        try {
-            IgniteEx ctrlGrid = startGrid(0);
-
-            fork = true;
-
-            IgniteEx cacheGrid = startGrid(1);
-
-            ctrlGrid.active(true);
-
-            IgniteCompute rmt = ctrlGrid.compute(ctrlGrid.cluster().forRemotes());
-
-            rmt.run(new LargeLoadRunnable(false));
-
-            info(">>> Finished cache population.");
-
-            rmt.run(new AsyncLargeLoadRunnable());
-
-            Thread.sleep(20_000);
-
-            info(">>> Killing remote process...");
-
-            ((IgniteProcessProxy)cacheGrid).kill();
-
-            startGrid(1);
-
-            Boolean res = rmt.call(new VerifyLargeCallable());
-
-            assertTrue(res);
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     *
-     */
-    private static class RemoteNodeFilter implements IgnitePredicate<ClusterNode> {
-        /** {@inheritDoc} */
-        @Override public boolean apply(ClusterNode clusterNode) {
-            return clusterNode.attribute(HAS_CACHE) != null;
-        }
-    }
-
-    /**
-     * @throws Exception If failed.
-     */
-    public void testDestroyCache() throws Exception {
-        try {
-            IgniteEx ignite = startGrid(1);
-
-            ignite.active(true);
-
-            IgniteCache<Object, Object> cache = ignite.getOrCreateCache("test");
-
-            cache.put(1, new IndexedObject(1));
-
-            ignite.destroyCache("test");
-
-            cache = ignite.getOrCreateCache("test");
-
-            // No entry available after cache destroy.
-            assertNull(cache.get(1));
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testEvictPartition() throws Exception {
-        try {
-            Ignite ignite1 = startGrid("node1");
-
-            ignite1.active(true);
-
-            IgniteCache<Object, Object> cache1 = ignite1.cache(cacheName);
-
-            for (int i = 0; i < 100; i++)
-                cache1.put(i, new IndexedObject(i));
-
-            Ignite ignite2 = startGrid("node2");
-
-            IgniteCache<Object, Object> cache2 = ignite2.cache(cacheName);
-
-            for (int i = 0; i < 100; i++) {
-                assertEquals(new IndexedObject(i), cache1.get(i));
-                assertEquals(new IndexedObject(i), cache2.get(i));
-            }
-
-            ignite1.close();
-            ignite2.close();
-
-            ignite1 = startGrid("node1");
-            ignite2 = startGrid("node2");
-
-            ignite1.active(true);
-
-            cache1 = ignite1.cache(cacheName);
-            cache2 = ignite2.cache(cacheName);
-
-            for (int i = 0; i < 100; i++) {
-                assertEquals(new IndexedObject(i), cache1.get(i));
-                assertEquals(new IndexedObject(i), cache2.get(i));
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testMetastorage() throws Exception {
-        try {
-            int cnt = 5000;
-
-            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
-            IgniteEx ignite1 = (IgniteEx)startGrid("node2");
-
-            ignite1.active(true);
-
-            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
-            GridCacheSharedContext<Object, Object> sharedCtx1 = ignite1.context().cache().context();
-
-            MetaStorage storage0 = sharedCtx0.database().metaStorage();
-            MetaStorage storage1 = sharedCtx1.database().metaStorage();
-
-            assert storage0 != null;
-
-            for (int i = 0; i < cnt; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage0.putData(String.valueOf(i), new byte[]{(byte)(i % 256), 2, 3});
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-
-                byte[] b1 = new byte[i + 3];
-                b1[0] = 1;
-                b1[1] = 2;
-                b1[2] = 3;
-
-                sharedCtx1.database().checkpointReadLock();
-
-                try {
-                    storage1.putData(String.valueOf(i), b1);
-                }
-                finally {
-                    sharedCtx1.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] d1 = storage0.getData(String.valueOf(i));
-                assertEquals(3, d1.length);
-                assertEquals((byte)(i % 256), d1[0]);
-                assertEquals(2, d1[1]);
-                assertEquals(3, d1[2]);
-
-                byte[] d2 = storage1.getData(String.valueOf(i));
-                assertEquals(i + 3, d2.length);
-                assertEquals(1, d2[0]);
-                assertEquals(2, d2[1]);
-                assertEquals(3, d2[2]);
-            }
-
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testMetastorageLargeArray() throws Exception {
-        try {
-            int cnt = 5000;
-            int arraySize = 32_768;
-
-            IgniteEx ignite = (IgniteEx)startGrid("node1");
-
-            ignite.active(true);
-
-            GridCacheSharedContext<Object, Object> sharedCtx = ignite.context().cache().context();
-
-            MetaStorage storage = sharedCtx.database().metaStorage();
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] b1 = new byte[arraySize];
-                for (int k = 0; k < arraySize; k++) {
-                    b1[k] = (byte) (k % 100);
-                }
-
-                sharedCtx.database().checkpointReadLock();
-
-                try {
-                    storage.putData(String.valueOf(i), b1);
-                }
-                finally {
-                    sharedCtx.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] d2 = storage.getData(String.valueOf(i));
-                assertEquals(arraySize, d2.length);
-
-                for (int k = 0; k < arraySize; k++) {
-                    assertEquals((byte) (k % 100), d2[k]);
-                }
-            }
-
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testMetastorageRemove() throws Exception {
-        try {
-            int cnt = 400;
-
-            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
-
-            ignite0.active(true);
-
-            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
-
-            MetaStorage storage = sharedCtx0.database().metaStorage();
-
-            assert storage != null;
-
-            for (int i = 0; i < cnt; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < 10; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage.removeData(String.valueOf(i));
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 10; i < cnt; i++) {
-                byte[] d1 = storage.getData(String.valueOf(i));
-                assertEquals(3, d1.length);
-                assertEquals(1, d1[0]);
-                assertEquals(2, d1[1]);
-                assertEquals(3, d1[2]);
-            }
-
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testMetastorageUpdate() throws Exception {
-        try {
-            int cnt = 2000;
-
-            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
-
-            ignite0.active(true);
-
-            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
-
-            MetaStorage storage = sharedCtx0.database().metaStorage();
-
-            assert storage != null;
-
-            for (int i = 0; i < cnt; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < cnt; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage.putData(String.valueOf(i), new byte[]{2, 2, 3, 4});
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] d1 = storage.getData(String.valueOf(i));
-                assertEquals(4, d1.length);
-                assertEquals(2, d1[0]);
-                assertEquals(2, d1[1]);
-                assertEquals(3, d1[2]);
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception If fail.
-     */
-    public void testMetastorageWalRestore() throws Exception {
-        try {
-            int cnt = 2000;
-
-            IgniteEx ignite0 = (IgniteEx)startGrid(0);
-
-            ignite0.active(true);
-
-            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
-
-            MetaStorage storage = sharedCtx0.database().metaStorage();
-
-            assert storage != null;
-
-            for (int i = 0; i < cnt; i++) {
-                sharedCtx0.database().checkpointReadLock();
-
-                try {
-                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
-                }
-                finally {
-                    sharedCtx0.database().checkpointReadUnlock();
-                }
-            }
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] value = storage.getData(String.valueOf(i));
-                assert value != null;
-                assert value.length == 3;
-            }
-
-            stopGrid(0);
-
-            ignite0 = startGrid(0);
-
-            ignite0.active(true);
-
-            sharedCtx0 = ignite0.context().cache().context();
-
-            storage = sharedCtx0.database().metaStorage();
-
-            assert storage != null;
-
-            for (int i = 0; i < cnt; i++) {
-                byte[] value = storage.getData(String.valueOf(i));
-                assert value != null;
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * @throws Exception if failed.
-     */
-    public void testApplyDeltaRecords() throws Exception {
-        try {
-            IgniteEx ignite0 = (IgniteEx)startGrid("node0");
-
-            ignite0.active(true);
-
-            IgniteCache<Object, Object> cache0 = ignite0.cache(cacheName);
-
-            for (int i = 0; i < 1000; i++)
-                cache0.put(i, new IndexedObject(i));
-
-            GridCacheSharedContext<Object, Object> sharedCtx = ignite0.context().cache().context();
-
-            GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
-
-            db.waitForCheckpoint("test");
-            db.enableCheckpoints(false).get();
-
-            // Log something to know where to start.
-            WALPointer ptr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));
-
-            info("Replay marker: " + ptr);
-
-            for (int i = 1000; i < 5000; i++)
-                cache0.put(i, new IndexedObject(i));
-
-            info("Done puts...");
-
-            for (int i = 2_000; i < 3_000; i++)
-                cache0.remove(i);
-
-            info("Done removes...");
-
-            for (int i = 5000; i < 6000; i++)
-                cache0.put(i, new IndexedObject(i));
-
-            info("Done puts...");
-
-            Map<FullPageId, byte[]> rolledPages = new HashMap<>();
-
-            int pageSize = sharedCtx.database().pageSize();
-
-            ByteBuffer buf = ByteBuffer.allocateDirect(pageSize);
-
-            // Now check that deltas can be correctly applied.
-            try (WALIterator it = sharedCtx.wal().replay(ptr)) {
-                while (it.hasNext()) {
-                    IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
-
-                    WALRecord rec = tup.get2();
-
-                    if (rec instanceof PageSnapshot) {
-                        PageSnapshot page = (PageSnapshot)rec;
-
-                        rolledPages.put(page.fullPageId(), page.pageData());
-                    }
-                    else if (rec instanceof PageDeltaRecord) {
-                        PageDeltaRecord delta = (PageDeltaRecord)rec;
-
-                        FullPageId fullId = new FullPageId(delta.pageId(), delta.groupId());
-
-                        byte[] pageData = rolledPages.get(fullId);
-
-                        if (pageData == null) {
-                            pageData = new byte[pageSize];
-
-                            rolledPages.put(fullId, pageData);
-                        }
-
-                        assertNotNull("Missing page snapshot [page=" + fullId + ", delta=" + delta + ']', pageData);
-
-                        buf.order(ByteOrder.nativeOrder());
-
-                        buf.position(0);
-                        buf.put(pageData);
-                        buf.position(0);
-
-                        delta.applyDelta(sharedCtx.database().dataRegion(null).pageMemory(),
-                            GridUnsafe.bufferAddress(buf));
-
-                        buf.position(0);
-
-                        buf.get(pageData);
-                    }
-                }
-            }
-
-            info("Done apply...");
-
-            PageMemoryEx pageMem = (PageMemoryEx)db.dataRegion(null).pageMemory();
-
-            for (Map.Entry<FullPageId, byte[]> entry : rolledPages.entrySet()) {
-                FullPageId fullId = entry.getKey();
-
-                ignite0.context().cache().context().database().checkpointReadLock();
-
-                try {
-                    long page = pageMem.acquirePage(fullId.groupId(), fullId.pageId(), true);
-
-                    try {
-                        long bufPtr = pageMem.writeLock(fullId.groupId(), fullId.pageId(), page, true);
-
-                        try {
-                            byte[] data = entry.getValue();
-
-                            for (int i = 0; i < data.length; i++) {
-                                if (fullId.pageId() == TrackingPageIO.VERSIONS.latest().trackingPageFor(fullId.pageId(), db.pageSize()))
-                                    continue; // Skip tracking pages.
-
-                                assertEquals("page=" + fullId + ", pos=" + i, PageUtils.getByte(bufPtr, i), data[i]);
-                            }
-                        }
-                        finally {
-                            pageMem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, false, true);
-                        }
-                    }
-                    finally {
-                        pageMem.releasePage(fullId.groupId(), fullId.pageId(), page);
-                    }
-                }
-                finally {
-                    ignite0.context().cache().context().database().checkpointReadUnlock();
-                }
-            }
-
-            ignite0.close();
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * Test recovery from WAL on 3 nodes in case of transactional cache.
-     *
-     * @throws Exception If fail.
-     */
-    public void testRecoveryOnTransactionalAndPartitionedCache() throws Exception {
-        IgniteEx ignite = (IgniteEx) startGrids(3);
-        ignite.active(true);
-
-        try {
-            final String cacheName = "transactional";
-
-            CacheConfiguration<Object, Object> cacheConfiguration = new CacheConfiguration<>(cacheName)
-                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
-                    .setAffinity(new RendezvousAffinityFunction(false, 32))
-                    .setCacheMode(CacheMode.PARTITIONED)
-                    .setRebalanceMode(CacheRebalanceMode.SYNC)
-                    .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
-                    .setBackups(2);
-
-            ignite.createCache(cacheConfiguration);
-
-            IgniteCache<Object, Object> cache = ignite.cache(cacheName);
-            Map<Object, Object> map = new HashMap<>();
-
-            final int transactions = 100;
-            final int operationsPerTransaction = 40;
-
-            Random random = new Random();
-
-            for (int t = 1; t <= transactions; t++) {
-                Transaction tx = ignite.transactions().txStart(
-                        TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
-
-                Map<Object, Object> changesInTransaction = new HashMap<>();
-
-                for (int op = 0; op < operationsPerTransaction; op++) {
-                    int key = random.nextInt(1000) + 1;
-
-                    Object value;
-                    if (random.nextBoolean())
-                        value = randomString(random) + key;
-                    else
-                        value = new BigObject(key);
-
-                    changesInTransaction.put(key, value);
-
-                    cache.put(key, value);
-                }
-
-                if (random.nextBoolean()) {
-                    tx.commit();
-                    map.putAll(changesInTransaction);
-                }
-                else {
-                    tx.rollback();
-                }
-
-                if (t % 50 == 0)
-                    log.info("Finished transaction " + t);
-            }
-
-            stopAllGrids();
-
-            ignite = (IgniteEx) startGrids(3);
-            ignite.active(true);
-
-            cache = ignite.cache(cacheName);
-
-            for (Object key : map.keySet()) {
-                Object expectedValue = map.get(key);
-                Object actualValue = cache.get(key);
-                Assert.assertEquals("Unexpected value for key " + key, expectedValue, actualValue);
-            }
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * Test that all DataRecord WAL records are within transaction boundaries - PREPARED and COMMITTED markers.
-     *
-     * @throws Exception If any fail.
-     */
-    public void testTxRecordsConsistency() throws Exception {
-        System.setProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS, "true");
-
-        IgniteEx ignite = (IgniteEx) startGrids(3);
-        ignite.active(true);
-
-        try {
-            final String cacheName = "transactional";
-
-            CacheConfiguration<Object, Object> cacheConfiguration = new CacheConfiguration<>(cacheName)
-                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
-                    .setAffinity(new RendezvousAffinityFunction(false, 32))
-                    .setCacheMode(CacheMode.PARTITIONED)
-                    .setRebalanceMode(CacheRebalanceMode.SYNC)
-                    .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
-                    .setBackups(0);
-
-            ignite.createCache(cacheConfiguration);
-
-            IgniteCache<Object, Object> cache = ignite.cache(cacheName);
-
-            GridCacheSharedContext<Object, Object> sharedCtx = ignite.context().cache().context();
-
-            GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
-
-            db.waitForCheckpoint("test");
-            db.enableCheckpoints(false).get();
-
-            // Log something to know where to start.
-            WALPointer startPtr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));
-
-            final int transactions = 100;
-            final int operationsPerTransaction = 40;
-
-            Random random = new Random();
-
-            for (int t = 1; t <= transactions; t++) {
-                Transaction tx = ignite.transactions().txStart(
-                        TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
-
-                for (int op = 0; op < operationsPerTransaction; op++) {
-                    int key = random.nextInt(1000) + 1;
-
-                    Object value;
-                    if (random.nextBoolean())
-                        value = randomString(random) + key;
-                    else
-                        value = new BigObject(key);
-
-                    cache.put(key, value);
-                }
-
-                if (random.nextBoolean()) {
-                    tx.commit();
-                }
-                else {
-                    tx.rollback();
-                }
-
-                if (t % 50 == 0)
-                    log.info("Finished transaction " + t);
-            }
-
-            Set<GridCacheVersion> activeTransactions = new HashSet<>();
-
-            // Check that all DataRecords are within PREPARED and COMMITTED tx records.
-            try (WALIterator it = sharedCtx.wal().replay(startPtr)) {
-                while (it.hasNext()) {
-                    IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
-
-                    WALRecord rec = tup.get2();
-
-                    if (rec instanceof TxRecord) {
-                        TxRecord txRecord = (TxRecord) rec;
-                        GridCacheVersion txId = txRecord.nearXidVersion();
-
-                        switch (txRecord.state()) {
-                            case PREPARED:
-                                assert !activeTransactions.contains(txId) : "Transaction is already present " + txRecord;
-
-                                activeTransactions.add(txId);
-
-                                break;
-                            case COMMITTED:
-                                assert activeTransactions.contains(txId) : "No PREPARE marker for transaction " + txRecord;
-
-                                activeTransactions.remove(txId);
-
-                                break;
-                            case ROLLED_BACK:
-                                activeTransactions.remove(txId);
-                                break;
-
-                            default:
-                                throw new IllegalStateException("Unknown Tx state of record " + txRecord);
-                        }
-                    } else if (rec instanceof DataRecord) {
-                        DataRecord dataRecord = (DataRecord) rec;
-
-                        for (DataEntry entry : dataRecord.writeEntries()) {
-                            GridCacheVersion txId = entry.nearXidVersion();
-
-                            assert activeTransactions.contains(txId) : "No transaction for entry " + entry;
-                        }
-                    }
-                }
-            }
-        }
-        finally {
-            System.clearProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS);
-            stopAllGrids();
-        }
-    }
-
-    /**
-     * Generate random lowercase string for test purposes.
-     */
-    private String randomString(Random random) {
-        int len = random.nextInt(50) + 1;
-
-        StringBuilder sb = new StringBuilder();
-        for (int i = 0; i < len; i++)
-            sb.append(random.nextInt(26) + 'a');
-
-        return sb.toString();
-    }
-
-    /**
-     * BigObject for test purposes that don't fit in page size.
-     */
-    private static class BigObject {
-        private final int index;
-
-        private final byte[] payload = new byte[4096];
-
-        BigObject(int index) {
-            this.index = index;
-            // Create pseudo-random array.
-            for (int i = 0; i < payload.length; i++)
-                if (i % index == 0)
-                    payload[i] = (byte) index;
-        }
-
-        @Override
-        public boolean equals(Object o) {
-            if (this == o) return true;
-            if (o == null || getClass() != o.getClass()) return false;
-            BigObject bigObject = (BigObject) o;
-            return index == bigObject.index &&
-                    Arrays.equals(payload, bigObject.payload);
-        }
-
-        @Override
-        public int hashCode() {
-            return Objects.hash(index, payload);
-        }
-    }
-
-    /**
-     * @param size Size of data.
-     * @return Test data.
-     */
-    private int[] createTestData(int size) {
-        int[] data = new int[size];
-
-        for (int d = 0; d < size; ++d)
-            data[d] = d;
-
-        return data;
-    }
-
-    /**
-     *
-     */
-    private static class LoadRunnable implements IgniteRunnable {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** */
-        private boolean disableCheckpoints;
-
-        /**
-         * @param disableCheckpoints Disable checkpoints flag.
-         */
-        private LoadRunnable(boolean disableCheckpoints) {
-            this.disableCheckpoints = disableCheckpoints;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void run() {
-            ignite.log().info("Started load.");
-
-            if (disableCheckpoints) {
-                GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                    .cache().context().database();
-
-                try {
-                    dbMgr.enableCheckpoints(false).get();
-                }
-                catch (IgniteCheckedException e) {
-                    throw new IgniteException(e);
-                }
-            }
-
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 10_000; i++)
-                cache.put(i, new IndexedObject(i));
-
-            ignite.log().info("Finished load.");
-        }
-    }
-
-    /**
-     *
-     */
-    private static class AsyncLoadRunnable implements IgniteRunnable {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** {@inheritDoc} */
-        @Override public void run() {
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            ignite.log().info(">>>>>>> Started load.");
-
-            for (int i = 0; i < 4; i++) {
-                ignite.scheduler().callLocal(new Callable<Object>() {
-                    @Override public Object call() throws Exception {
-                        IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-                        ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-                        int cnt = 0;
-
-                        while (!Thread.currentThread().isInterrupted()) {
-                            cache.put(rnd.nextInt(10_000), new IndexedObject(rnd.nextInt()));
-
-                            cnt++;
-
-                            if (cnt > 0 && cnt % 1_000 == 0)
-                                ignite.log().info(">>>> Updated: " + cnt);
-                        }
-
-                        return null;
-                    }
-                });
-            }
-        }
-    }
-
-    /**
-     *
-     */
-    private static class VerifyCallable implements IgniteCallable<Boolean> {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** {@inheritDoc} */
-        @Override public Boolean call() throws Exception {
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 10_000; i++) {
-                Object val = cache.get(i);
-
-                if (val == null) {
-                    ignite.log().warning("Failed to find a value for key: " + i);
-
-                    return false;
-                }
-            }
-
-            return true;
-        }
-    }
-
-    /**
-     *
-     */
-    private static class LargeLoadRunnable implements IgniteRunnable {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** */
-        private boolean disableCheckpoints;
-
-        /**
-         * @param disableCheckpoints Disable checkpoints flag.
-         */
-        private LargeLoadRunnable(boolean disableCheckpoints) {
-            this.disableCheckpoints = disableCheckpoints;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void run() {
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            ignite.log().info("Started load.");
-
-            if (disableCheckpoints) {
-                GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                    .cache().context().database();
-
-                dbMgr.enableCheckpoints(false);
-            }
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 1000; i++) {
-                final long[] data = new long[LARGE_ARR_SIZE];
-
-                Arrays.fill(data, i);
-
-                cache.put(i, data);
-            }
-
-            ignite.log().info("Finished load.");
-        }
-    }
-
-    /**
-     *
-     */
-    private static class AsyncLargeLoadRunnable implements IgniteRunnable {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** {@inheritDoc} */
-        @Override public void run() {
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            ignite.log().info(">>>>>>> Started load.");
-
-            for (int i = 0; i < 1; i++) {
-                ignite.scheduler().callLocal(new Callable<Object>() {
-                    @Override public Object call() throws Exception {
-                        IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-                        ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-                        int cnt = 0;
-
-                        while (!Thread.currentThread().isInterrupted()) {
-                            final long[] data = new long[LARGE_ARR_SIZE];
-
-                            final int key = rnd.nextInt(1000);
-
-                            Arrays.fill(data, key);
-
-//                            System.out.println("> " + key);
-
-                            cache.put(key, data);
-
-                            cnt++;
-
-                            if (cnt > 0 && cnt % 1_000 == 0)
-                                ignite.log().info(">>>> Updated: " + cnt);
-                        }
-
-                        return null;
-                    }
-                });
-            }
-        }
-    }
-
-    /**
-     *
-     */
-    private static class VerifyLargeCallable implements IgniteCallable<Boolean> {
-        /** */
-        @IgniteInstanceResource
-        private Ignite ignite;
-
-        /** {@inheritDoc} */
-        @Override public Boolean call() throws Exception {
-            try {
-                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
-                    @Override public boolean applyx() {
-                        return ignite.cache("partitioned") != null;
-                    }
-                }, 10_000);
-
-                assertTrue(successfulWaiting);
-            }
-            catch (IgniteInterruptedCheckedException e) {
-                throw new RuntimeException(e);
-            }
-
-            IgniteCache<Object, Object> cache = ignite.cache("partitioned");
-
-            for (int i = 0; i < 1000; i++) {
-                final long[] data = new long[LARGE_ARR_SIZE];
-
-                Arrays.fill(data, i);
-
-                final Object val = cache.get(i);
-
-                if (val == null) {
-                    ignite.log().warning("Failed to find a value for key: " + i);
-
-                    return false;
-                }
-
-                assertTrue(Arrays.equals(data, (long[])val));
-            }
-
-            return true;
-        }
-    }
-
-
-    /**
-     *
-     */
-    private static class IndexedObject {
-        /** */
-        @QuerySqlField(index = true)
-        private int iVal;
-
-        /**
-         * @param iVal Integer value.
-         */
-        private IndexedObject(int iVal) {
-            this.iVal = iVal;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean equals(Object o) {
-            if (this == o)
-                return true;
-
-            if (!(o instanceof IndexedObject))
-                return false;
-
-            IndexedObject that = (IndexedObject)o;
-
-            return iVal == that.iVal;
-        }
-
-        /** {@inheritDoc} */
-        @Override public int hashCode() {
-            return iVal;
-        }
-
-        /** {@inheritDoc} */
-        @Override public String toString() {
-            return S.toString(IndexedObject.class, this);
-        }
-    }
-
-    /**
-     *
-     */
-    private enum EnumVal {
-        /** */
-        VAL1,
-
-        /** */
-        VAL2,
-
-        /** */
-        VAL3
-    }
-}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalCompactionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalCompactionTest.java
index 938465c..e617455 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalCompactionTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalCompactionTest.java
@@ -33,7 +33,8 @@
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.pagemem.FullPageId;
 import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;
-import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
@@ -163,7 +164,7 @@
         File walDir = new File(dbDir, "wal");
         File archiveDir = new File(walDir, "archive");
         File nodeArchiveDir = new File(archiveDir, nodeFolderName);
-        File walSegment = new File(nodeArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(0) + ".zip");
+        File walSegment = new File(nodeArchiveDir, FileDescriptor.fileName(0) + FilePageStoreManager.ZIP_SUFFIX);
 
         assertTrue(walSegment.exists());
         assertTrue(walSegment.length() < WAL_SEGMENT_SIZE / 2); // Should be compressed at least in half.
@@ -268,7 +269,7 @@
         File walDir = new File(dbDir, "wal");
         File archiveDir = new File(walDir, "archive");
         File nodeArchiveDir = new File(archiveDir, nodeFolderName);
-        File walSegment = new File(nodeArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(emptyIdx));
+        File walSegment = new File(nodeArchiveDir, FileDescriptor.fileName(emptyIdx));
 
         try (RandomAccessFile raf = new RandomAccessFile(walSegment, "rw")) {
             raf.setLength(0); // Clear wal segment, but don't delete.
@@ -358,7 +359,7 @@
         File walDir = new File(dbDir, "wal");
         File archiveDir = new File(walDir, "archive");
         File nodeArchiveDir = new File(archiveDir, nodeFolderName);
-        File walSegment = new File(nodeArchiveDir, FileWriteAheadLogManager.FileDescriptor.fileName(0) + ".zip");
+        File walSegment = new File(nodeArchiveDir, FileDescriptor.fileName(0) + FilePageStoreManager.ZIP_SUFFIX);
 
         assertTrue(walSegment.exists());
         assertTrue(walSegment.length() < WAL_SEGMENT_SIZE / 2); // Should be compressed at least in half.
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java
new file mode 100644
index 0000000..1cf237c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveAbstractTest.java
@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal;
+
+import java.io.File;
+import java.util.function.Consumer;
+import java.util.stream.Stream;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointHistory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE;
+
+/**
+ *
+ */
+public abstract class WalDeletionArchiveAbstractTest extends GridCommonAbstractTest {
+    /** */
+    public static final String CACHE_NAME = "SomeCache";
+
+    /**
+     * Start grid with override default configuration via customConfigurator.
+     */
+    private Ignite startGrid(Consumer<DataStorageConfiguration> customConfigurator) throws Exception {
+        IgniteConfiguration configuration = getConfiguration(getTestIgniteInstanceName());
+
+        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
+
+        dbCfg.setWalMode(walMode());
+        dbCfg.setWalSegmentSize(512 * 1024);
+        dbCfg.setCheckpointFrequency(60 * 1000);//too high value for turn off frequency checkpoint.
+        dbCfg.setPageSize(4 * 1024);
+        dbCfg.setDefaultDataRegionConfiguration(new DataRegionConfiguration()
+            .setMaxSize(100 * 1024 * 1024)
+            .setPersistenceEnabled(true));
+
+        customConfigurator.accept(dbCfg);
+
+        configuration.setDataStorageConfiguration(dbCfg);
+
+        Ignite ignite = startGrid(configuration);
+
+        ignite.active(true);
+
+        return ignite;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+
+        System.clearProperty(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @return WAL mode used in test.
+     */
+    abstract protected WALMode walMode();
+
+    /**
+     * History size parameters consistency check. Should be set just one of wal history size or max wal archive size.
+     */
+    public void testGridDoesNotStart_BecauseBothWalHistorySizeAndMaxWalArchiveSizeUsed() throws Exception {
+        //given: wal history size and max wal archive size are both set.
+        IgniteConfiguration configuration = getConfiguration(getTestIgniteInstanceName());
+
+        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
+        dbCfg.setWalHistorySize(12);
+        dbCfg.setMaxWalArchiveSize(9);
+        configuration.setDataStorageConfiguration(dbCfg);
+
+        try {
+            //when: start grid.
+            startGrid(getTestIgniteInstanceName(), configuration);
+            fail("Should be fail because both wal history size and max wal archive size was used");
+        }
+        catch (IgniteException e) {
+            //then: exception is occurrence because should be set just one parameters.
+            assertTrue(findSourceMessage(e).startsWith("Should be used only one of wal history size or max wal archive size"));
+        }
+    }
+
+    /**
+     * find first cause's message
+     */
+    private String findSourceMessage(Throwable ex) {
+        return ex.getCause() == null ? ex.getMessage() : findSourceMessage(ex.getCause());
+    }
+
+    /**
+     * Correct delete archived wal files.
+     */
+    public void testCorrectDeletedArchivedWalFiles() throws Exception {
+        //given: configured grid with setted max wal archive size
+        long maxWalArchiveSize = 2 * 1024 * 1024;
+        Ignite ignite = startGrid(dbCfg -> {
+            dbCfg.setMaxWalArchiveSize(maxWalArchiveSize);
+        });
+
+        GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite);
+
+        long allowedThresholdWalArchiveSize = maxWalArchiveSize / 2;
+
+        IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(CACHE_NAME);
+
+        //when: put to cache more than 2 MB
+        for (int i = 0; i < 500; i++)
+            cache.put(i, i);
+
+        forceCheckpoint();
+
+        //then: total archive size less than half of maxWalArchiveSize(by current logic)
+        IgniteWriteAheadLogManager wal = wal(ignite);
+
+        FileDescriptor[] files = (FileDescriptor[])U.findNonPublicMethod(wal.getClass(), "walArchiveFiles").invoke(wal);
+
+        Long totalSize = Stream.of(files)
+            .map(desc -> desc.file().length())
+            .reduce(0L, Long::sum);
+
+        assertTrue(files.length >= 1);
+        assertTrue(totalSize <= allowedThresholdWalArchiveSize);
+        assertFalse(Stream.of(files).anyMatch(desc -> desc.file().getName().endsWith("00001.wal")));
+
+        CheckpointHistory hist = dbMgr.checkpointHistory();
+
+        assertTrue(hist.checkpoints().size() > 0);
+    }
+
+    /**
+     * Checkpoint triggered depends on wal size.
+     */
+    public void testCheckpointStarted_WhenWalHasTooBigSizeWithoutCheckpoint() throws Exception {
+        //given: configured grid with max wal archive size = 1MB, wal segment size = 512KB
+        Ignite ignite = startGrid(dbCfg -> {
+            dbCfg.setMaxWalArchiveSize(1 * 1024 * 1024);// 1 Mbytes
+        });
+
+        GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite);
+
+        IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(CACHE_NAME);
+
+        for (int i = 0; i < 500; i++)
+            cache.put(i, i);
+
+        //then: checkpoint triggered by size limit of wall without checkpoint
+        GridCacheDatabaseSharedManager.Checkpointer checkpointer = dbMgr.getCheckpointer();
+
+        String checkpointReason = U.field((Object)U.field(checkpointer, "curCpProgress"), "reason");
+
+        assertEquals("too big size of WAL without checkpoint", checkpointReason);
+    }
+
+    /**
+     * Test for check deprecated removing checkpoint by deprecated walHistorySize parameter
+     *
+     * @deprecated Test old removing process depends on WalHistorySize.
+     */
+    public void testCheckpointHistoryRemovingByWalHistorySize() throws Exception {
+        //given: configured grid with wal history size = 10
+        int walHistorySize = 10;
+
+        Ignite ignite = startGrid(dbCfg -> {
+            dbCfg.setWalHistorySize(walHistorySize);
+        });
+
+        GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite);
+
+        IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(CACHE_NAME);
+
+        //when: put to cache and do checkpoint
+        int testNumberOfCheckpoint = walHistorySize * 2;
+
+        for (int i = 0; i < testNumberOfCheckpoint; i++) {
+            cache.put(i, i);
+            //and: wait for checkpoint finished
+            forceCheckpoint();
+        }
+
+        //then: number of checkpoints less or equal than walHistorySize
+        CheckpointHistory hist = dbMgr.checkpointHistory();
+        assertTrue(hist.checkpoints().size() == walHistorySize);
+
+        File[] cpFiles = dbMgr.checkpointDirectory().listFiles();
+
+        assertTrue(cpFiles.length <= (walHistorySize * 2 + 1));// starts & ends + node_start
+    }
+
+    /**
+     * Correct delete checkpoint history from memory depends on IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE. WAL files
+     * doesn't delete because deleting was disabled.
+     */
+    public void testCorrectDeletedCheckpointHistoryButKeepWalFiles() throws Exception {
+        System.setProperty(IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, "2");
+        //given: configured grid with disabled WAL removing.
+        Ignite ignite = startGrid(dbCfg -> {
+            dbCfg.setMaxWalArchiveSize(Long.MAX_VALUE);
+        });
+
+        GridCacheDatabaseSharedManager dbMgr = gridDatabase(ignite);
+
+        IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(CACHE_NAME);
+
+        //when: put to cache
+        for (int i = 0; i < 500; i++) {
+            cache.put(i, i);
+
+            if (i % 10 == 0)
+                forceCheckpoint();
+        }
+
+        forceCheckpoint();
+
+        //then: WAL files was not deleted but some of checkpoint history was deleted.
+        IgniteWriteAheadLogManager wal = wal(ignite);
+
+        FileDescriptor[] files = (FileDescriptor[])U.findNonPublicMethod(wal.getClass(), "walArchiveFiles").invoke(wal);
+
+        boolean hasFirstSegment = Stream.of(files)
+            .anyMatch(desc -> desc.file().getName().endsWith("0001.wal"));
+
+        assertTrue(hasFirstSegment);
+
+        CheckpointHistory hist = dbMgr.checkpointHistory();
+
+        assertTrue(hist.checkpoints().size() == 2);
+    }
+
+    /**
+     * Extract GridCacheDatabaseSharedManager.
+     */
+    private GridCacheDatabaseSharedManager gridDatabase(Ignite ignite) {
+        return (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context().cache().context().database();
+    }
+
+    /**
+     * Extract IgniteWriteAheadLogManager.
+     */
+    private IgniteWriteAheadLogManager wal(Ignite ignite) {
+        return ((IgniteEx)ignite).context().cache().context().wal();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveFsyncTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveFsyncTest.java
new file mode 100644
index 0000000..64a3185
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveFsyncTest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal;
+
+import org.apache.ignite.configuration.WALMode;
+
+/**
+ *
+ */
+public class WalDeletionArchiveFsyncTest extends WalDeletionArchiveAbstractTest {
+
+    /** {@inheritDoc} */
+    @Override protected WALMode walMode() {
+        return WALMode.FSYNC;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveLogOnlyTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveLogOnlyTest.java
new file mode 100644
index 0000000..b1cdf7a
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalDeletionArchiveLogOnlyTest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal;
+
+import org.apache.ignite.configuration.WALMode;
+
+/**
+ *
+ */
+public class WalDeletionArchiveLogOnlyTest extends WalDeletionArchiveAbstractTest {
+
+    /** {@inheritDoc} */
+    @Override protected WALMode walMode() {
+        return WALMode.LOG_ONLY;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java
index 5abcff4..f84e2b9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/WalRecoveryTxLogicalRecordsTest.java
@@ -473,50 +473,6 @@
     }
 
     /**
-     * @throws Exception if failed.
-     */
-    public void testCheckpointHistory() throws Exception {
-        Ignite ignite = startGrid();
-
-        ignite.cluster().active(true);
-
-        try {
-            GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
-                .cache().context().database();
-
-            dbMgr.waitForCheckpoint("test");
-
-            // This number depends on wal history size.
-            int entries = WAL_HIST_SIZE * 2;
-
-            IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
-
-            for (int i = 0; i < entries; i++) {
-                // Put to partition 0.
-                cache.put(i * PARTS, i * PARTS);
-
-                // Put to partition 1.
-                cache.put(i * PARTS + 1, i * PARTS + 1);
-
-                dbMgr.waitForCheckpoint("test");
-            }
-
-            CheckpointHistory hist = dbMgr.checkpointHistory();
-
-            assertTrue(hist.checkpoints().size() <= WAL_HIST_SIZE);
-
-            File cpDir = dbMgr.checkpointDirectory();
-
-            File[] cpFiles = cpDir.listFiles();
-
-            assertTrue(cpFiles.length <= WAL_HIST_SIZE * 2 + 1); // starts & ends + node_start
-        }
-        finally {
-            stopAllGrids();
-        }
-    }
-
-    /**
      * @throws Exception If failed.
      */
     public void testWalAfterPreloading() throws Exception {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteAbstractWalIteratorInvalidCrcTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteAbstractWalIteratorInvalidCrcTest.java
new file mode 100644
index 0000000..0b53bb8
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteAbstractWalIteratorInvalidCrcTest.java
@@ -0,0 +1,279 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal.crc;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.function.BiFunction;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
+import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
+import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException;
+import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.NotNull;
+
+import static java.nio.ByteBuffer.allocate;
+import static java.nio.file.StandardOpenOption.WRITE;
+import static org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordV1Serializer.CRC_SIZE;
+
+/**
+ *
+ */
+public abstract class IgniteAbstractWalIteratorInvalidCrcTest extends GridCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Size of inserting dummy value. */
+    private static final int VALUE_SIZE = 4 * 1024;
+
+    /** Size of WAL segment file. */
+    private static final int WAL_SEGMENT_SIZE = 1024 * 1024;
+
+    /** Count of WAL segment files in working directory. */
+    private static final int WAL_SEGMENTS = DataStorageConfiguration.DFLT_WAL_SEGMENTS;
+
+    /** Ignite instance. */
+    protected IgniteEx ignite;
+
+    /** Random instance for utility purposes. */
+    protected Random random = new Random();
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
+
+        cfg.setDataStorageConfiguration(
+            new DataStorageConfiguration()
+                .setWalSegmentSize(WAL_SEGMENT_SIZE)
+                .setWalMode(getWalMode())
+                .setDefaultDataRegionConfiguration(
+                    new DataRegionConfiguration()
+                        .setPersistenceEnabled(true)
+                )
+        );
+
+        cfg.setCacheConfiguration(new CacheConfiguration(DEFAULT_CACHE_NAME));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        cleanPersistenceDir();
+
+        ignite = (IgniteEx)startGrid();
+
+        ignite.cluster().active(true);
+
+        IgniteCache<Integer, byte[]> cache = ignite.cache(DEFAULT_CACHE_NAME);
+
+        byte[] val = new byte[VALUE_SIZE];
+
+        // Fill value with random data.
+        random.nextBytes(val);
+
+        // Amount of values that's enough to fill working dir at least twice.
+        int insertingCnt = 2 * WAL_SEGMENT_SIZE * WAL_SEGMENTS / VALUE_SIZE;
+        for (int i = 0; i < insertingCnt; i++)
+            cache.put(i, val);
+
+        ignite.cluster().active(false);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopGrid();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @return WAL mode that will be used in {@link IgniteConfiguration}.
+     */
+    @NotNull protected abstract WALMode getWalMode();
+
+    /**
+     * Instantiate WAL iterator according to the iterator type of specific implementation.
+     * @param walMgr WAL manager instance.
+     * @param ignoreArchiveDir Do not include archive segments in resulting iterator if this flag is true.
+     * @return WAL iterator instance.
+     * @throws IgniteCheckedException If iterator creation failed for some reason.
+     */
+    @NotNull protected abstract WALIterator getWalIterator(
+        IgniteWriteAheadLogManager walMgr,
+        boolean ignoreArchiveDir
+    ) throws IgniteCheckedException;
+
+    /**
+     * Test that iteration fails if one of archive segments contains record with invalid CRC.
+     * @throws Exception If failed.
+     */
+    public void testArchiveCorruptedPtr() throws Exception {
+        doTest((archiveDescs, descs) -> archiveDescs.get(random.nextInt(archiveDescs.size())), false, true);
+    }
+
+    /**
+     * Test that iteration fails if one of segments in working directory contains record with invalid CRC
+     * and it is not the tail segment.
+     * @throws Exception If failed.
+     */
+    public void testNotTailCorruptedPtr() throws Exception {
+        doTest((archiveDescs, descs) -> descs.get(random.nextInt(descs.size() - 1)), true, true);
+    }
+
+
+    /**
+     * Test that iteration does not fail if tail segment in working directory contains record with invalid CRC.
+     * @throws Exception If failed.
+     */
+    public void testTailCorruptedPtr() throws Exception {
+        doTest((archiveDescs, descs) -> descs.get(descs.size() - 1), false, false);
+    }
+
+    /**
+     * @param descPicker Function that picks WAL segment to corrupt from archive segments list
+     *      and working directory segments list.
+     * @param ignoreArchiveDir Do not iterate over archive segments if this flag is true.
+     * @param shouldFail Whether iteration is axpected to fail or not.
+     * @throws IOException If IO exception.
+     * @throws IgniteCheckedException If iterator failed.
+     */
+    protected void doTest(
+        BiFunction<List<FileDescriptor>, List<FileDescriptor>, FileDescriptor> descPicker,
+        boolean ignoreArchiveDir,
+        boolean shouldFail
+    ) throws IOException, IgniteCheckedException {
+        IgniteWriteAheadLogManager walMgr = ignite.context().cache().context().wal();
+
+        IgniteWalIteratorFactory iterFactory = new IgniteWalIteratorFactory();
+
+        File walArchiveDir = U.field(walMgr, "walArchiveDir");
+        List<FileDescriptor> archiveDescs = iterFactory.resolveWalFiles(
+            new IgniteWalIteratorFactory.IteratorParametersBuilder()
+                .filesOrDirs(walArchiveDir)
+        );
+
+        File walDir = U.field(walMgr, "walWorkDir");
+        List<FileDescriptor> descs = iterFactory.resolveWalFiles(
+            new IgniteWalIteratorFactory.IteratorParametersBuilder()
+                .filesOrDirs(walDir)
+        );
+
+        FileDescriptor corruptedDesc = descPicker.apply(archiveDescs, descs);
+
+        FileWALPointer beforeCorruptedPtr = corruptWalSegmentFile(
+            corruptedDesc,
+            iterFactory
+        );
+
+        if (shouldFail) {
+            FileWALPointer[] lastReadPtrRef = new FileWALPointer[1];
+
+            IgniteException igniteException = (IgniteException) GridTestUtils.assertThrows(log, () -> {
+                try (WALIterator iter = getWalIterator(walMgr, ignoreArchiveDir)) {
+                    for (IgniteBiTuple<WALPointer, WALRecord> tuple : iter) {
+                        FileWALPointer ptr = (FileWALPointer)tuple.get1();
+                        lastReadPtrRef[0] = ptr;
+                    }
+                }
+
+                return null;
+            }, IgniteException.class, "Failed to read WAL record");
+
+            assertTrue(igniteException.hasCause(IgniteDataIntegrityViolationException.class));
+
+            FileWALPointer lastReadPtr = lastReadPtrRef[0];
+            assertNotNull(lastReadPtr);
+
+            // WAL iterator advances to the next record and only then returns current one,
+            // so next record has to be valid as well.
+            assertEquals(lastReadPtr.next(), beforeCorruptedPtr);
+        }
+        else
+            try (WALIterator iter = getWalIterator(walMgr, ignoreArchiveDir)) {
+                while (iter.hasNext())
+                    iter.next();
+            }
+    }
+
+    /**
+     * Put zero CRC in one of records for the specified segment.
+     * @param desc WAL segment descriptor.
+     * @param iterFactory Iterator factory for segment iterating.
+     * @return Descriptor that is located strictly before the corrupted one.
+     * @throws IOException If IO exception.
+     * @throws IgniteCheckedException If iterator failed.
+     */
+    protected FileWALPointer corruptWalSegmentFile(
+        FileDescriptor desc,
+        IgniteWalIteratorFactory iterFactory
+    ) throws IOException, IgniteCheckedException {
+        List<FileWALPointer> pointers = new ArrayList<>();
+
+        try (WALIterator it = iterFactory.iterator(desc.file())) {
+            for (IgniteBiTuple<WALPointer, WALRecord> tuple : it) {
+                pointers.add((FileWALPointer) tuple.get1());
+            }
+        }
+
+        // Should have a previous record to return and another value before that to ensure that "lastReadPtr"
+        // in "doTest" will always exist.
+        int idxCorrupted = 2 + random.nextInt(pointers.size() - 2);
+
+        FileWALPointer pointer = pointers.get(idxCorrupted);
+        int crc32Off = pointer.fileOffset() + pointer.length() - CRC_SIZE;
+
+        ByteBuffer zeroCrc32 = allocate(CRC_SIZE); // Has 0 value by default.
+
+        FileIOFactory ioFactory = new RandomAccessFileIOFactory();
+        try (FileIO io = ioFactory.create(desc.file(), WRITE)) {
+            io.write(zeroCrc32, crc32Off);
+
+            io.force(true);
+        }
+
+        return pointers.get(idxCorrupted - 1);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteFsyncReplayWalIteratorInvalidCrcTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteFsyncReplayWalIteratorInvalidCrcTest.java
new file mode 100644
index 0000000..4629acb
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteFsyncReplayWalIteratorInvalidCrcTest.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal.crc;
+
+import org.apache.ignite.configuration.WALMode;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ *
+ */
+public class IgniteFsyncReplayWalIteratorInvalidCrcTest extends IgniteReplayWalIteratorInvalidCrcTest {
+    /** {@inheritDoc} */
+    @NotNull @Override protected WALMode getWalMode() {
+        return WALMode.FSYNC;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteReplayWalIteratorInvalidCrcTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteReplayWalIteratorInvalidCrcTest.java
new file mode 100644
index 0000000..756ef78
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteReplayWalIteratorInvalidCrcTest.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal.crc;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ *
+ */
+public class IgniteReplayWalIteratorInvalidCrcTest extends IgniteAbstractWalIteratorInvalidCrcTest {
+    /** {@inheritDoc} */
+    @NotNull @Override protected WALMode getWalMode() {
+        return WALMode.LOG_ONLY;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected WALIterator getWalIterator(
+        IgniteWriteAheadLogManager walMgr,
+        boolean ignoreArchiveDir
+    ) throws IgniteCheckedException {
+        if (ignoreArchiveDir)
+            throw new UnsupportedOperationException(
+                "Cannot invoke \"getWalIterator\" with true \"ignoreArchiveDir\" parameter value."
+            );
+        else
+            return walMgr.replay(null);
+    }
+
+    /**
+     * {@inheritDoc}
+     * Case is not relevant to the replay iterator.
+     */
+    @Override public void testNotTailCorruptedPtr() {
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteStandaloneWalIteratorInvalidCrcTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteStandaloneWalIteratorInvalidCrcTest.java
new file mode 100644
index 0000000..8802184
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/crc/IgniteStandaloneWalIteratorInvalidCrcTest.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal.crc;
+
+import java.io.File;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ *
+ */
+public class IgniteStandaloneWalIteratorInvalidCrcTest extends IgniteAbstractWalIteratorInvalidCrcTest {
+    /** {@inheritDoc} */
+    @NotNull @Override protected WALMode getWalMode() {
+        return WALMode.LOG_ONLY;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected WALIterator getWalIterator(
+        IgniteWriteAheadLogManager walMgr,
+        boolean ignoreArchiveDir
+    ) throws IgniteCheckedException {
+        File walArchiveDir = U.field(walMgr, "walArchiveDir");
+        File walDir = U.field(walMgr, "walWorkDir");
+
+        IgniteWalIteratorFactory iterFactory = new IgniteWalIteratorFactory();
+
+        return ignoreArchiveDir ? iterFactory.iterator(walDir) : iterFactory.iterator(walArchiveDir, walDir);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java
index 7c54d62..beab138 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/IgniteWalReaderTest.java
@@ -23,6 +23,7 @@
 import java.io.ObjectInput;
 import java.io.ObjectOutput;
 import java.io.Serializable;
+import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumMap;
 import java.util.HashMap;
@@ -31,6 +32,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Random;
 import java.util.TreeMap;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
@@ -40,6 +42,7 @@
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteDataStreamer;
 import org.apache.ignite.IgniteEvents;
 import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.binary.BinaryObject;
@@ -51,7 +54,6 @@
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.WALMode;
-import org.apache.ignite.events.EventType;
 import org.apache.ignite.events.WalSegmentArchivedEvent;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
@@ -64,9 +66,11 @@
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheOperation;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer;
 import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory;
 import org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory.IteratorParametersBuilder;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.typedef.T2;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.lang.IgniteBiTuple;
@@ -83,6 +87,7 @@
 
 import static java.util.Arrays.fill;
 import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED;
+import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_COMPACTED;
 import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.DATA_RECORD;
 import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.TX_RECORD;
 import static org.apache.ignite.internal.processors.cache.GridCacheOperation.CREATE;
@@ -119,10 +124,13 @@
     private WALMode customWalMode;
 
     /** Clear properties in afterTest() method. */
-    private boolean clearProperties;
+    private boolean clearProps;
 
     /** Set WAL and Archive path to same value. */
-    private boolean setWalAndArchiveToSameValue;
+    private boolean setWalAndArchiveToSameVal;
+
+    /** Whether to enable WAL archive compaction. */
+    private boolean enableWalCompaction;
 
     /** {@inheritDoc} */
     @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
@@ -139,17 +147,17 @@
 
         cfg.setCacheConfiguration(ccfg);
 
-        cfg.setIncludeEventTypes(EventType.EVT_WAL_SEGMENT_ARCHIVED);
+        cfg.setIncludeEventTypes(EVT_WAL_SEGMENT_ARCHIVED, EVT_WAL_SEGMENT_COMPACTED);
 
         DataStorageConfiguration dsCfg = new DataStorageConfiguration()
             .setDefaultDataRegionConfiguration(
                 new DataRegionConfiguration()
                     .setMaxSize(1024L * 1024 * 1024)
                     .setPersistenceEnabled(true))
-            .setWalHistorySize(1)
             .setWalSegmentSize(1024 * 1024)
             .setWalSegments(WAL_SEGMENTS)
-            .setWalMode(customWalMode != null ? customWalMode : WALMode.BACKGROUND);
+            .setWalMode(customWalMode != null ? customWalMode : WALMode.BACKGROUND)
+            .setWalCompactionEnabled(enableWalCompaction);
 
         if (archiveIncompleteSegmentAfterInactivityMs > 0)
             dsCfg.setWalAutoArchiveAfterInactivity(archiveIncompleteSegmentAfterInactivityMs);
@@ -158,12 +166,13 @@
         File db = U.resolveWorkDirectory(workDir, DFLT_STORE_DIR, false);
         File wal = new File(db, "wal");
 
-        if(setWalAndArchiveToSameValue) {
+        if(setWalAndArchiveToSameVal) {
             String walAbsPath = wal.getAbsolutePath();
 
             dsCfg.setWalPath(walAbsPath);
             dsCfg.setWalArchivePath(walAbsPath);
-        } else {
+        }
+        else {
             dsCfg.setWalPath(wal.getAbsolutePath());
             dsCfg.setWalArchivePath(new File(wal, "archive").getAbsolutePath());
         }
@@ -186,7 +195,7 @@
 
         cleanPersistenceDir();
 
-        if (clearProperties)
+        if (clearProps)
             System.clearProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS);
     }
 
@@ -194,7 +203,7 @@
      * @throws Exception if failed.
      */
     public void testFillWalAndReadRecords() throws Exception {
-        setWalAndArchiveToSameValue = false;
+        setWalAndArchiveToSameVal = false;
 
         Ignite ignite0 = startGrid();
 
@@ -285,6 +294,29 @@
      * @throws Exception if failed.
      */
     public void testArchiveCompletedEventFired() throws Exception {
+        assertTrue(checkWhetherWALRelatedEventFired(EVT_WAL_SEGMENT_ARCHIVED));
+    }
+
+    /**
+     * Tests archive completed event is fired.
+     *
+     * @throws Exception if failed.
+     */
+    public void testArchiveCompactedEventFired() throws Exception {
+        boolean oldEnableWalCompaction = enableWalCompaction;
+
+        try {
+            enableWalCompaction = true;
+
+            assertTrue(checkWhetherWALRelatedEventFired(EVT_WAL_SEGMENT_COMPACTED));
+        }
+        finally {
+            enableWalCompaction = oldEnableWalCompaction;
+        }
+    }
+
+    /** */
+    private boolean checkWhetherWALRelatedEventFired(int evtType) throws Exception {
         AtomicBoolean evtRecorded = new AtomicBoolean();
 
         Ignite ignite = startGrid();
@@ -293,7 +325,7 @@
 
         final IgniteEvents evts = ignite.events();
 
-        if (!evts.isEnabled(EVT_WAL_SEGMENT_ARCHIVED))
+        if (!evts.isEnabled(evtType))
             fail("nothing to test");
 
         evts.localListen(e -> {
@@ -301,19 +333,19 @@
 
             long idx = archComplEvt.getAbsWalSegmentIdx();
 
-            log.info("Finished archive for segment [" +
+            log.info("Finished for segment [" +
                 idx + ", " + archComplEvt.getArchiveFile() + "]: [" + e + "]");
 
             evtRecorded.set(true);
 
             return true;
-        }, EVT_WAL_SEGMENT_ARCHIVED);
+        }, evtType);
 
-        putDummyRecords(ignite, 500);
+        putDummyRecords(ignite, 5_000);
 
         stopGrid();
 
-        assertTrue(evtRecorded.get());
+        return evtRecorded.get();
     }
 
     /**
@@ -340,7 +372,7 @@
             long idx = archComplEvt.getAbsWalSegmentIdx();
 
             log.info("Finished archive for segment [" + idx + ", " +
-                archComplEvt.getArchiveFile() + "]: [" + e + "]");
+                archComplEvt.getArchiveFile() + "]: [" + e + ']');
 
             if (waitingForEvt.get())
                 archiveSegmentForInactivity.countDown();
@@ -415,13 +447,13 @@
 
         IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
 
-        IteratorParametersBuilder iteratorParametersBuilder = createIteratorParametersBuilder(workDir, subfolderName);
+        IteratorParametersBuilder iterParametersBuilder = createIteratorParametersBuilder(workDir, subfolderName);
 
-        iteratorParametersBuilder.filesOrDirs(workDir);
+        iterParametersBuilder.filesOrDirs(workDir);
 
         scanIterateAndCount(
             factory,
-            iteratorParametersBuilder,
+            iterParametersBuilder,
             totalEntries,
             0,
             null,
@@ -472,7 +504,7 @@
 
         String workDir = U.defaultWorkDirectory();
 
-        IteratorParametersBuilder params = createIteratorParametersBuilder(workDir,subfolderName);
+        IteratorParametersBuilder params = createIteratorParametersBuilder(workDir, subfolderName);
 
         params.filesOrDirs(workDir);
 
@@ -738,7 +770,7 @@
             ctrlMapForBinaryObjects, ctrlMapForBinaryObjects.isEmpty());
 
         assertTrue(" Control Map for strings in entries is not empty after" +
-                " reading records: " + ctrlStringsForBinaryObjSearch, ctrlStringsForBinaryObjSearch.isEmpty());
+            " reading records: " + ctrlStringsForBinaryObjSearch, ctrlStringsForBinaryObjSearch.isEmpty());
     }
 
     /**
@@ -763,13 +795,13 @@
 
         IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
 
-        IteratorParametersBuilder iteratorParametersBuilder =
+        IteratorParametersBuilder iterParametersBuilder =
             createIteratorParametersBuilder(workDir, subfolderName)
-            .filesOrDirs(workDir);
+                .filesOrDirs(workDir);
 
         scanIterateAndCount(
             factory,
-            iteratorParametersBuilder,
+            iterParametersBuilder,
             0,
             0,
             null,
@@ -826,12 +858,11 @@
         runRemoveOperationTest(CacheAtomicityMode.ATOMIC);
     }
 
-
     /**
      * Test if DELETE operation can be found after mixed cache operations including remove().
      *
-     * @throws Exception if failed.
      * @param mode Cache Atomicity Mode.
+     * @throws Exception if failed.
      */
     private void runRemoveOperationTest(CacheAtomicityMode mode) throws Exception {
         Ignite ignite = startGrid();
@@ -1001,7 +1032,7 @@
             createsFound != null && createsFound > 0);
 
         assertTrue("Create operations count should be at least " + cntEntries + " in log: " + operationsFound,
-            createsFound != null && createsFound >= cntEntries);
+            createsFound >= cntEntries);
     }
 
     /**
@@ -1010,7 +1041,7 @@
      * @throws Exception if failed.
      */
     public void testTxRecordsReadWoBinaryMeta() throws Exception {
-        clearProperties = true;
+        clearProps = true;
 
         System.setProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS, "true");
 
@@ -1048,6 +1079,162 @@
     }
 
     /**
+     * @throws Exception If failed.
+     */
+    public void testCheckBoundsIterator() throws Exception {
+        Ignite ignite = startGrid("node0");
+
+        ignite.cluster().active(true);
+
+        try (IgniteDataStreamer<Integer, IndexedObject> st = ignite.dataStreamer(CACHE_NAME)) {
+            st.allowOverwrite(true);
+
+            for (int i = 0; i < 10_000; i++)
+                st.addData(i, new IndexedObject(i));
+        }
+
+        stopAllGrids();
+
+        List<FileWALPointer> wal = new ArrayList<>();
+
+        String workDir = U.defaultWorkDirectory();
+
+        IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory();
+
+        try (WALIterator it = factory.iterator(workDir)) {
+            while (it.hasNext()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                wal.add((FileWALPointer)tup.get1());
+            }
+        }
+
+        Random rnd = new Random();
+
+        int from0 = rnd.nextInt(wal.size() - 2) + 1;
+        int to0 = wal.size() - 1;
+
+        // +1 for skip first record.
+        FileWALPointer exp0First = wal.get(from0);
+        FileWALPointer exp0Last = wal.get(to0);
+
+        T2<FileWALPointer, WALRecord> actl0First = null;
+        T2<FileWALPointer, WALRecord> actl0Last = null;
+
+        int records0 = 0;
+
+        try (WALIterator it = factory.iterator(exp0First, workDir)) {
+            while (it.hasNext()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                if (actl0First == null)
+                    actl0First = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                actl0Last = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                records0++;
+            }
+        }
+
+        log.info("Check REPLAY FROM:" + exp0First + "\n" +
+            "expFirst=" + exp0First + " actlFirst=" + actl0First + ", " +
+            "expLast=" + exp0Last + " actlLast=" + actl0Last);
+
+        // +1 because bound include.
+        Assert.assertEquals(to0 - from0 + 1, records0);
+
+        Assert.assertNotNull(actl0First);
+        Assert.assertNotNull(actl0Last);
+
+        Assert.assertEquals(exp0First, actl0First.get1());
+        Assert.assertEquals(exp0Last, actl0Last.get1());
+
+        int from1 = 0;
+        int to1 = rnd.nextInt(wal.size() - 3) + 1;
+
+        // -3 for skip last record.
+        FileWALPointer exp1First = wal.get(from1);
+        FileWALPointer exp1Last = wal.get(to1);
+
+        T2<FileWALPointer, WALRecord> actl1First = null;
+        T2<FileWALPointer, WALRecord> actl1Last = null;
+
+        int records1 = 0;
+
+        try (WALIterator it = factory.iterator(
+            new IteratorParametersBuilder()
+                .filesOrDirs(workDir)
+                .to(exp1Last)
+        )) {
+            while (it.hasNext()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                if (actl1First == null)
+                    actl1First = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                actl1Last = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                records1++;
+            }
+        }
+
+        log.info("Check REPLAY TO:" + exp1Last + "\n" +
+            "expFirst=" + exp1First + " actlFirst=" + actl1First + ", " +
+            "expLast=" + exp1Last + " actlLast=" + actl1Last);
+
+        // +1 because bound include.
+        Assert.assertEquals(to1 - from1 + 1, records1);
+
+        Assert.assertNotNull(actl1First);
+        Assert.assertNotNull(actl1Last);
+
+        Assert.assertEquals(exp1First, actl1First.get1());
+        Assert.assertEquals(exp1Last, actl1Last.get1());
+
+        int from2 = rnd.nextInt(wal.size() - 2);
+        int to2 = rnd.nextInt((wal.size() - 1) - from2) + from2;
+
+        FileWALPointer exp2First = wal.get(from2);
+        FileWALPointer exp2Last = wal.get(to2);
+
+        T2<FileWALPointer, WALRecord> actl2First = null;
+        T2<FileWALPointer, WALRecord> actl2Last = null;
+
+        int records2 = 0;
+
+        try (WALIterator it = factory.iterator(
+            new IteratorParametersBuilder()
+                .filesOrDirs(workDir)
+                .from(exp2First)
+                .to(exp2Last)
+        )) {
+            while (it.hasNext()) {
+                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                if (actl2First == null)
+                    actl2First = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                actl2Last = new T2<>((FileWALPointer)tup.get1(), tup.get2());
+
+                records2++;
+            }
+        }
+
+        log.info("Check REPLAY BETWEEN:" + exp2First + " " + exp2Last+ "\n" +
+            "expFirst=" + exp2First + " actlFirst=" + actl2First + ", " +
+            "expLast=" + exp2Last + " actlLast=" + actl2Last);
+
+        // +1 because bound include.
+        Assert.assertEquals(to2 - from2 + 1, records2);
+
+        Assert.assertNotNull(actl2First);
+        Assert.assertNotNull(actl2Last);
+
+        Assert.assertEquals(exp2First, actl2First.get1());
+        Assert.assertEquals(exp2Last, actl2Last.get1());
+    }
+
+    /**
      * @param workDir Work directory.
      * @param subfolderName Subfolder name.
      * @return WAL iterator factory.
@@ -1281,7 +1468,7 @@
          *
          * @param iVal I value.
          */
-        public TestExternalizable(int iVal) {
+        TestExternalizable(int iVal) {
             this.iVal = iVal;
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java
index df649fa..b783373 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/reader/MockWalIteratorFactory.java
@@ -94,6 +94,7 @@
         when(persistentCfg1.getWalSegments()).thenReturn(segments);
         when(persistentCfg1.getWalBufferSize()).thenReturn(DataStorageConfiguration.DFLT_WAL_BUFF_SIZE);
         when(persistentCfg1.getWalRecordIteratorBufferSize()).thenReturn(DataStorageConfiguration.DFLT_WAL_RECORD_ITERATOR_BUFFER_SIZE);
+        when(persistentCfg1.getWalSegmentSize()).thenReturn(DataStorageConfiguration.DFLT_WAL_SEGMENT_SIZE);
 
         final FileIOFactory fileIOFactory = new DataStorageConfiguration().getFileIOFactory();
         when(persistentCfg1.getFileIOFactory()).thenReturn(fileIOFactory);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java
index 3737204..7719b43 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreePageMemoryImplTest.java
@@ -60,6 +60,7 @@
             null,
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java
index c36ecce..71eb129 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/BPlusTreeReuseListPageMemoryImplTest.java
@@ -62,6 +62,7 @@
             null,
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FillFactorMetricTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FillFactorMetricTest.java
index 42eaf36..ac65c6d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FillFactorMetricTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FillFactorMetricTest.java
@@ -91,6 +91,25 @@
     private final float[] curFillFactor = new float[NODES];
 
     /**
+     * Tests that {@link DataRegionMetrics#getPagesFillFactor()} doesn't return NaN for empty cache.
+     *
+     * @throws Exception if failed.
+     */
+    public void testEmptyCachePagesFillFactor() throws Exception {
+        startGrids(1);
+
+        // Cache is created in default region so MY_DATA_REGION will have "empty" metrics.
+        CacheConfiguration<Object, Object> cacheCfg = new CacheConfiguration<>().setName(MY_CACHE);
+        grid(0).getOrCreateCache(cacheCfg);
+
+        DataRegionMetrics m = grid(0).dataRegionMetrics(MY_DATA_REGION);
+
+        assertEquals(0, m.getTotalAllocatedPages());
+
+        assertEquals(0, m.getPagesFillFactor(), Float.MIN_VALUE);
+    }
+
+    /**
      * throws if failed.
      */
     public void testFillAndEmpty() throws Exception {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FullPageIdTableTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FullPageIdTableTest.java
index 43b27aa..e337bb1 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FullPageIdTableTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/FullPageIdTableTest.java
@@ -26,7 +26,10 @@
 import org.apache.ignite.internal.pagemem.FullPageId;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.logger.java.JavaLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 
@@ -34,6 +37,10 @@
  *
  */
 public class FullPageIdTableTest  {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** */
     private static final int CACHE_ID_RANGE = 1;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java
index c6f42e1..aa1e37d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgnitePageMemReplaceDelayedWriteUnitTest.java
@@ -42,8 +42,11 @@
 import org.apache.ignite.internal.util.GridMultiCollectionWrapper;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.logger.NullLogger;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.jetbrains.annotations.NotNull;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
 import static org.mockito.Matchers.any;
@@ -54,6 +57,10 @@
  * Unit test for delayed page replacement mode.
  */
 public class IgnitePageMemReplaceDelayedWriteUnitTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** CPU count. */
     private static final int CPUS = 32;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgniteThrottlingUnitTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgniteThrottlingUnitTest.java
index f9ca7e4..6f504da 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgniteThrottlingUnitTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IgniteThrottlingUnitTest.java
@@ -24,7 +24,10 @@
 import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker;
 import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier;
 import org.apache.ignite.logger.NullLogger;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -37,6 +40,10 @@
  *
  */
 public class IgniteThrottlingUnitTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** Logger. */
     private IgniteLogger log = new NullLogger();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java
index 9087b1c..43fbb6e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/IndexStoragePageMemoryImplTest.java
@@ -75,6 +75,7 @@
             null,
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java
index c61b3c0..0f21952 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpPageStoreManager.java
@@ -33,6 +33,7 @@
 import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.StoredCacheData;
+import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.lang.IgniteFuture;
 
@@ -54,6 +55,12 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void initialize(int cacheId, int partitions, String workingDir,
+        AllocatedPageTracker tracker) throws IgniteCheckedException {
+        // No-op.
+    }
+
+    /** {@inheritDoc} */
     @Override public void initializeForCache(CacheGroupDescriptor grpDesc,
         StoredCacheData cacheData) throws IgniteCheckedException {
         // No-op.
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java
index 0a240ea..811a231 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/NoOpWALManager.java
@@ -20,11 +20,12 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
-import org.apache.ignite.internal.pagemem.wal.StorageException;
+import org.apache.ignite.internal.processors.cache.persistence.StorageException;
 import org.apache.ignite.internal.pagemem.wal.WALIterator;
 import org.apache.ignite.internal.pagemem.wal.WALPointer;
 import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileDescriptor;
 import org.apache.ignite.lang.IgniteFuture;
 
 /**
@@ -82,7 +83,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void allowCompressionUntil(WALPointer ptr) {
+    @Override public void notchLastCheckpointPtr(WALPointer ptr) {
         // No-op.
     }
 
@@ -155,4 +156,14 @@
     @Override public long lastArchivedSegment() {
         return -1L;
     }
+
+    /** {@inheritDoc} */
+    @Override public long lastCompactedSegment() {
+        return -1L;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long maxArchivedSegmentToDelete() {
+        return -1;
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java
index 34fd93b..52aff0c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplNoLoadTest.java
@@ -65,6 +65,7 @@
             null,
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java
index 3697c4c..000131a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImplTest.java
@@ -297,6 +297,7 @@
             null,
             null,
             null,
+            null,
             null
         );
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/RobinHoodBackwardShiftHashMapTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/RobinHoodBackwardShiftHashMapTest.java
index 565b99e..345651e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/RobinHoodBackwardShiftHashMapTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/RobinHoodBackwardShiftHashMapTest.java
@@ -26,8 +26,11 @@
 import org.apache.ignite.internal.util.GridLongList;
 import org.apache.ignite.internal.util.GridUnsafe;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.jetbrains.annotations.NotNull;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_LONG_LONG_HASH_MAP_LOAD_FACTOR;
 import static org.junit.Assert.assertEquals;
@@ -38,6 +41,9 @@
  * Unit tests of {@link RobinHoodBackwardShiftHashMap} implementation.
  */
 public class RobinHoodBackwardShiftHashMapTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
 
     /**
      * @param tester map test code
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java
index 016b6a4..fcbe62f 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateAbstractTest.java
@@ -151,7 +151,7 @@
      * @param cnt Count.
      * @throws Exception If failed.
      */
-    private void startPrimaryNodes(int cnt) throws Exception {
+    void startPrimaryNodes(int cnt) throws Exception {
         for (int i = 0; i < cnt; i++)
             startPrimary(i);
 
@@ -181,7 +181,7 @@
      * @param cnt Count.
      * @throws Exception If failed.
      */
-    private void startBackUpNodes(int cnt) throws Exception {
+    void startBackUpNodes(int cnt) throws Exception {
         for (int i = 0; i < cnt; i++)
             startBackUp(i);
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateTest.java
index 70a9251..9152ab9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteChangeGlobalStateTest.java
@@ -40,13 +40,6 @@
  *
  */
 public class IgniteChangeGlobalStateTest extends IgniteChangeGlobalStateAbstractTest {
-    /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
-        fail("https://issues.apache.org/jira/browse/IGNITE-9004");
-
-        super.beforeTest();
-    }
-
     /**
      * @throws Exception if fail.
      */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteNoParrallelClusterIsAllowedTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteNoParrallelClusterIsAllowedTest.java
new file mode 100644
index 0000000..5c986ee
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/IgniteNoParrallelClusterIsAllowedTest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.standbycluster;
+
+import junit.framework.AssertionFailedError;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+
+/**
+ *
+ */
+public class IgniteNoParrallelClusterIsAllowedTest extends IgniteChangeGlobalStateAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder vmIpFinder = new TcpDiscoveryVmIpFinder(true);
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testSimple() throws Exception {
+        startPrimaryNodes(primaryNodes());
+
+        tryToStartBackupClusterWhatShouldFail();
+
+        primary(0).cluster().active(true);
+
+        tryToStartBackupClusterWhatShouldFail();
+
+        primary(0).cluster().active(false);
+
+        tryToStartBackupClusterWhatShouldFail();
+
+        primary(0).cluster().active(true);
+
+        tryToStartBackupClusterWhatShouldFail();
+
+        stopAllPrimary();
+
+        startBackUp(backUpNodes());
+
+        stopAllBackUp();
+
+        startPrimaryNodes(primaryNodes());
+
+        tryToStartBackupClusterWhatShouldFail();
+    }
+
+    /**
+     *
+     */
+    private void tryToStartBackupClusterWhatShouldFail() {
+        try {
+            startBackUpNodes(backUpNodes());
+
+            fail();
+        }
+        catch (AssertionFailedError er) {
+                throw er;
+        }
+        catch (Throwable e) {
+            while (true) {
+                String message = e.getMessage();
+
+                if (message.contains("Failed to acquire file lock during"))
+                    break;
+
+                if (e.getCause() != null)
+                    e = e.getCause();
+                else
+                    fail();
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        U.delete(U.resolveWorkDirectory(U.defaultWorkDirectory(), testName(), true));
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     *
+     */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+
+        U.delete(U.resolveWorkDirectory(U.defaultWorkDirectory(), testName(), true));
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java
index f7b0ed7..d01e11a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/standbycluster/reconnect/IgniteAbstractStandByClientReconnectTest.java
@@ -31,6 +31,7 @@
 import org.apache.ignite.events.Event;
 import org.apache.ignite.events.EventType;
 import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.lang.IgnitePredicate;
@@ -388,7 +389,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override public void onDiscovery(
+        @Override public IgniteInternalFuture onDiscovery(
             int type,
             long topVer,
             ClusterNode node,
@@ -396,9 +397,9 @@
             @Nullable Map<Long, Collection<ClusterNode>> topHist,
             @Nullable DiscoverySpiCustomMessage data
         ) {
-            delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, data);
+            IgniteInternalFuture fut = delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, data);
 
-            if (type == EVT_CLIENT_NODE_DISCONNECTED)
+            if (type == EVT_CLIENT_NODE_DISCONNECTED) {
                 try {
                     System.out.println("Await cluster change state");
 
@@ -407,6 +408,9 @@
                 catch (InterruptedException e) {
                     throw new RuntimeException(e);
                 }
+            }
+
+            return fut;
         }
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java
new file mode 100644
index 0000000..b6a04d0
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneWalRecordsIteratorTest.java
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.wal.reader;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.OpenOption;
+import java.nio.file.StandardOpenOption;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.pagemem.wal.record.SnapshotRecord;
+import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileIO;
+import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIO;
+import org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory;
+import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.internal.processors.cache.persistence.wal.reader.IgniteWalIteratorFactory.IteratorParametersBuilder;
+
+/**
+ * The test check, that StandaloneWalRecordsIterator correctly close file descriptors associated with WAL files.
+ */
+public class StandaloneWalRecordsIteratorTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String name) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(name);
+
+        cfg.setDataStorageConfiguration(
+            new DataStorageConfiguration().
+                setDefaultDataRegionConfiguration(
+                    new DataRegionConfiguration()
+                        .setPersistenceEnabled(true)
+                )
+        ).setDiscoverySpi(
+            new TcpDiscoverySpi()
+                .setIpFinder(IP_FINDER)
+        );
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * Check correct closing file descriptors.
+     *
+     * @throws Exception if test failed.
+     */
+    public void testCorrectClosingFileDescriptors() throws Exception {
+        IgniteEx ig = (IgniteEx)startGrid();
+
+        String archiveWalDir = getArchiveWalDirPath(ig);
+
+        ig.cluster().active(true);
+
+        IgniteCacheDatabaseSharedManager sharedMgr = ig.context().cache().context().database();
+
+        IgniteWriteAheadLogManager walMgr = ig.context().cache().context().wal();
+
+        // Generate WAL segments for filling WAL archive folder.
+        for (int i = 0; i < 2 * ig.configuration().getDataStorageConfiguration().getWalSegments(); i++) {
+            sharedMgr.checkpointReadLock();
+
+            try {
+                walMgr.log(new SnapshotRecord(i, false));
+            }
+            finally {
+                sharedMgr.checkpointReadUnlock();
+            }
+        }
+
+        stopGrid();
+
+        // Iterate by all archived WAL segments.
+        createWalIterator(archiveWalDir).forEach(x -> {
+        });
+
+        assertTrue("At least one WAL file must be opened!", CountedFileIO.getCountOpenedWalFiles() > 0);
+
+        assertEquals("All WAL files must be closed!", CountedFileIO.getCountOpenedWalFiles(), CountedFileIO.getCountClosedWalFiles());
+    }
+
+    /**
+     * Creates WALIterator associated with files inside walDir.
+     *
+     * @param walDir - path to WAL directory.
+     * @return WALIterator associated with files inside walDir.
+     * @throws IgniteCheckedException if error occur.
+     */
+    private WALIterator createWalIterator(String walDir) throws IgniteCheckedException {
+        IteratorParametersBuilder params = new IteratorParametersBuilder();
+
+        params.ioFactory(new CountedFileIOFactory());
+
+        return new IgniteWalIteratorFactory(log).iterator(params.filesOrDirs(walDir));
+    }
+
+    /**
+     * Evaluate path to directory with WAL archive.
+     *
+     * @param ignite instance of Ignite.
+     * @return path to directory with WAL archive.
+     * @throws IgniteCheckedException if error occur.
+     */
+    private String getArchiveWalDirPath(Ignite ignite) throws IgniteCheckedException {
+        return U.resolveWorkDirectory(
+            U.defaultWorkDirectory(),
+            ignite.configuration().getDataStorageConfiguration().getWalArchivePath(),
+            false
+        ).getAbsolutePath();
+    }
+
+    /**
+     *
+     */
+    private static class CountedFileIOFactory extends RandomAccessFileIOFactory {
+        /** {@inheritDoc} */
+        @Override public FileIO create(File file) throws IOException {
+            return create(file, StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE);
+        }
+
+        /** {@inheritDoc} */
+        @Override public FileIO create(File file, OpenOption... modes) throws IOException {
+            return new CountedFileIO(file, modes);
+        }
+    }
+
+    /**
+     *
+     */
+    private static class CountedFileIO extends RandomAccessFileIO {
+        /** Wal open counter. */
+        private static final AtomicInteger WAL_OPEN_COUNTER = new AtomicInteger();
+        /** Wal close counter. */
+        private static final AtomicInteger WAL_CLOSE_COUNTER = new AtomicInteger();
+
+        /** File name. */
+        private final String fileName;
+
+        /** */
+        public CountedFileIO(File file, OpenOption... modes) throws IOException {
+            super(file, modes);
+
+            fileName = file.getName();
+
+            if (FileWriteAheadLogManager.WAL_NAME_PATTERN.matcher(fileName).matches())
+                WAL_OPEN_COUNTER.incrementAndGet();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void close() throws IOException {
+            super.close();
+
+            if (FileWriteAheadLogManager.WAL_NAME_PATTERN.matcher(fileName).matches())
+                WAL_CLOSE_COUNTER.incrementAndGet();
+        }
+
+        /**
+         *
+         * @return number of opened files.
+         */
+        public static int getCountOpenedWalFiles() { return WAL_OPEN_COUNTER.get(); }
+
+        /**
+         *
+         * @return number of closed files.
+         */
+        public static int getCountClosedWalFiles() { return WAL_CLOSE_COUNTER.get(); }
+    }
+}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQuerySelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQuerySelfTest.java
index b6e32d5..5f2e2ed 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQuerySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQuerySelfTest.java
@@ -78,8 +78,6 @@
 
         cfg.setDiscoverySpi(disco);
 
-        cfg.setFailureDetectionTimeout(Integer.MAX_VALUE);
-
         return cfg;
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQueryWithH2IndexingSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQueryWithH2IndexingSelfTest.java
index 800c5a2..a472816 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQueryWithH2IndexingSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/IndexingSpiQueryWithH2IndexingSelfTest.java
@@ -24,7 +24,7 @@
  */
 public class IndexingSpiQueryWithH2IndexingSelfTest extends IndexingSpiQuerySelfTest {
     /** */
-    protected <K, V> CacheConfiguration<K, V> cacheConfiguration(String cacheName) {
+    @Override protected <K, V> CacheConfiguration<K, V> cacheConfiguration(String cacheName) {
         CacheConfiguration<K, V> ccfg = super.cacheConfiguration(cacheName);
 
         ccfg.setIndexedTypes(PersonKey.class, Person.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryAsyncFailoverTxSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryAsyncFailoverTxSelfTest.java
index 900abc8..0417022 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryAsyncFailoverTxSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryAsyncFailoverTxSelfTest.java
@@ -43,7 +43,7 @@
     }
 
     /** {@inheritDoc} */
-    public void testNoEventLossOnTopologyChange() throws Exception {
+    @Override public void testNoEventLossOnTopologyChange() throws Exception {
         fail("https://issues.apache.org/jira/browse/IGNITE-4015");
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverAbstractSelfTest.java
index f91c689..a17e48c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverAbstractSelfTest.java
@@ -2341,7 +2341,7 @@
      */
     private Thread nodeRestartThread(final int restartCycles, final long initDelay, final long restartDelay) {
         Thread t = new Thread(new Runnable() {
-            public void run() {
+            @Override public void run() {
                 sleep(initDelay);
 
                 try {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverTxSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverTxSelfTest.java
index c5240da..cd916e4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverTxSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/CacheContinuousQueryFailoverTxSelfTest.java
@@ -38,7 +38,7 @@
     }
 
     /** {@inheritDoc} */
-    public void testNoEventLossOnTopologyChange() throws Exception {
+    @Override public void testNoEventLossOnTopologyChange() throws Exception {
         fail("https://issues.apache.org/jira/browse/IGNITE-4015");
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryMarshallerTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryMarshallerTest.java
new file mode 100644
index 0000000..44dcc1c
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryMarshallerTest.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.query.continuous;
+
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.ObjectStreamException;
+import java.io.Serializable;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import javax.cache.Cache;
+import javax.cache.event.CacheEntryEvent;
+import javax.cache.event.CacheEntryListenerException;
+import javax.cache.event.CacheEntryUpdatedListener;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.binary.BinaryObjectException;
+import org.apache.ignite.binary.BinaryReader;
+import org.apache.ignite.binary.BinaryWriter;
+import org.apache.ignite.binary.Binarylizable;
+import org.apache.ignite.cache.query.ContinuousQuery;
+import org.apache.ignite.cache.query.ScanQuery;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.custom.DummyEventFilterFactory;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Checks that Optimized Marshaller is not used on any stage of Continuous Query handling.
+ */
+public class ContinuousQueryMarshallerTest extends GridCommonAbstractTest {
+    /** */
+    public static final String CACHE_NAME = "test-cache";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(final String gridName) throws Exception {
+        final IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setClientMode(gridName.contains("client"));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRemoteFilterFactoryClient() throws Exception {
+        check("server", "client");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRemoteFilterFactoryServer() throws Exception {
+        check("server1", "server2");
+    }
+
+    /**
+     * @param node1Name Node 1 name.
+     * @param node2Name Node 2 name.
+     */
+    private void check(String node1Name, String node2Name) throws Exception {
+        final Ignite node1 = startGrid(node1Name);
+
+        final IgniteCache<Integer, MarshallerCheckingEntry> cache = node1.getOrCreateCache(CACHE_NAME);
+
+        for (int i = 0; i < 10; i++)
+            cache.put(i, new MarshallerCheckingEntry(String.valueOf(i)));
+
+        final Ignite node2 = startGrid(node2Name);
+
+        final ContinuousQuery<Integer, MarshallerCheckingEntry> qry = new ContinuousQuery<>();
+
+        ScanQuery<Integer, MarshallerCheckingEntry> scanQry = new ScanQuery<>(new IgniteBiPredicate<Integer, MarshallerCheckingEntry>() {
+            @Override public boolean apply(Integer key, MarshallerCheckingEntry val) {
+                return key % 2 == 0;
+            }
+        });
+
+        qry.setInitialQuery(scanQry);
+
+        qry.setRemoteFilterFactory(new DummyEventFilterFactory<>());
+
+        final CountDownLatch latch = new CountDownLatch(15);
+
+        qry.setLocalListener(new CacheEntryUpdatedListener<Integer, MarshallerCheckingEntry>() {
+            @Override public void onUpdated(
+                final Iterable<CacheEntryEvent<? extends Integer, ? extends MarshallerCheckingEntry>> evts)
+                throws CacheEntryListenerException {
+
+                System.out.println(">> Client 1 events " + evts);
+
+                for (CacheEntryEvent<? extends Integer, ? extends MarshallerCheckingEntry> evt : evts)
+                    latch.countDown();
+            }
+        });
+
+        final IgniteCache<Integer, MarshallerCheckingEntry> cache1 = node2.cache(CACHE_NAME);
+
+        for (Cache.Entry<Integer, MarshallerCheckingEntry> entry : cache1.query(qry)) {
+            latch.countDown();
+            System.out.println(">> Initial entry " + entry);
+        }
+
+        for (int i = 10; i < 20; i++)
+            cache1.put(i, new MarshallerCheckingEntry(i));
+
+        assertTrue(Long.toString(latch.getCount()), latch.await(5, TimeUnit.SECONDS));
+    }
+
+    /** Checks that OptimizedMarshaller is never used (BinaryMarshaller is OK) */
+    private class MarshallerCheckingEntry implements Serializable, Binarylizable {
+        /** */
+        private Object val;
+
+        /** */
+        public MarshallerCheckingEntry(Object val) {
+            this.val = val;
+        }
+
+        /** */
+        private void writeObject(ObjectOutputStream out) throws IOException {
+            throw new UnsupportedOperationException();
+        }
+
+        /** */
+        private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+            throw new UnsupportedOperationException();
+        }
+
+        /** */
+        private void readObjectNoData() throws ObjectStreamException {
+            throw new UnsupportedOperationException();
+        }
+
+        /** */
+        @Override public void writeBinary(BinaryWriter writer) throws BinaryObjectException {
+            writer.writeObject("value", val);
+        }
+
+        /** */
+        @Override public void readBinary(BinaryReader reader) throws BinaryObjectException {
+            val = reader.readObject("value");
+        }
+    }
+
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryPeerClassLoadingTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryPeerClassLoadingTest.java
index e5d1d60..73d8d0d 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryPeerClassLoadingTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/query/continuous/ContinuousQueryPeerClassLoadingTest.java
@@ -91,8 +91,8 @@
         final ContinuousQuery<Integer, String> qry1 = new ContinuousQuery<>();
         final ContinuousQuery<Integer, String> qry2 = new ContinuousQuery<>();
 
-        qry1.setRemoteFilterFactory(new DummyEventFilterFactory());
-        qry2.setRemoteFilterFactory(new DummyEventFilterFactory());
+        qry1.setRemoteFilterFactory(new DummyEventFilterFactory<>());
+        qry2.setRemoteFilterFactory(new DummyEventFilterFactory<>());
 
         final AtomicInteger client1Evts = new AtomicInteger(0);
         final AtomicInteger client2Evts = new AtomicInteger(0);
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStoreSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStoreSelfTest.java
index af21fc8..538f135 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStoreSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/store/GridCacheWriteBehindStoreSelfTest.java
@@ -412,7 +412,9 @@
      * @throws Exception If failed.
      */
     private void testBatchApply(boolean writeCoalescing) throws Exception {
-        delegate = new GridCacheTestStore(new ConcurrentLinkedHashMap<Integer, String>());
+        delegate = new GridCacheTestStore(new ConcurrentLinkedHashMap<Integer, String>() {
+            @Override public void clear() { }
+        });
 
         initStore(1, writeCoalescing);
 
@@ -433,4 +435,4 @@
 
         assertTrue("Store map key set: " + underlyingMap.keySet(), F.eqOrdered(underlyingMap.keySet(), intList));
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractDeadlockDetectionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractDeadlockDetectionTest.java
index c0034f5..2ef7073 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractDeadlockDetectionTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractDeadlockDetectionTest.java
@@ -26,6 +26,7 @@
 import org.apache.ignite.cache.affinity.Affinity;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
 import org.apache.ignite.internal.processors.cache.GridCacheConcurrentMap;
@@ -186,6 +187,19 @@
     }
 
     /**
+     * Wait for late affinity assignment after cache start.
+     * So we can be sure that there will not happen unpredictable PME.
+     *
+     * @param minorTopVer Minor topology version before cache start.
+     */
+    void waitForLateAffinityAssignment(int minorTopVer) throws IgniteInterruptedCheckedException {
+        assertTrue("Failed to wait for late affinity assignment",
+            GridTestUtils.waitForCondition(() ->
+                    grid(0).context().discovery().topologyVersionEx().minorTopologyVersion() == minorTopVer + 1,
+                10_000));
+    }
+
+    /**
      *
      */
     protected static class KeyObject implements IncrementalTestObject {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractTransactionIntergrityTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractTransactionIntergrityTest.java
new file mode 100644
index 0000000..fe27e6e
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/AbstractTransactionIntergrityTest.java
@@ -0,0 +1,595 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.transactions;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Random;
+import java.util.Set;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import com.google.common.collect.Sets;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.FailureHandler;
+import org.apache.ignite.failure.StopNodeFailureHandler;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+import org.jetbrains.annotations.NotNull;
+import org.jsr166.ConcurrentLinkedHashMap;
+import org.junit.Assert;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Test transfer amount between accounts with enabled {@link StopNodeFailureHandler}.
+ *
+ * This test can be extended to emulate failover scenarios during transactional operations on the grid.
+ */
+public class AbstractTransactionIntergrityTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** Count of accounts in one thread. */
+    private static final int DFLT_ACCOUNTS_CNT = 32;
+
+    /** Count of threads and caches. */
+    private static final int DFLT_TX_THREADS_CNT = 20;
+
+    /** Count of nodes to start. */
+    private static final int DFLT_NODES_CNT = 3;
+
+    /** Count of transaction on cache. */
+    private static final int DFLT_TRANSACTIONS_CNT = 10;
+
+    /** Completed transactions map. */
+    private ConcurrentLinkedHashMap[] completedTxs;
+
+    /**
+     *
+     */
+    protected int nodesCount() {
+        return DFLT_NODES_CNT;
+    }
+
+    /**
+     *
+     */
+    protected int accountsCount() {
+        return DFLT_ACCOUNTS_CNT;
+    }
+
+    /**
+     *
+     */
+    protected int transactionsCount() {
+        return DFLT_TRANSACTIONS_CNT;
+    }
+
+    /**
+     *
+     */
+    protected int txThreadsCount() {
+        return DFLT_TX_THREADS_CNT;
+    }
+
+    /**
+     * @return Flag enables secondary index on account caches.
+     */
+    protected boolean indexed() {
+        return false;
+    }
+
+    /**
+     * @return Flag enables persistence on account caches.
+     */
+    protected boolean persistent() {
+        return true;
+    }
+
+    /**
+     * @return Flag enables cross-node transactions,
+     *         when primary partitions participating in transaction spreaded across several cluster nodes.
+     */
+    protected boolean crossNodeTransactions() {
+        // Commit error during cross node transactions breaks transaction integrity
+        // TODO: https://issues.apache.org/jira/browse/IGNITE-9086
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected FailureHandler getFailureHandler(String igniteInstanceName) {
+        return new StopNodeFailureHandler();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String name) throws Exception {
+        final IgniteConfiguration cfg = super.getConfiguration(name);
+
+        cfg.setConsistentId(name);
+
+        ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+        cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
+        cfg.setLocalHost("127.0.0.1");
+
+        cfg.setDataStorageConfiguration(new DataStorageConfiguration()
+            .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
+                .setMaxSize(256 * 1024 * 1024)
+                .setPersistenceEnabled(persistent()))
+        );
+
+        CacheConfiguration[] cacheConfigurations = new CacheConfiguration[txThreadsCount()];
+
+        for (int i = 0; i < txThreadsCount(); i++) {
+            CacheConfiguration ccfg = new CacheConfiguration()
+                .setName(cacheName(i))
+                .setAffinity(new RendezvousAffinityFunction(false, accountsCount()))
+                .setBackups(1)
+                .setAtomicityMode(TRANSACTIONAL)
+                .setCacheMode(CacheMode.PARTITIONED)
+                .setWriteSynchronizationMode(FULL_SYNC)
+                .setReadFromBackup(true)
+                .setOnheapCacheEnabled(true);
+
+            if (indexed())
+                ccfg.setIndexedTypes(IgniteUuid.class, AccountState.class);
+
+            cacheConfigurations[i] = ccfg;
+        }
+
+        cfg.setCacheConfiguration(cacheConfigurations);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * Make test cache name by prefix.
+     */
+    @NotNull private String cacheName(int cachePrefixIdx) {
+        return "cache" + cachePrefixIdx;
+    }
+
+    /**
+     * Ignite configuration for client.
+     */
+    @NotNull private IgniteConfiguration getClientConfiguration(int nodesPrefix) throws Exception {
+        IgniteConfiguration clientConf = getConfiguration(getTestIgniteInstanceName(nodesPrefix));
+
+        clientConf.setClientMode(true);
+
+        return clientConf;
+    }
+
+    /**
+     * Test transfer amount.
+     */
+    public void doTestTransferAmount(FailoverScenario failoverScenario) throws Exception {
+        failoverScenario.beforeNodesStarted();
+
+        //given: started some nodes with client.
+        startGrids(nodesCount());
+
+        IgniteEx igniteClient = startGrid(getClientConfiguration(nodesCount()));
+
+        igniteClient.cluster().active(true);
+
+        int[] initAmount = new int[txThreadsCount()];
+        completedTxs = new ConcurrentLinkedHashMap[txThreadsCount()];
+
+        //and: fill all accounts on all caches and calculate total amount for every cache.
+        for (int cachePrefixIdx = 0; cachePrefixIdx < txThreadsCount(); cachePrefixIdx++) {
+            IgniteCache<Integer, AccountState> cache = igniteClient.getOrCreateCache(cacheName(cachePrefixIdx));
+
+            AtomicInteger coinsCounter = new AtomicInteger();
+
+            try (Transaction tx = igniteClient.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                for (int accountId = 0; accountId < accountsCount(); accountId++) {
+                    Set<Integer> initialAmount = generateCoins(coinsCounter, 5);
+
+                    cache.put(accountId, new AccountState(accountId, tx.xid(), initialAmount));
+                }
+
+                tx.commit();
+            }
+
+            initAmount[cachePrefixIdx] = coinsCounter.get();
+            completedTxs[cachePrefixIdx] = new ConcurrentLinkedHashMap();
+        }
+
+        //when: start transfer amount from account to account in different threads.
+        CountDownLatch firstTransactionDone = new CountDownLatch(txThreadsCount());
+
+        ArrayList<Thread> transferThreads = new ArrayList<>();
+
+        for (int i = 0; i < txThreadsCount(); i++) {
+            transferThreads.add(new TransferAmountTxThread(firstTransactionDone, igniteClient, cacheName(i), i));
+
+            transferThreads.get(i).start();
+        }
+
+        firstTransactionDone.await(10, TimeUnit.SECONDS);
+
+        failoverScenario.afterFirstTransaction();
+
+        for (Thread thread : transferThreads) {
+            thread.join();
+        }
+
+        failoverScenario.afterTransactionsFinished();
+
+        consistencyCheck(initAmount);
+    }
+
+    /**
+     * Calculates total amount of coins for every thread for every node and checks that coins difference is zero (transaction integrity is saved).
+     */
+    private void consistencyCheck(int[] initAmount) {
+        for (Ignite node : G.allGrids()) {
+            for (int j = 0; j < txThreadsCount(); j++) {
+                List<Integer> totalCoins = new ArrayList<>();
+
+                String cacheName = cacheName(j);
+
+                IgniteCache<Integer, AccountState> cache = node.getOrCreateCache(cacheName);
+
+                AccountState[] accStates = new AccountState[accountsCount()];
+
+                try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    for (int i = 0; i < accountsCount(); i++) {
+                        AccountState state = cache.get(i);
+
+                        Assert.assertNotNull("Account state has lost [node=" + node.name() + ", cache=" + cacheName + ", accNo=" + i + "]", state);
+
+                        totalCoins.addAll(state.coins);
+
+                        accStates[i] = state;
+                    }
+
+                    tx.commit();
+                }
+
+                Collections.sort(totalCoins);
+
+                if (initAmount[j] != totalCoins.size()) {
+                    Set<Integer> lostCoins = new HashSet<>();
+                    Set<Integer> duplicateCoins = new HashSet<>();
+
+                    for (int coin = 1; coin <= initAmount[j]; coin++)
+                        if (!totalCoins.contains(coin))
+                            lostCoins.add(coin);
+
+                    for (int coinIdx = 1; coinIdx < totalCoins.size(); coinIdx++)
+                        if (totalCoins.get(coinIdx).equals(totalCoins.get(coinIdx - 1)))
+                            duplicateCoins.add(totalCoins.get(coinIdx));
+
+                    log.error("Transaction integrity failed for [node=" + node.name() + ", cache=" + cacheName + "]");
+
+                    log.error(String.format("Total amount of coins before and after transfers are not same. Lost coins: %s. Duplicate coins: %s.",
+                        Objects.toString(lostCoins),
+                        Objects.toString(duplicateCoins)));
+
+                    ConcurrentLinkedHashMap<IgniteUuid, TxState> txs = completedTxs[j];
+
+                    for (TxState tx : txs.values())
+                        log.error("Tx: " + tx);
+
+                    for (int i = 0; i < accountsCount(); i++)
+                        log.error("Account state " + i + " = " + accStates[i]);
+
+                    assertFalse("Test failed. See messages above", true);
+                }
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    public static class AccountState {
+        /** Account id. */
+        private final int accId;
+
+        /** Last performed transaction id on account state. */
+        @QuerySqlField(index = true)
+        private final IgniteUuid txId;
+
+        /** Set of coins holds in account. */
+        private final Set<Integer> coins;
+
+        /**
+         * @param accId Acc id.
+         * @param txId Tx id.
+         * @param coins Coins.
+         */
+        public AccountState(int accId, IgniteUuid txId, Set<Integer> coins) {
+            this.txId = txId;
+            this.coins = Collections.unmodifiableSet(coins);
+            this.accId = accId;
+        }
+
+        /**
+         * @param random Randomizer.
+         * @return Set of coins need to transfer from.
+         */
+        public Set<Integer> coinsToTransfer(Random random) {
+            int coinsNum = random.nextInt(coins.size());
+
+            return coins.stream().limit(coinsNum).collect(Collectors.toSet());
+        }
+
+        /**
+         * @param txId Transaction id.
+         * @param coinsToAdd Coins to add to current account.
+         * @return Account state with added coins.
+         */
+        public AccountState addCoins(IgniteUuid txId, Set<Integer> coinsToAdd) {
+            return new AccountState(accId, txId, Sets.union(coins, coinsToAdd).immutableCopy());
+        }
+
+        /**
+         * @param txId Transaction id.
+         * @param coinsToRemove Coins to remove from current account.
+         * @return Account state with removed coins.
+         */
+        public AccountState removeCoins(IgniteUuid txId, Set<Integer> coinsToRemove) {
+            return new AccountState(accId, txId, Sets.difference(coins, coinsToRemove).immutableCopy());
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            AccountState that = (AccountState) o;
+            return Objects.equals(txId, that.txId) &&
+                Objects.equals(coins, that.coins);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return Objects.hash(txId, coins);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "AccountState{" +
+                "accId=" + Objects.toString(accId) +
+                ", coins=" + Objects.toString(coins) +
+                '}';
+        }
+    }
+
+    /**
+     * @param coinsNum Coins number.
+     */
+    private Set<Integer> generateCoins(AtomicInteger coinsCounter, int coinsNum) {
+        Set<Integer> res = new HashSet<>();
+
+        for (int i = 0; i < coinsNum; i++)
+            res.add(coinsCounter.incrementAndGet());
+
+        return res;
+    }
+
+    /**
+     * State representing transaction between two accounts.
+     */
+    static class TxState {
+        /**
+         * Account states before transaction.
+         */
+        AccountState before1, before2;
+
+        /**
+         * Account states after transaction.
+         */
+        AccountState after1, after2;
+
+        /**
+         * Transferred coins between accounts during this transaction.
+         */
+        Set<Integer> transferredCoins;
+
+        /**
+         * @param before1 Before 1.
+         * @param before2 Before 2.
+         * @param after1 After 1.
+         * @param after2 After 2.
+         * @param transferredCoins Transferred coins.
+         */
+        public TxState(AccountState before1, AccountState before2, AccountState after1, AccountState after2, Set<Integer> transferredCoins) {
+            this.before1 = before1;
+            this.before2 = before2;
+            this.after1 = after1;
+            this.after2 = after2;
+            this.transferredCoins = transferredCoins;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return "TxState{" +
+                "before1=" + before1 +
+                ", before2=" + before2 +
+                ", transferredCoins=" + transferredCoins +
+                ", after1=" + after1 +
+                ", after2=" + after2 +
+                '}';
+        }
+    }
+
+    /**
+     *
+     */
+    private class TransferAmountTxThread extends Thread {
+        /** */
+        private CountDownLatch firstTransactionLatch;
+        /** */
+        private IgniteEx ignite;
+        /** */
+        private String cacheName;
+        /** */
+        private int txIndex;
+        /** */
+        private Random random = new Random();
+
+        /**
+         * @param ignite Ignite.
+         */
+        private TransferAmountTxThread(CountDownLatch firstTransactionLatch, final IgniteEx ignite, String cacheName, int txIndex) {
+            this.firstTransactionLatch = firstTransactionLatch;
+            this.ignite = ignite;
+            this.cacheName = cacheName;
+            this.txIndex = txIndex;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            for (int i = 0; i < transactionsCount(); i++) {
+                try {
+                    updateInTransaction(ignite.cache(cacheName));
+                }
+                finally {
+                    if (i == 0)
+                        firstTransactionLatch.countDown();
+                }
+            }
+        }
+
+        /**
+         * @throws IgniteException if fails
+         */
+        @SuppressWarnings("unchecked")
+        private void updateInTransaction(IgniteCache<Integer, AccountState> cache) throws IgniteException {
+            int accIdFrom;
+            int accIdTo;
+
+            for (;;) {
+                accIdFrom = random.nextInt(accountsCount());
+                accIdTo = random.nextInt(accountsCount());
+
+                if (accIdFrom == accIdTo)
+                    continue;
+
+                ClusterNode primaryForAccFrom = ignite.cachex(cacheName).affinity().mapKeyToNode(accIdFrom);
+                ClusterNode primaryForAccTo = ignite.cachex(cacheName).affinity().mapKeyToNode(accIdTo);
+
+                // Allows only transaction between accounts that primary on the same node if corresponding flag is enabled.
+                if (!crossNodeTransactions() && !primaryForAccFrom.id().equals(primaryForAccTo.id()))
+                    continue;
+
+                break;
+            }
+
+            AccountState acctFrom;
+            AccountState acctTo;
+
+            try (Transaction tx = ignite.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                acctFrom = cache.get(accIdFrom);
+                acctTo = cache.get(accIdTo);
+
+                Set<Integer> coinsToTransfer = acctFrom.coinsToTransfer(random);
+
+                AccountState nextFrom = acctFrom.removeCoins(tx.xid(), coinsToTransfer);
+                AccountState nextTo = acctTo.addCoins(tx.xid(), coinsToTransfer);
+
+                cache.put(accIdFrom, nextFrom);
+                cache.put(accIdTo, nextTo);
+
+                tx.commit();
+
+                completedTxs[txIndex].put(tx.xid(), new TxState(acctFrom, acctTo, nextFrom, nextTo, coinsToTransfer));
+            }
+        }
+
+        /**
+         * @param curr current
+         * @return random value
+         */
+        private long getNextAccountId(long curr) {
+            long randomVal;
+
+            do {
+                randomVal = random.nextInt(accountsCount());
+            }
+            while (curr == randomVal);
+
+            return randomVal;
+        }
+    }
+
+    /**
+     * Interface to implement custom failover scenario during transactional amount transfer.
+     */
+    public interface FailoverScenario {
+        /**
+         * Callback before nodes have started.
+         */
+        public default void beforeNodesStarted() throws Exception { }
+
+        /**
+         * Callback when first transaction has finished.
+         */
+        public default void afterFirstTransaction() throws Exception { }
+
+        /**
+         * Callback when all transactions have finished.
+         */
+        public default void afterTransactionsFinished() throws Exception { }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithPrimaryIndexCorruptionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithPrimaryIndexCorruptionTest.java
new file mode 100644
index 0000000..3260607
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithPrimaryIndexCorruptionTest.java
@@ -0,0 +1,238 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.transactions;
+
+import java.util.function.BiFunction;
+import java.util.function.Supplier;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteIllegalStateException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageHandler;
+import org.apache.ignite.internal.processors.cache.tree.SearchRow;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ * Test cases that check transaction data integrity after transaction commit failed.
+ */
+public class TransactionIntegrityWithPrimaryIndexCorruptionTest extends AbstractTransactionIntergrityTest {
+    /** Corruption enabled flag. */
+    private static volatile boolean corruptionEnabled;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        corruptionEnabled = false;
+
+        super.afterTest();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return 60 * 1000L;
+    }
+
+    /**
+     * Throws a test {@link AssertionError} during tx commit from {@link BPlusTree} and checks after that data is consistent.
+     */
+    public void testPrimaryIndexCorruptionDuringCommitOnPrimaryNode1() throws Exception {
+        doTestTransferAmount(new IndexCorruptionFailoverScenario(
+            true,
+            (hnd, tree) -> hnd instanceof BPlusTree.Search,
+            failoverPredicate(true, () -> new AssertionError("Test")))
+        );
+    }
+
+    /**
+     * Throws a test {@link RuntimeException} during tx commit from {@link BPlusTree} and checks after that data is consistent.
+     */
+    public void testPrimaryIndexCorruptionDuringCommitOnPrimaryNode2() throws Exception {
+        doTestTransferAmount(new IndexCorruptionFailoverScenario(
+            true,
+            (hnd, tree) -> hnd instanceof BPlusTree.Search,
+            failoverPredicate(true, () -> new RuntimeException("Test")))
+        );
+    }
+
+    /**
+     * Throws a test {@link AssertionError} during tx commit from {@link BPlusTree} and checks after that data is consistent.
+     */
+    public void testPrimaryIndexCorruptionDuringCommitOnBackupNode() throws Exception {
+        doTestTransferAmount(new IndexCorruptionFailoverScenario(
+            true,
+            (hnd, tree) -> hnd instanceof BPlusTree.Search,
+            failoverPredicate(false, () -> new AssertionError("Test")))
+        );
+    }
+
+    /**
+     * Throws a test {@link IgniteCheckedException} during tx commit from {@link BPlusTree} and checks after that data is consistent.
+     */
+    public void testPrimaryIndexCorruptionDuringCommitOnPrimaryNode3() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9082");
+
+        doTestTransferAmount(new IndexCorruptionFailoverScenario(
+            false,
+            (hnd, tree) -> hnd instanceof BPlusTree.Search,
+            failoverPredicate(true, () -> new IgniteCheckedException("Test")))
+        );
+    }
+
+    /**
+     * Creates failover predicate which generates error during transaction commmit.
+     *
+     * @param failOnPrimary If {@code true} index should be failed on transaction primary node.
+     * @param errorSupplier Supplier to create various errors.
+     */
+    private BiFunction<IgniteEx, SearchRow, Throwable> failoverPredicate(
+        boolean failOnPrimary,
+        Supplier<Throwable> errorSupplier
+    ) {
+        return (ignite, row) -> {
+            int cacheId = row.cacheId();
+            int partId = row.key().partition();
+
+            final ClusterNode locNode = ignite.localNode();
+            final AffinityTopologyVersion curTopVer = ignite.context().discovery().topologyVersionEx();
+
+            // Throw exception if current node is primary for given row.
+            return ignite.cachesx(c -> c.context().cacheId() == cacheId)
+                .stream()
+                .filter(c -> c.context().affinity().primaryByPartition(locNode, partId, curTopVer) == failOnPrimary)
+                .map(c -> errorSupplier.get())
+                .findFirst()
+                .orElse(null);
+        };
+    }
+
+    /**
+     * Index corruption failover scenario.
+     */
+    class IndexCorruptionFailoverScenario implements FailoverScenario {
+        /** Failed node index. */
+        static final int failedNodeIdx = 1;
+
+        /** Is node stopping expected after failover. */
+        private final boolean nodeStoppingExpected;
+
+        /** Predicate that will choose an instance of {@link BPlusTree} and page operation
+         * to make further failover in this tree using {@link #failoverPredicate}. */
+        private final BiFunction<PageHandler, BPlusTree, Boolean> treeCorruptionPredicate;
+
+        /** Function that may return error during row insertion into {@link BPlusTree}. */
+        private final BiFunction<IgniteEx, SearchRow, Throwable> failoverPredicate;
+
+        /**
+         * @param nodeStoppingExpected Node stopping expected.
+         * @param treeCorruptionPredicate Tree corruption predicate.
+         * @param failoverPredicate Failover predicate.
+         */
+        IndexCorruptionFailoverScenario(
+            boolean nodeStoppingExpected,
+            BiFunction<PageHandler, BPlusTree, Boolean> treeCorruptionPredicate,
+            BiFunction<IgniteEx, SearchRow, Throwable> failoverPredicate
+        ) {
+            this.nodeStoppingExpected = nodeStoppingExpected;
+            this.treeCorruptionPredicate = treeCorruptionPredicate;
+            this.failoverPredicate = failoverPredicate;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void beforeNodesStarted() {
+            BPlusTree.pageHndWrapper = (tree, hnd) -> {
+                final IgniteEx locIgnite = (IgniteEx) Ignition.localIgnite();
+
+                if (!locIgnite.name().endsWith(String.valueOf(failedNodeIdx)))
+                    return hnd;
+
+                if (treeCorruptionPredicate.apply(hnd, tree)) {
+                    log.info("Created corrupted tree handler for -> " + hnd + " " + tree);
+
+                    PageHandler<Object, BPlusTree.Result> delegate = (PageHandler<Object, BPlusTree.Result>) hnd;
+
+                    return new PageHandler<BPlusTree.Get, BPlusTree.Result>() {
+                        @Override public BPlusTree.Result run(int cacheId, long pageId, long page, long pageAddr, PageIO io, Boolean walPlc, BPlusTree.Get arg, int lvl) throws IgniteCheckedException {
+                            log.info("Invoked " + " " + cacheId + " " + arg.toString() + " for BTree (" + corruptionEnabled + ") -> " + arg.row() + " / " + arg.row().getClass());
+
+                            if (corruptionEnabled && (arg.row() instanceof SearchRow)) {
+                                SearchRow row = (SearchRow) arg.row();
+
+                                // Store cacheId to search row explicitly, as it can be zero if there is one cache in a group.
+                                Throwable res = failoverPredicate.apply(locIgnite, new SearchRow(cacheId, row.key()));
+
+                                if (res != null) {
+                                    if (res instanceof Error)
+                                        throw (Error) res;
+                                    else if (res instanceof RuntimeException)
+                                        throw (RuntimeException) res;
+                                    else if (res instanceof IgniteCheckedException)
+                                        throw (IgniteCheckedException) res;
+                                }
+                            }
+
+                            return delegate.run(cacheId, pageId, page, pageAddr, io, walPlc, arg, lvl);
+                        }
+
+                        @Override public boolean releaseAfterWrite(int cacheId, long pageId, long page, long pageAddr, BPlusTree.Get g, int lvl) {
+                            return g.canRelease(pageId, lvl);
+                        }
+                    };
+                }
+
+                return hnd;
+            };
+        }
+
+        /** {@inheritDoc} */
+        @Override public void afterFirstTransaction() {
+            // Enable BPlus tree corruption after first transactions have finished.
+            corruptionEnabled = true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void afterTransactionsFinished() throws Exception {
+            // Disable index corruption.
+            BPlusTree.pageHndWrapper = (tree, hnd) -> hnd;
+
+            if (nodeStoppingExpected) {
+                // Wait until node with corrupted index will left cluster.
+                GridTestUtils.waitForCondition(() -> {
+                    try {
+                        grid(failedNodeIdx);
+                    }
+                    catch (IgniteIllegalStateException e) {
+                        return true;
+                    }
+
+                    return false;
+                }, getTestTimeout());
+
+                // Failed node should be stopped.
+                GridTestUtils.assertThrows(log, () -> grid(failedNodeIdx), IgniteIllegalStateException.class, "");
+
+                // Re-start failed node.
+                startGrid(failedNodeIdx);
+
+                awaitPartitionMapExchange();
+            }
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithSystemWorkerDeathTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithSystemWorkerDeathTest.java
new file mode 100644
index 0000000..25aae4b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TransactionIntegrityWithSystemWorkerDeathTest.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.transactions;
+
+import java.lang.management.ManagementFactory;
+import javax.management.MBeanServer;
+import javax.management.MBeanServerInvocationHandler;
+import javax.management.ObjectName;
+import org.apache.ignite.IgniteIllegalStateException;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.worker.WorkersControlMXBeanImpl;
+import org.apache.ignite.mxbean.WorkersControlMXBean;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ *
+ */
+public class TransactionIntegrityWithSystemWorkerDeathTest extends AbstractTransactionIntergrityTest {
+    /** {@inheritDoc}. */
+    @Override protected long getTestTimeout() {
+        return 60 * 1000L;
+    }
+
+    /** {@inheritDoc}. */
+    @Override protected boolean persistent() {
+        return false;
+    }
+
+    /**
+     *
+     */
+    public void testFailoverWithDiscoWorkerTermination() throws Exception {
+        doTestTransferAmount(new FailoverScenario() {
+            static final int failedNodeIdx = 1;
+
+            /** {@inheritDoc}. */
+            @Override public void afterFirstTransaction() throws Exception {
+                // Terminate disco-event-worker thread on one node.
+                WorkersControlMXBean bean = workersMXBean(failedNodeIdx);
+
+                bean.terminateWorker(
+                    bean.getWorkerNames().stream()
+                        .filter(name -> name.startsWith("disco-event-worker"))
+                        .findFirst()
+                        .orElse(null)
+                );
+            }
+
+            /** {@inheritDoc}. */
+            @Override public void afterTransactionsFinished() throws Exception {
+                // Wait until node with death worker will left cluster.
+                GridTestUtils.waitForCondition(() -> {
+                    try {
+                        grid(failedNodeIdx);
+                    }
+                    catch (IgniteIllegalStateException e) {
+                        return true;
+                    }
+
+                    return false;
+                }, getTestTimeout());
+
+                // Failed node should be stopped.
+                GridTestUtils.assertThrows(log, () -> grid(failedNodeIdx), IgniteIllegalStateException.class, "");
+
+                // Re-start failed node.
+                startGrid(failedNodeIdx);
+
+                awaitPartitionMapExchange();
+            }
+        });
+    }
+
+    /**
+     * Configure workers mx bean.
+     */
+    private WorkersControlMXBean workersMXBean(int igniteInt) throws Exception {
+        ObjectName mbeanName = U.makeMBeanName(
+            getTestIgniteInstanceName(igniteInt),
+            "Kernal",
+            WorkersControlMXBeanImpl.class.getSimpleName()
+        );
+
+        MBeanServer mbeanSrv = ManagementFactory.getPlatformMBeanServer();
+
+        if (!mbeanSrv.isRegistered(mbeanName))
+            fail("MBean is not registered: " + mbeanName.getCanonicalName());
+
+        return MBeanServerInvocationHandler.newProxyInstance(mbeanSrv, mbeanName, WorkersControlMXBean.class, true);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionTest.java
index 4e47fa6..19fb4c9 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxOptimisticDeadlockDetectionTest.java
@@ -175,7 +175,12 @@
      * @return Created cache.
      */
     @SuppressWarnings("unchecked")
-    private IgniteCache createCache(CacheMode cacheMode, CacheWriteSynchronizationMode syncMode, boolean near) {
+    private IgniteCache createCache(CacheMode cacheMode, CacheWriteSynchronizationMode syncMode, boolean near)
+        throws IgniteInterruptedCheckedException, InterruptedException {
+        awaitPartitionMapExchange();
+
+        int minorTopVer = grid(0).context().discovery().topologyVersionEx().minorTopologyVersion();
+
         CacheConfiguration ccfg = defaultCacheConfiguration();
 
         ccfg.setName(CACHE_NAME);
@@ -196,6 +201,8 @@
             }
         }
 
+        waitForLateAffinityAssignment(minorTopVer);
+
         return cache;
     }
 
@@ -471,14 +478,8 @@
                     if (TX_IDS.contains(txId) && TX_IDS.size() < TX_CNT) {
                         GridTestUtils.runAsync(new Callable<Void>() {
                             @Override public Void call() throws Exception {
-                                while (TX_IDS.size() < TX_CNT) {
-                                    try {
-                                        U.sleep(50);
-                                    }
-                                    catch (IgniteInterruptedCheckedException e) {
-                                        e.printStackTrace();
-                                    }
-                                }
+                                while (TX_IDS.size() < TX_CNT)
+                                    U.sleep(50);
 
                                 TestCommunicationSpi.super.sendMessage(node, msg, ackC);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java
index 46651cc..88bebb2 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxPessimisticDeadlockDetectionTest.java
@@ -38,6 +38,7 @@
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.configuration.NearCacheConfiguration;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
 import org.apache.ignite.internal.util.GridConcurrentHashSet;
@@ -174,7 +175,12 @@
      * @return Created cache.
      */
     @SuppressWarnings("unchecked")
-    private IgniteCache createCache(CacheMode cacheMode, CacheWriteSynchronizationMode syncMode, boolean near) {
+    private IgniteCache createCache(CacheMode cacheMode, CacheWriteSynchronizationMode syncMode, boolean near)
+        throws IgniteInterruptedCheckedException, InterruptedException {
+        awaitPartitionMapExchange();
+
+        int minorTopVer = grid(0).context().discovery().topologyVersionEx().minorTopologyVersion();
+
         CacheConfiguration ccfg = defaultCacheConfiguration();
 
         ccfg.setName(CACHE_NAME);
@@ -198,6 +204,8 @@
             }
         }
 
+        waitForLateAffinityAssignment(minorTopVer);
+
         return cache;
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackAsyncTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackAsyncTest.java
index 4626dcd..4ca8ba3 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackAsyncTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackAsyncTest.java
@@ -17,7 +17,6 @@
 
 package org.apache.ignite.internal.processors.cache.transactions;
 
-import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -25,8 +24,13 @@
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.Callable;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicIntegerArray;
 import java.util.concurrent.atomic.AtomicReference;
@@ -42,24 +46,37 @@
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.NearCacheConfiguration;
+import org.apache.ignite.events.Event;
+import org.apache.ignite.events.EventType;
 import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteFutureCancelledCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.TestRecordingCommunicationSpi;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
-import org.apache.ignite.internal.processors.cache.GridCacheFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxFinishRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.util.future.GridFutureAdapter;
+import org.apache.ignite.internal.util.typedef.CIX1;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
+import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.G;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.visor.VisorTaskArgument;
+import org.apache.ignite.internal.visor.tx.VisorTxInfo;
+import org.apache.ignite.internal.visor.tx.VisorTxOperation;
+import org.apache.ignite.internal.visor.tx.VisorTxTask;
+import org.apache.ignite.internal.visor.tx.VisorTxTaskArg;
+import org.apache.ignite.internal.visor.tx.VisorTxTaskResult;
 import org.apache.ignite.lang.IgniteBiPredicate;
 import org.apache.ignite.lang.IgniteFuture;
 import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
@@ -70,11 +87,13 @@
 import org.apache.ignite.transactions.TransactionIsolation;
 import org.apache.ignite.transactions.TransactionRollbackException;
 
+import static java.lang.Thread.interrupted;
 import static java.lang.Thread.yield;
 import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
 import static org.apache.ignite.configuration.WALMode.LOG_ONLY;
 import static org.apache.ignite.testframework.GridTestUtils.runAsync;
+import static org.apache.ignite.testframework.GridTestUtils.waitForCondition;
 import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
 import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
 import static org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED;
@@ -332,13 +351,12 @@
 
         CountDownLatch waitCommit = new CountDownLatch(1);
 
+        // Used for passing tx instance to rollback thread.
         IgniteInternalFuture<?> lockFut = lockInTx(holdLockNode, keyLocked, waitCommit, 0);
 
         U.awaitQuiet(keyLocked);
 
-        final CountDownLatch rollbackLatch = new CountDownLatch(1);
-
-        final int txCnt = 10000;
+        final int txCnt = 1000;
 
         final IgniteKernal k = (IgniteKernal)tryLockNode;
 
@@ -346,7 +364,16 @@
 
         final GridCacheContext<Object, Object> cctx = ctx.cacheContext(CU.cacheId(CACHE_NAME));
 
-        final AtomicBoolean stop = new AtomicBoolean();
+        GridFutureAdapter<Transaction> txReadyFut = new GridFutureAdapter<>();
+
+        long seed = System.currentTimeMillis();
+
+        Random r = new Random(seed);
+
+        log.info("Running: node0=" + holdLockNode.cluster().localNode().consistentId() +
+            ", node1=" + tryLockNode.cluster().localNode().consistentId() +
+            ", useTimeout=" + useTimeout +
+            ", seed=" + seed);
 
         IgniteInternalFuture<?> txFut = multithreadedAsync(new Runnable() {
             @Override public void run() {
@@ -357,10 +384,10 @@
 
                     assertTrue(tx0 == null || tx0.state() == ROLLED_BACK);
 
-                    rollbackLatch.countDown();
-
                     try (Transaction tx = tryLockNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ,
-                        useTimeout ? 500 : 0, 1)) {
+                        useTimeout ? 50 : 0, 1)) {
+
+                        txReadyFut.onDone(tx);
 
                         // Will block on lock request until rolled back asynchronously.
                         Object o = tryLockNode.cache(CACHE_NAME).get(0);
@@ -372,29 +399,30 @@
                     }
                 }
 
-                stop.set(true);
+                txReadyFut.onDone((Transaction)null);
             }
         }, 1, "tx-get-thread");
 
         IgniteInternalFuture<?> rollbackFut = multithreadedAsync(new Runnable() {
             @Override public void run() {
-                U.awaitQuiet(rollbackLatch);
-
-                doSleep(50);
-
                 Set<IgniteUuid> rolledBackVers = new HashSet<>();
 
                 int proc = 1;
 
-                while(!stop.get()) {
-                    for (Transaction tx : tryLockNode.transactions().localActiveTransactions()) {
+                while(true) {
+                    try {
+                        Transaction tx = txReadyFut.get();
+
+                        txReadyFut.reset();
+
+                        if (tx == null)
+                            break;
+
+                        doSleep(r.nextInt(15)); // Wait a bit to reduce chance of rolling back empty transactions.
+
                         if (rolledBackVers.contains(tx.xid()))
                             fail("Rollback version is expected");
 
-                        // Skip write transaction.
-                        if (LABEL.equals(tx.label()))
-                            continue;
-
                         try {
                             if (proc % 2 == 0)
                                 tx.rollback();
@@ -407,14 +435,15 @@
 
                         rolledBackVers.add(tx.xid());
 
-                        if (proc % 1000 == 0)
+                        if (proc % 100 == 0)
                             log.info("Rolled back: " + proc);
 
                         proc++;
                     }
+                    catch (IgniteCheckedException e) {
+                        fail(e.getMessage());
+                    }
                 }
-
-                assertEquals("Unexpected size", txCnt, rolledBackVers.size());
             }
         }, 1, "tx-rollback-thread");
 
@@ -601,8 +630,6 @@
      *
      */
     public void testMixedAsyncRollbackTypes() throws Exception {
-        fail("https://issues.apache.org/jira/browse/IGNITE-8509");
-
         final Ignite client = startClient();
 
         final AtomicBoolean stop = new AtomicBoolean();
@@ -628,134 +655,124 @@
         final LongAdder failed = new LongAdder();
         final LongAdder rolledBack = new LongAdder();
 
-        IgniteInternalFuture<?> txFut = multithreadedAsync(new Runnable() {
-            @Override public void run() {
-                while (!stop.get()) {
-                    int nodeId = r.nextInt(GRID_CNT + 1);
+        ConcurrentMap<Ignite, BlockingQueue<Transaction>> perNodeTxs = new ConcurrentHashMap<>();
 
-                    // Choose random node to start tx on.
-                    Ignite node = nodeId == GRID_CNT || nearCacheEnabled() ? client : grid(nodeId);
+        for (Ignite ignite : G.allGrids())
+            perNodeTxs.put(ignite, new ArrayBlockingQueue<>(1000));
 
-                    TransactionConcurrency conc = TC_VALS[r.nextInt(TC_VALS.length)];
-                    TransactionIsolation isolation = TI_VALS[r.nextInt(TI_VALS.length)];
+        IgniteInternalFuture<?> txFut = multithreadedAsync(() -> {
+            while (!stop.get()) {
+                int nodeId = r.nextInt(GRID_CNT + 1);
 
-                    long timeout = r.nextInt(50) + 50; // Timeout is necessary to prevent deadlocks.
+                // Choose random node to start tx on.
+                Ignite node = nodeId == GRID_CNT || nearCacheEnabled() ? client : grid(nodeId);
 
-                    try (Transaction tx = node.transactions().txStart(conc, isolation, timeout, txSize)) {
-                        int setSize = r.nextInt(txSize / 2) + 1;
+                TransactionConcurrency conc = TC_VALS[r.nextInt(TC_VALS.length)];
+                TransactionIsolation isolation = TI_VALS[r.nextInt(TI_VALS.length)];
 
-                        for (int i = 0; i < setSize; i++) {
-                            switch (r.nextInt(4)) {
-                                case 0:
-                                    node.cache(CACHE_NAME).remove(r.nextInt(txSize));
+                // Timeout is necessary otherwise deadlock is possible due to randomness of lock acquisition.
+                long timeout = r.nextInt(50) + 50;
 
-                                    break;
+                try (Transaction tx = node.transactions().txStart(conc, isolation, timeout, txSize)) {
+                    BlockingQueue<Transaction> nodeQ = perNodeTxs.get(node);
 
-                                case 1:
-                                    node.cache(CACHE_NAME).get(r.nextInt(txSize));
+                    nodeQ.put(tx);
 
-                                    break;
+                    int setSize = r.nextInt(txSize / 2) + 1;
 
-                                case 2:
-                                    final Integer v = (Integer)node.cache(CACHE_NAME).get(r.nextInt(txSize));
+                    for (int i = 0; i < setSize; i++) {
+                        switch (r.nextInt(4)) {
+                            case 0:
+                                node.cache(CACHE_NAME).remove(r.nextInt(txSize));
 
-                                    node.cache(CACHE_NAME).put(r.nextInt(txSize), (v == null ? 0 : v) + 1);
+                                break;
 
-                                    break;
+                            case 1:
+                                node.cache(CACHE_NAME).get(r.nextInt(txSize));
 
-                                case 3:
-                                    node.cache(CACHE_NAME).put(r.nextInt(txSize), 0);
+                                break;
 
-                                    break;
+                            case 2:
+                                final Integer v = (Integer)node.cache(CACHE_NAME).get(r.nextInt(txSize));
 
-                                default:
-                                    fail("Unexpected opcode");
-                            }
+                                node.cache(CACHE_NAME).put(r.nextInt(txSize), (v == null ? 0 : v) + 1);
+
+                                break;
+
+                            case 3:
+                                node.cache(CACHE_NAME).put(r.nextInt(txSize), 0);
+
+                                break;
+
+                            default:
+                                fail("Unexpected opcode");
                         }
-
-                        tx.commit();
-
-                        completed.add(1);
-                    }
-                    catch (Throwable e) {
-                        failed.add(1);
                     }
 
-                    total.add(1);
+                    tx.commit();
+
+                    completed.add(1);
                 }
+                catch (Throwable e) {
+                    failed.add(1);
+                }
+
+                total.add(1);
             }
         }, threadCnt, "tx-thread");
 
         final AtomicIntegerArray idx = new AtomicIntegerArray(GRID_CNT + 1);
 
-        IgniteInternalFuture<?> rollbackFut = multithreadedAsync(new Runnable() {
-            @Override public void run() {
-                int concurrentRollbackCnt = 5;
+        CIX1<Transaction> rollbackClo = new CIX1<Transaction>() {
+            @Override public void applyx(Transaction tx) throws IgniteCheckedException {
+                try {
+                    IgniteFuture<Void> rollbackFut = tx.rollbackAsync();
 
-                List<IgniteFuture<?>> futs = new ArrayList<>(concurrentRollbackCnt);
-
-                while (!stop.get()) {
-                    // Choose node randomly.
-                    final int nodeId = r.nextInt(GRID_CNT + 1);
-
-                    // Reserve node.
-                    if (!idx.compareAndSet(nodeId, 0, 1)) {
-                        yield();
-
-                        continue;
-                    }
-
-                    Ignite node = nodeId == GRID_CNT || nearCacheEnabled() ? client : grid(nodeId);
-
-                    Collection<Transaction> transactions = node.transactions().localActiveTransactions();
-
-                    for (Transaction tx : transactions) {
-                        rolledBack.add(1);
-
-                        if (rolledBack.sum() % 1000 == 0)
-                            info("Processed: " + rolledBack.sum());
-
-                        try {
-                            IgniteFuture<Void> rollbackFut = tx.rollbackAsync();
-
-                            rollbackFut.listen(new IgniteInClosure<IgniteFuture<Void>>() {
-                                @Override public void apply(IgniteFuture<Void> fut) {
-                                    tx.close();
-                                }
-                            });
-
-                            futs.add(rollbackFut);
+                    rollbackFut.listen(new IgniteInClosure<IgniteFuture<Void>>() {
+                        @Override public void apply(IgniteFuture<Void> fut) {
+                            tx.close();
                         }
-                        catch (Throwable t) {
-                            log.error("Exception on async rollback", t);
+                    });
+                }
+                catch (Throwable t) {
+                    log.error("Exception on async rollback", t);
 
-                            fail("Exception is not expected");
-                        }
+                    throw new IgniteCheckedException("Rollback failed", t);
+                }
+            }
+        };
 
-                        if (futs.size() == concurrentRollbackCnt) {
-                            for (IgniteFuture<?> fut : futs)
-                                try {
-                                    fut.get();
-                                }
-                                catch (IgniteException e) {
-                                    log.warning("Future was rolled back with error", e);
-                                }
+        IgniteInternalFuture<?> rollbackFut = multithreadedAsync(() -> {
+            while (!interrupted()) {
+                // Choose node randomly.
+                final int nodeId = r.nextInt(GRID_CNT + 1);
 
-                            futs.clear();
-                        }
-                    }
+                // Reserve node for rollback.
+                if (!idx.compareAndSet(nodeId, 0, 1)) {
+                    yield();
 
-                    idx.set(nodeId, 0);
+                    continue;
                 }
 
-                for (IgniteFuture<?> fut : futs)
-                    try {
-                        fut.get();
-                    }
-                    catch (Throwable t) {
-                        // No-op.
-                    }
+                Ignite node = nodeId == GRID_CNT || nearCacheEnabled() ? client : grid(nodeId);
 
+                BlockingQueue<Transaction> nodeQ = perNodeTxs.get(node);
+
+                Transaction tx;
+
+                // Rollback all transaction
+                while((tx = nodeQ.poll()) != null) {
+                    rolledBack.add(1);
+
+                    doSleep(r.nextInt(50)); // Add random sleep to increase completed txs count.
+
+                    if (rolledBack.sum() % 1000 == 0)
+                        info("Rolled back so far: " + rolledBack.sum());
+
+                    rollbackClo.apply(tx);
+                }
+
+                idx.set(nodeId, 0);
             }
         }, 3, "rollback-thread"); // Rollback by multiple threads.
 
@@ -763,9 +780,27 @@
 
         stop.set(true);
 
-        txFut.get();
+        txFut.get(); // Stop tx generation.
 
-        rollbackFut.get();
+        rollbackFut.cancel();
+
+        try {
+            rollbackFut.get();
+        }
+        catch (IgniteFutureCancelledCheckedException e) {
+            // Expected.
+        }
+
+        // Rollback remaining transactions.
+        for (BlockingQueue<Transaction> queue : perNodeTxs.values()) {
+            Transaction tx;
+
+            while((tx = queue.poll()) != null) {
+                rolledBack.add(1);
+
+                rollbackClo.apply(tx);
+            }
+        }
 
         log.info("total=" + total.sum() + ", completed=" + completed.sum() + ", failed=" + failed.sum() +
             ", rolledBack=" + rolledBack.sum());
@@ -864,6 +899,124 @@
     }
 
     /**
+     *
+     */
+    public void testRollbackOnTopologyLockPessimistic() throws Exception {
+        final Ignite client = startClient();
+
+        Ignite crd = grid(0);
+
+        List<Integer> keys = primaryKeys(grid(1).cache(CACHE_NAME), 1);
+
+        assertTrue(crd.cluster().localNode().order() == 1);
+
+        CountDownLatch txLatch = new CountDownLatch(1);
+        CountDownLatch tx2Latch = new CountDownLatch(1);
+        CountDownLatch commitLatch = new CountDownLatch(1);
+
+        // Start tx holding topology.
+        IgniteInternalFuture txFut = runAsync(new Runnable() {
+            @Override public void run() {
+                List<Integer> keys = primaryKeys(grid(0).cache(CACHE_NAME), 1);
+
+                try (Transaction tx = client.transactions().txStart()) {
+                    client.cache(CACHE_NAME).put(keys.get(0), 0);
+
+                    txLatch.countDown();
+
+                    assertTrue(U.await(commitLatch, 10, TimeUnit.SECONDS));
+
+                    tx.commit();
+
+                    fail();
+                }
+                catch (Exception e) {
+                    // Expected.
+                }
+            }
+        });
+
+        U.awaitQuiet(txLatch);
+
+        crd.events().localListen(new IgnitePredicate<Event>() {
+            @Override public boolean apply(Event evt) {
+                runAsync(new Runnable() {
+                    @Override public void run() {
+                        try(Transaction tx = crd.transactions().withLabel("testLbl").txStart()) {
+                            // Wait for node start.
+                            waitForCondition(new GridAbsPredicate() {
+                                @Override public boolean apply() {
+                                    return crd.cluster().topologyVersion() != GRID_CNT +
+                                        /** client node */ 1  + /** stop server node */ 1 + /** start server node */ 1;
+                                }
+                            }, 10_000);
+
+                            tx2Latch.countDown();
+
+                            crd.cache(CACHE_NAME).put(keys.get(0), 0);
+
+                            assertTrue(U.await(commitLatch, 10, TimeUnit.SECONDS));
+
+                            tx.commit();
+
+                            fail();
+                        }
+                        catch (Exception e) {
+                            // Expected.
+                        }
+                    }
+                });
+
+                return false;
+            }
+        }, EventType.EVT_NODE_FAILED, EventType.EVT_NODE_LEFT);
+
+        IgniteInternalFuture restartFut = runAsync(new Runnable() {
+            @Override public void run() {
+                stopGrid(2);
+
+                try {
+                    startGrid(2);
+                }
+                catch (Exception e) {
+                    fail();
+                }
+            }
+        });
+
+        U.awaitQuiet(tx2Latch);
+
+        // Rollback tx using kill task.
+        VisorTxTaskArg arg =
+            new VisorTxTaskArg(VisorTxOperation.KILL, null, null, null, null, null, null, null, null, null);
+
+        Map<ClusterNode, VisorTxTaskResult> res = client.compute(client.cluster().forPredicate(F.alwaysTrue())).
+            execute(new VisorTxTask(), new VisorTaskArgument<>(client.cluster().localNode().id(), arg, false));
+
+        int expCnt = 0;
+
+        for (Map.Entry<ClusterNode, VisorTxTaskResult> entry : res.entrySet()) {
+            if (entry.getValue().getInfos().isEmpty())
+                continue;
+
+            for (VisorTxInfo info : entry.getValue().getInfos()) {
+                log.info(info.toUserString());
+
+                expCnt++;
+            }
+        }
+
+        assertEquals("Expecting 2 transactions", 2, expCnt);
+
+        commitLatch.countDown();
+
+        txFut.get();
+        restartFut.get();
+
+        checkFutures();
+    }
+
+    /**
      * Locks entry in tx and delays commit until signalled.
      *
      * @param node Near node.
@@ -896,22 +1049,6 @@
     }
 
     /**
-     * Checks if all tx futures are finished.
-     */
-    private void checkFutures() {
-        for (Ignite ignite : G.allGrids()) {
-            IgniteEx ig = (IgniteEx)ignite;
-
-            final Collection<GridCacheFuture<?>> futs = ig.context().cache().context().mvcc().activeFutures();
-
-            for (GridCacheFuture<?> fut : futs)
-                log.info("Waiting for future: " + fut);
-
-            assertTrue("Expecting no active futures: node=" + ig.localNode().id(), futs.isEmpty());
-        }
-    }
-
-    /**
      * @param tx Tx to rollback.
      */
     private IgniteInternalFuture<?> rollbackAsync(final Transaction tx) throws Exception {
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java
index 9c37cfa..ccf4c8a 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTimeoutTest.java
@@ -19,8 +19,10 @@
 
 import java.util.Collection;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -35,6 +37,8 @@
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.NearCacheConfiguration;
+import org.apache.ignite.events.Event;
+import org.apache.ignite.events.EventType;
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.IgniteFutureTimeoutCheckedException;
 import org.apache.ignite.internal.IgniteInternalFuture;
@@ -42,12 +46,24 @@
 import org.apache.ignite.internal.TestRecordingCommunicationSpi;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareResponse;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.G;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.internal.visor.VisorTaskArgument;
+import org.apache.ignite.internal.visor.tx.VisorTxInfo;
+import org.apache.ignite.internal.visor.tx.VisorTxOperation;
+import org.apache.ignite.internal.visor.tx.VisorTxTask;
+import org.apache.ignite.internal.visor.tx.VisorTxTaskArg;
+import org.apache.ignite.internal.visor.tx.VisorTxTaskResult;
+import org.apache.ignite.lang.IgniteBiPredicate;
+import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
@@ -62,8 +78,13 @@
 import static java.lang.Thread.sleep;
 import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
 import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.testframework.GridTestUtils.runAsync;
+import static org.apache.ignite.testframework.GridTestUtils.runAsync;
+import static org.apache.ignite.transactions.TransactionConcurrency.OPTIMISTIC;
 import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.READ_COMMITTED;
 import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+import static org.apache.ignite.transactions.TransactionIsolation.SERIALIZABLE;
 
 /**
  * Tests an ability to eagerly rollback timed out transactions.
@@ -88,6 +109,8 @@
     @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
 
+        cfg.setConsistentId(igniteInstanceName);
+
         ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
 
         cfg.setCommunicationSpi(new TestRecordingCommunicationSpi());
@@ -298,7 +321,7 @@
 
         final CountDownLatch l = new CountDownLatch(2);
 
-        IgniteInternalFuture<?> fut1 = GridTestUtils.runAsync(new Runnable() {
+        IgniteInternalFuture<?> fut1 = runAsync(new Runnable() {
             @Override public void run() {
                 try {
                     try (Transaction tx = node1.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 5000, 2)) {
@@ -322,7 +345,7 @@
             }
         }, "First");
 
-        IgniteInternalFuture<?> fut2 = GridTestUtils.runAsync(new Runnable() {
+        IgniteInternalFuture<?> fut2 = runAsync(new Runnable() {
             @Override public void run() {
                 try (Transaction tx = node2.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 2)) {
                     node2.cache(CACHE_NAME).put(2, 2);
@@ -528,7 +551,7 @@
                 final int idx0 = idx.getAndIncrement();
 
                 if (idx0 == 0) {
-                    try(final Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 1)) {
+                    try (final Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, 0, 1)) {
                         client.cache(CACHE_NAME).put(0, 0); // Lock is owned.
 
                         readStartLatch.countDown();
@@ -587,6 +610,198 @@
     /**
      *
      */
+    public void testRollbackOnTimeoutTxRemapOptimisticReadCommitted() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, READ_COMMITTED, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxRemapOptimisticRepeatableRead() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, REPEATABLE_READ, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxRemapOptimisticSerializable() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, SERIALIZABLE, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxRemapPessimisticReadCommitted() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, READ_COMMITTED, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxRemapPessimisticRepeatableRead() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, REPEATABLE_READ, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxRemapPessimisticSerializable() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, SERIALIZABLE, true);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapOptimisticReadCommitted() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, READ_COMMITTED, false);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapOptimisticRepeatableRead() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, REPEATABLE_READ, false);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapOptimisticSerializable() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(OPTIMISTIC, SERIALIZABLE, false);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapPessimisticReadCommitted() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, READ_COMMITTED, false);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapPessimisticRepeatableRead() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, REPEATABLE_READ, false);
+    }
+
+    /**
+     *
+     */
+    public void testRollbackOnTimeoutTxServerRemapPessimisticSerializable() throws Exception {
+        doTestRollbackOnTimeoutTxRemap(PESSIMISTIC, SERIALIZABLE, false);
+    }
+
+
+    /**
+     * @param concurrency Concurrency.
+     * @param isolation Isolation.
+     * @param clientWait {@code True} to wait remap on client, otherwise wait remap on server.
+     */
+    private void doTestRollbackOnTimeoutTxRemap(TransactionConcurrency concurrency,
+        TransactionIsolation isolation,
+        boolean clientWait) throws Exception {
+        IgniteEx client = (IgniteEx)startClient();
+
+        Ignite crd = grid(0);
+
+        assertTrue(crd.cluster().localNode().order() == 1);
+
+        List<Integer> keys = movingKeysAfterJoin(grid(1), CACHE_NAME, 1);
+
+        // Delay exchange finish on server nodes if clientWait=true, or on all nodes otherwise (excluding joining node).
+        TestRecordingCommunicationSpi.spi(crd).blockMessages((node,
+            msg) -> node.order() < 5 && msg instanceof GridDhtPartitionsFullMessage &&
+            (!clientWait || node.order() != grid(1).cluster().localNode().order()));
+
+        // Delay prepare until exchange is finished.
+        TestRecordingCommunicationSpi.spi(client).blockMessages((node, msg) -> {
+            boolean block = false;
+
+            if (concurrency == PESSIMISTIC) {
+                if (msg instanceof GridNearLockRequest) {
+                    block = true;
+
+                    assertEquals(GRID_CNT + 1, ((GridNearLockRequest)msg).topologyVersion().topologyVersion());
+                }
+            }
+            else {
+                if (msg instanceof GridNearTxPrepareRequest) {
+                    block = true;
+
+                    assertEquals(GRID_CNT + 1, ((GridNearTxPrepareRequest)msg).topologyVersion().topologyVersion());
+                }
+            }
+
+            return block;
+        });
+
+        // Start tx and map on topver=GRID_CNT + 1
+        // Delay map until exchange.
+        // Start new node.
+
+        IgniteInternalFuture fut0 = runAsync(new Runnable() {
+            @Override public void run() {
+                try (Transaction tx = client.transactions().txStart(concurrency, isolation, 5000, 1)) {
+                    client.cache(CACHE_NAME).put(keys.get(0), 0);
+
+                    tx.commit();
+
+                    fail();
+                }
+                catch (Exception e) {
+                    assertTrue(X.hasCause(e, TransactionTimeoutException.class));
+                }
+            }
+        });
+
+        IgniteInternalFuture fut1 = runAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    TestRecordingCommunicationSpi.spi(client).waitForBlocked(); // TX is trying to prepare on prev top ver.
+
+                    startGrid(GRID_CNT);
+                }
+                catch (Exception e) {
+                    fail(e.getMessage());
+                }
+            }
+        });
+
+        IgniteInternalFuture fut2 = runAsync(new Runnable() {
+            @Override public void run() {
+                try {
+                    // Wait for all full messages to be ready.
+                    TestRecordingCommunicationSpi.spi(crd).waitForBlocked(GRID_CNT + (clientWait ? 0 : 1));
+
+                    // Trigger remap.
+                    TestRecordingCommunicationSpi.spi(client).stopBlock();
+                }
+                catch (Exception e) {
+                    fail(e.getMessage());
+                }
+            }
+        });
+
+        fut0.get(30_000);
+        fut1.get(30_000);
+        fut2.get(30_000);
+
+        TestRecordingCommunicationSpi.spi(crd).stopBlock();
+
+        // FIXME: If using awaitPartitionMapExchange for waiting it some times fail while waiting for owners.
+        IgniteInternalFuture<?> topFut = ((IgniteEx)client).context().cache().context().exchange().
+            affinityReadyFuture(new AffinityTopologyVersion(GRID_CNT + 2, 1));
+
+        assertNotNull(topFut);
+
+        topFut.get(10_000);
+
+        checkFutures();
+    }
+
+    /**
+     *
+     */
     private void testEnlistMany(boolean write) throws Exception {
         final Ignite client = startClient();
 
@@ -812,7 +1027,7 @@
 
         final int recordsCnt = 5;
 
-        IgniteInternalFuture<?> fut1 = GridTestUtils.runAsync(new Runnable() {
+        IgniteInternalFuture<?> fut1 = runAsync(new Runnable() {
             @Override public void run() {
                 try (Transaction tx = near.transactions().txStart(PESSIMISTIC, REPEATABLE_READ, timeout, 0)) {
                     try {
@@ -860,7 +1075,7 @@
             }
         }, "First");
 
-        IgniteInternalFuture<?> fut2 = GridTestUtils.runAsync(new Runnable() {
+        IgniteInternalFuture<?> fut2 = runAsync(new Runnable() {
             @Override public void run() {
                 U.awaitQuiet(blocked);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTopologyChangeTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTopologyChangeTest.java
index 2542cdb..13c5e41 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTopologyChangeTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/transactions/TxRollbackOnTopologyChangeTest.java
@@ -209,20 +209,4 @@
 
         checkFutures();
     }
-
-    /**
-     * Checks if all tx futures are finished.
-     */
-    private void checkFutures() {
-        for (Ignite ignite : G.allGrids()) {
-            IgniteEx ig = (IgniteEx)ignite;
-
-            final Collection<GridCacheFuture<?>> futs = ig.context().cache().context().mvcc().activeFutures();
-
-            for (GridCacheFuture<?> fut : futs)
-                log.info("Waiting for future: " + fut);
-
-            assertTrue("Expecting no active futures: node=" + ig.localNode().id(), futs.isEmpty());
-        }
-    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java
index bd9a099..487cdbe 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/BPlusTreeSelfTest.java
@@ -28,12 +28,14 @@
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
+import java.util.TreeSet;
 import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.CyclicBarrier;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -252,6 +254,53 @@
     /**
      * @throws IgniteCheckedException If failed.
      */
+    public void testFindWithClosure() throws IgniteCheckedException {
+        TestTree tree = createTestTree(true);
+        TreeMap<Long, Long> map = new TreeMap<>();
+
+        long size = CNT * CNT;
+
+        for (long i = 1; i <= size; i++) {
+            tree.put(i);
+            map.put(i, i);
+        }
+
+        checkCursor(tree.find(null, null, new TestTreeFindFilteredClosure(Collections.<Long>emptySet()), null),
+            Collections.<Long>emptyList().iterator());
+
+        checkCursor(tree.find(null, null, new TestTreeFindFilteredClosure(map.keySet()), null),
+            map.values().iterator());
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        for (int i = 0; i < 100; i++) {
+            Long val = rnd.nextLong(size) + 1;
+
+            checkCursor(tree.find(null, null, new TestTreeFindFilteredClosure(Collections.singleton(val)), null),
+                Collections.singleton(val).iterator());
+        }
+
+        for (int i = 0; i < 200; i++) {
+            long vals = rnd.nextLong(size) + 1;
+
+            TreeSet<Long> exp = new TreeSet<>();
+
+            for (long k = 0; k < vals; k++)
+                exp.add(rnd.nextLong(size) + 1);
+
+            checkCursor(tree.find(null, null, new TestTreeFindFilteredClosure(exp), null), exp.iterator());
+
+            checkCursor(tree.find(0L, null, new TestTreeFindFilteredClosure(exp), null), exp.iterator());
+
+            checkCursor(tree.find(0L, size, new TestTreeFindFilteredClosure(exp), null), exp.iterator());
+
+            checkCursor(tree.find(null, size, new TestTreeFindFilteredClosure(exp), null), exp.iterator());
+        }
+    }
+
+    /**
+     * @throws IgniteCheckedException If failed.
+     */
     public void _testBenchInvoke() throws IgniteCheckedException {
         MAX_PER_PAGE = 10;
 
@@ -605,6 +654,7 @@
             assertNoLocks();
 
             assertEquals(x, tree.findOne(x).longValue());
+            checkIterate(tree, x, x, x, true);
 
             assertNoLocks();
 
@@ -619,12 +669,15 @@
 
         assertNull(tree.findOne(-1L));
 
-        for (long x = 0; x < cnt; x++)
+        for (long x = 0; x < cnt; x++) {
             assertEquals(x, tree.findOne(x).longValue());
+            checkIterate(tree, x, x, x, true);
+        }
 
         assertNoLocks();
 
         assertNull(tree.findOne(cnt));
+        checkIterate(tree, cnt, cnt, null, false);
 
         for (long x = RMV_INC > 0 ? 0 : cnt - 1; x >= 0 && x < cnt; x += RMV_INC) {
             X.println(" -- " + x);
@@ -638,6 +691,7 @@
             assertNoLocks();
 
             assertNull(tree.findOne(x));
+            checkIterate(tree, x, x, null, false);
 
             assertNoLocks();
 
@@ -654,6 +708,40 @@
     }
 
     /**
+     * @param tree Tree.
+     * @param lower Lower bound.
+     * @param upper Upper bound.
+     * @param exp Value to find.
+     * @param expFound {@code True} if value should be found.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void checkIterate(TestTree tree, long lower, long upper, Long exp, boolean expFound)
+        throws IgniteCheckedException {
+        TestTreeRowClosure c = new TestTreeRowClosure(exp);
+
+        tree.iterate(lower, upper, c);
+
+        assertEquals(expFound, c.found);
+    }
+
+    /**
+     * @param tree Tree.
+     * @param lower Lower bound.
+     * @param upper Upper bound.
+     * @param c Closure.
+     * @param expFound {@code True} if value should be found.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void checkIterateC(TestTree tree, long lower, long upper, TestTreeRowClosure c, boolean expFound)
+        throws IgniteCheckedException {
+        c.found = false;
+
+        tree.iterate(lower, upper, c);
+
+        assertEquals(expFound, c.found);
+    }
+
+    /**
      * @throws IgniteCheckedException If failed.
      */
     public void testRandomInvoke_1_30_1() throws IgniteCheckedException {
@@ -2057,7 +2145,209 @@
     }
 
     /**
-     *
+     * @throws Exception If failed.
+     */
+    public void testIterate() throws Exception {
+        MAX_PER_PAGE = 5;
+
+        TestTree tree = createTestTree(true);
+
+        checkIterate(tree, 0L, 100L, null, false);
+
+        for (long idx = 1L; idx <= 10L; ++idx)
+            tree.put(idx);
+
+        for (long idx = 1L; idx <= 10L; ++idx)
+            checkIterate(tree, idx, 100L, idx, true);
+
+        checkIterate(tree, 0L, 100L, 1L, true);
+
+        for (long idx = 1L; idx <= 10L; ++idx)
+            checkIterate(tree, idx, 100L, 10L, true);
+
+        checkIterate(tree, 0L, 100L, 100L, false);
+
+        for (long idx = 1L; idx <= 10L; ++idx)
+            checkIterate(tree, 0L, 100L, idx, true);
+
+        for (long idx = 0L; idx <= 10L; ++idx)
+            checkIterate(tree, idx, 11L, -1L, false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIterateConcurrentPutRemove() throws Exception {
+        iterateConcurrentPutRemove();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIterateConcurrentPutRemove_1() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-7265");
+
+        MAX_PER_PAGE = 1;
+
+        iterateConcurrentPutRemove();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIterateConcurrentPutRemove_5() throws Exception {
+        MAX_PER_PAGE = 5;
+
+        iterateConcurrentPutRemove();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIteratePutRemove_10() throws Exception {
+        MAX_PER_PAGE = 10;
+
+        iterateConcurrentPutRemove();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void iterateConcurrentPutRemove() throws Exception {
+        final TestTree tree = createTestTree(true);
+
+        final int KEYS = 10_000;
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        for (int i = 0; i < 10; i++) {
+            for (long idx = 0L; idx < KEYS; ++idx)
+                tree.put(idx);
+
+            final Long findKey;
+
+            if (MAX_PER_PAGE > 0) {
+                switch (i) {
+                    case 0:
+                        findKey = 1L;
+
+                        break;
+
+                    case 1:
+                        findKey = (long)MAX_PER_PAGE;
+
+                        break;
+
+                    case 2:
+                        findKey = (long)MAX_PER_PAGE - 1;
+
+                        break;
+
+                    case 3:
+                        findKey = (long)MAX_PER_PAGE + 1;
+
+                        break;
+
+                    case 4:
+                        findKey = (long)(KEYS / MAX_PER_PAGE / 2) * MAX_PER_PAGE;
+
+                        break;
+
+                    case 5:
+                        findKey = (long)(KEYS / MAX_PER_PAGE / 2) * MAX_PER_PAGE - 1;
+
+                        break;
+
+                    case 6:
+                        findKey = (long)(KEYS / MAX_PER_PAGE / 2) * MAX_PER_PAGE + 1;
+
+                        break;
+
+                    case 7:
+                        findKey = (long)KEYS - 1;
+
+                        break;
+
+                    default:
+                        findKey = rnd.nextLong(KEYS);
+                }
+            }
+            else
+                findKey = rnd.nextLong(KEYS);
+
+            info("Iteration [iter=" + i + ", key=" + findKey + ']');
+
+            assertEquals(findKey, tree.findOne(findKey));
+            checkIterate(tree, findKey, findKey, findKey, true);
+
+            IgniteInternalFuture getFut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() throws Exception {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    TestTreeRowClosure p = new TestTreeRowClosure(findKey);
+
+                    TestTreeRowClosure falseP = new TestTreeRowClosure(-1L);
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        int shift = MAX_PER_PAGE > 0 ? rnd.nextInt(MAX_PER_PAGE * 2) : rnd.nextInt(100);
+
+                        checkIterateC(tree, findKey, findKey, p, true);
+
+                        checkIterateC(tree, findKey - shift, findKey, p, true);
+
+                        checkIterateC(tree, findKey - shift, findKey + shift, p, true);
+
+                        checkIterateC(tree, findKey, findKey + shift, p, true);
+
+                        checkIterateC(tree, -100L, KEYS + 100L, falseP, false);
+
+                        cnt++;
+                    }
+
+                    info("Done, read count: " + cnt);
+
+                    return null;
+                }
+            }, 10, "find");
+
+            asyncRunFut = new GridCompoundFuture<>();
+
+            asyncRunFut.add(getFut);
+
+            asyncRunFut.markInitialized();
+
+            try {
+                U.sleep(100);
+
+                for (int j = 0; j < 20; j++) {
+                    for (long idx = 0L; idx < KEYS / 2; ++idx) {
+                        long toRmv = rnd.nextLong(KEYS);
+
+                        if (toRmv != findKey)
+                            tree.remove(toRmv);
+                    }
+
+                    for (long idx = 0L; idx < KEYS / 2; ++idx) {
+                        long put = rnd.nextLong(KEYS);
+
+                        tree.put(put);
+                    }
+                }
+            }
+            finally {
+                stop.set(true);
+            }
+
+            asyncRunFut.get();
+
+            stop.set(false);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
      */
     public void testConcurrentGrowDegenerateTreeAndConcurrentRemove() throws Exception {
         //calculate tree size when split happens
@@ -2264,6 +2554,17 @@
 
                         last = c.get();
                     }
+
+                    TestTreeFindFirstClosure cl = new TestTreeFindFirstClosure();
+
+                    tree.iterate((long)low, (long)high, cl);
+
+                    last = cl.val;
+
+                    if (last != null) {
+                        assertTrue(low + " <= " + last + " <= " + high, last >= low);
+                        assertTrue(low + " <= " + last + " <= " + high, last <= high);
+                    }
                 }
 
                 return null;
@@ -2385,7 +2686,7 @@
         public TestTree(ReuseList reuseList, boolean canGetRow, int cacheId, PageMemory pageMem, long metaPageId)
             throws IgniteCheckedException {
             super("test", cacheId, pageMem, null, new AtomicLong(), metaPageId, reuseList,
-                new IOVersions<>(new LongInnerIO(canGetRow)), new IOVersions<>(new LongLeafIO()));
+                new IOVersions<>(new LongInnerIO(canGetRow)), new IOVersions<>(new LongLeafIO()), null);
 
             PageIO.registerTest(latestInnerIO(), latestLeafIO());
 
@@ -2401,7 +2702,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override protected Long getRow(BPlusIO<Long> io, long pageAddr, int idx, Object ignore)
+        @Override public Long getRow(BPlusIO<Long> io, long pageAddr, int idx, Object ignore)
             throws IgniteCheckedException {
             assert io.canGetRow() : io;
 
@@ -2688,4 +2989,74 @@
             return PageUtils.getLong(pageAddr, offset(idx));
         }
     }
+
+    /**
+     *
+     */
+    static class TestTreeRowClosure implements BPlusTree.TreeRowClosure<Long, Long> {
+        /** */
+        private final Long expVal;
+
+        /** */
+        private boolean found;
+
+        /**
+         * @param expVal Value to find or {@code null} to find first.
+         */
+        TestTreeRowClosure(Long expVal) {
+            this.expVal = expVal;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean apply(BPlusTree<Long, Long> tree, BPlusIO<Long> io, long pageAddr, int idx)
+            throws IgniteCheckedException {
+            assert !found;
+
+            found = expVal == null || io.getLookupRow(tree, pageAddr, idx).equals(expVal);
+
+            return !found;
+        }
+    }
+
+    /**
+     *
+     */
+    static class TestTreeFindFirstClosure implements BPlusTree.TreeRowClosure<Long, Long> {
+        /** */
+        private Long val;
+
+
+        /** {@inheritDoc} */
+        @Override public boolean apply(BPlusTree<Long, Long> tree, BPlusIO<Long> io, long pageAddr, int idx)
+            throws IgniteCheckedException {
+            assert val == null;
+
+            val = io.getLookupRow(tree, pageAddr, idx);
+
+            return false;
+        }
+    }
+
+    /**
+     *
+     */
+    static class TestTreeFindFilteredClosure implements BPlusTree.TreeRowClosure<Long, Long> {
+        /** */
+        private final Set<Long> vals;
+
+        /**
+         * @param vals Values to allow in filter.
+         */
+        TestTreeFindFilteredClosure(Set<Long> vals) {
+            this.vals = vals;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean apply(BPlusTree<Long, Long> tree, BPlusIO<Long> io, long pageAddr, int idx)
+            throws IgniteCheckedException {
+            Long val = io.getLookupRow(tree, pageAddr, idx);
+
+            return vals.contains(val);
+        }
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java
index 80daff2..17aa9b0 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/CacheFreeListImplSelfTest.java
@@ -45,6 +45,7 @@
 import org.apache.ignite.internal.processors.cache.persistence.evict.NoOpPageEvictionTracker;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeListImpl;
 import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.CacheVersionIO;
 import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
@@ -407,6 +408,20 @@
         }
 
         /** {@inheritDoc} */
+        @Override public int size() throws IgniteCheckedException {
+            int len = key().valueBytesLength(null);
+
+            len += value().valueBytesLength(null) + CacheVersionIO.size(version(), false) + 8;
+
+            return len + (cacheId() != 0 ? 4 : 0);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int headerSize() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
         @Override public long link() {
             return link;
         }
@@ -425,6 +440,46 @@
         @Override public int cacheId() {
             return 0;
         }
+
+        /** {@inheritDoc} */
+        @Override public long newMvccCoordinatorVersion() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public long newMvccCounter() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int newMvccOperationCounter() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public long mvccCoordinatorVersion() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public long mvccCounter() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int mvccOperationCounter() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public byte mvccTxState() {
+            return 0;
+        }
+
+        /** {@inheritDoc} */
+        @Override public byte newMvccTxState() {
+            return 0;
+        }
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java
index 22e87b8..122f50e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/DataRegionMetricsSelfTest.java
@@ -17,6 +17,7 @@
 package org.apache.ignite.internal.processors.database;
 
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.atomic.AtomicInteger;
 import org.apache.ignite.DataRegionMetrics;
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl;
@@ -75,9 +76,8 @@
 
         joinAllThreads();
 
-        assertTrue(watcher.rateDropsCntr > 3);
-
-        assertTrue(watcher.rateDropsCntr < 6);
+        assertTrue("Expected rate drops count > 3 and < 6 but actual is " + watcher.rateDropsCntr.get(),
+            watcher.rateDropsCntr.get() > 3 && watcher.rateDropsCntr.get() < 6);
     }
 
     /**
@@ -100,7 +100,8 @@
 
         joinAllocationThreads();
 
-        assertTrue("4 or 5 rate drops must be observed: " + watcher.rateDropsCntr, watcher.rateDropsCntr == 4 || watcher.rateDropsCntr == 5);
+        assertTrue("4 or 5 rate drops must be observed: " + watcher.rateDropsCntr,
+            watcher.rateDropsCntr.get() == 4 || watcher.rateDropsCntr.get() == 5);
 
         sleep(3);
 
@@ -114,7 +115,8 @@
 
         joinAllThreads();
 
-        assertTrue(watcher.rateDropsCntr > 4);
+        assertTrue("Expected rate drops count > 4 but actual is " + watcher.rateDropsCntr.get(),
+            watcher.rateDropsCntr.get() > 4);
     }
 
     /**
@@ -143,7 +145,8 @@
 
         joinAllThreads();
 
-        assertTrue(watcher.rateDropsCntr > 4);
+        assertTrue("Expected rate drops count > 4 but actual is " + watcher.rateDropsCntr.get(),
+            watcher.rateDropsCntr.get() > 4);
     }
 
     /**
@@ -172,7 +175,8 @@
 
         joinAllThreads();
 
-        assertTrue(watcher.rateDropsCntr > 4);
+        assertTrue("Expected rate drops count > 4 but actual is " + watcher.rateDropsCntr.get(),
+            watcher.rateDropsCntr.get() > 4);
     }
 
     /**
@@ -280,7 +284,7 @@
                 startLatch.await();
 
                 for (int i = 0; i < iterationsCnt; i++) {
-                    memMetrics.incrementTotalAllocatedPages();
+                    memMetrics.updateTotalAllocatedPages(1);
 
                     sleep(delay);
                 }
@@ -299,7 +303,7 @@
      */
     private static class AllocationRateWatcher implements Runnable {
         /** */
-        private volatile int rateDropsCntr;
+        private final AtomicInteger rateDropsCntr = new AtomicInteger();
 
         /** */
         private final CountDownLatch startLatch;
@@ -330,7 +334,7 @@
 
                 while (!Thread.currentThread().isInterrupted()) {
                     if (prevRate > memMetrics.getAllocationRate())
-                        rateDropsCntr++;
+                        rateDropsCntr.incrementAndGet();
 
                     prevRate = memMetrics.getAllocationRate();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbPutGetWithCacheStoreTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbPutGetWithCacheStoreTest.java
index 3238acf..8cca7f4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbPutGetWithCacheStoreTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IgniteDbPutGetWithCacheStoreTest.java
@@ -31,7 +31,6 @@
 import org.apache.ignite.configuration.IgniteReflectionFactory;
 import org.apache.ignite.configuration.WALMode;
 import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 
 import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
@@ -124,7 +123,7 @@
 
             assertEquals(2000, storeMap.size());
 
-            stopAllGrids();
+            stopAllGrids(false);
 
             storeMap.clear();
 
@@ -158,7 +157,7 @@
             for (int i = 0; i < 2000; i++)
                 assertEquals(i, ig.cache(CACHE_NAME).get(i));
 
-            stopAllGrids();
+            stopAllGrids(false);
 
             storeMap.clear();
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IndexStorageSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IndexStorageSelfTest.java
index b92534c..69a86b4 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IndexStorageSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/database/IndexStorageSelfTest.java
@@ -98,7 +98,8 @@
                 if (metaStore == null) {
                     metaStore = new IndexStorageImpl(mem, null, new AtomicLong(), cacheId,
                         PageIdAllocator.INDEX_PARTITION, PageMemory.FLAG_IDX,
-                        null, mem.allocatePage(cacheId, PageIdAllocator.INDEX_PARTITION, PageMemory.FLAG_IDX), true);
+                        null, mem.allocatePage(cacheId, PageIdAllocator.INDEX_PARTITION, PageMemory.FLAG_IDX), true,
+                            null);
 
                     storeMap.put(cacheId, metaStore);
                 }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorMvccSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorMvccSelfTest.java
new file mode 100644
index 0000000..d7948bd
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorMvccSelfTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.datastreamer;
+
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+
+/**
+ * Check DataStreamer with Mvcc enabled.
+ */
+public class DataStreamProcessorMvccSelfTest extends DataStreamProcessorSelfTest {
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration igniteConfiguration = super.getConfiguration(igniteInstanceName);
+
+        CacheConfiguration[] cacheConfigurations = igniteConfiguration.getCacheConfiguration();
+
+        assert cacheConfigurations == null || cacheConfigurations.length == 0
+                || (cacheConfigurations.length == 1 && cacheConfigurations[0].getAtomicityMode() == TRANSACTIONAL_SNAPSHOT);
+
+        return igniteConfiguration;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected CacheAtomicityMode getCacheAtomicityMode() {
+        return TRANSACTIONAL_SNAPSHOT;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testPartitioned() throws Exception {
+        // test uses batchedSorted StreamReceiver which depends on Cache.putAll, Cache.removeAll
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        super.testPartitioned();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testColocated() throws Exception {
+        // test uses batchedSorted StreamReceiver which depends on Cache.putAll, Cache.removeAll
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        super.testColocated();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testReplicated() throws Exception {
+        // test uses batchedSorted StreamReceiver which depends on Cache.putAll, Cache.removeAll
+        fail("https://issues.apache.org/jira/browse/IGNITE-9451");
+
+        super.testReplicated();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testUpdateStore() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-8582");
+
+        super.testUpdateStore();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testLocal() throws Exception {
+        // Do not check local caches with MVCC enabled.
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorSelfTest.java
index 91345fe..536d73e 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/datastreamer/DataStreamProcessorSelfTest.java
@@ -34,6 +34,7 @@
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteDataStreamer;
 import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.cache.CachePeekMode;
 import org.apache.ignite.cache.affinity.Affinity;
@@ -67,6 +68,7 @@
 
 import static java.util.concurrent.TimeUnit.MILLISECONDS;
 import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL;
+import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
 import static org.apache.ignite.cache.CacheMode.LOCAL;
 import static org.apache.ignite.cache.CacheMode.PARTITIONED;
 import static org.apache.ignite.cache.CacheMode.REPLICATED;
@@ -122,7 +124,7 @@
             CacheConfiguration cc = defaultCacheConfiguration();
 
             cc.setCacheMode(mode);
-            cc.setAtomicityMode(TRANSACTIONAL);
+            cc.setAtomicityMode(getCacheAtomicityMode());
 
             if (nearEnabled) {
                 NearCacheConfiguration nearCfg = new NearCacheConfiguration();
@@ -150,6 +152,13 @@
     }
 
     /**
+     * @return Default cache atomicity mode.
+     */
+    protected CacheAtomicityMode getCacheAtomicityMode() {
+        return TRANSACTIONAL;
+    }
+
+    /**
      * @return {@code True} if custom stream receiver should use keepBinary flag.
      */
     protected boolean customKeepBinary() {
@@ -371,11 +380,27 @@
                     if (aff.isPrimary(locNode, key) || aff.isBackup(locNode, key)) {
                         GridCacheEntryEx entry = cache0.entryEx(key);
 
-                        entry.unswap();
+                        try {
+                            // lock non obsolete entry
+                            while (true) {
+                                entry.lockEntry();
 
-                        assertNotNull("Missing entry for key: " + key, entry);
-                        assertEquals(new Integer((key < 100 ? -1 : key)),
-                            CU.value(entry.rawGet(), cache0.context(), false));
+                                if (!entry.obsolete())
+                                    break;
+
+                                entry.unlockEntry();
+
+                                entry = cache0.entryEx(key);
+                            }
+
+                            entry.unswap();
+
+                            assertEquals(new Integer((key < 100 ? -1 : key)),
+                                CU.value(entry.rawGet(), cache0.context(), false));
+                        }
+                        finally {
+                            entry.unlockEntry();
+                        }
                     }
                 }
             }
@@ -714,7 +739,8 @@
      * @throws Exception If failed.
      */
     public void testFlush() throws Exception {
-        mode = LOCAL;
+        // Local caches are not allowed with MVCC enabled.
+        mode = getCacheAtomicityMode() != TRANSACTIONAL_SNAPSHOT ? LOCAL : PARTITIONED;
 
         useCache = true;
 
@@ -766,7 +792,8 @@
      * @throws Exception If failed.
      */
     public void testTryFlush() throws Exception {
-        mode = LOCAL;
+        // Local caches are not allowed with MVCC enabled.
+        mode = getCacheAtomicityMode() != TRANSACTIONAL_SNAPSHOT ? LOCAL : PARTITIONED;
 
         useCache = true;
 
@@ -801,7 +828,8 @@
      * @throws Exception If failed.
      */
     public void testFlushTimeout() throws Exception {
-        mode = LOCAL;
+        // Local caches are not allowed with MVCC enabled.
+        mode = getCacheAtomicityMode() != TRANSACTIONAL_SNAPSHOT ? LOCAL : PARTITIONED;
 
         useCache = true;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsLocalSecondaryFileSystemDualAbstractSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsLocalSecondaryFileSystemDualAbstractSelfTest.java
index 62d1552..ffbcad5 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsLocalSecondaryFileSystemDualAbstractSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/igfs/IgfsLocalSecondaryFileSystemDualAbstractSelfTest.java
@@ -177,7 +177,7 @@
      *
      * @throws Exception If failed.
      */
-    public void testUpdateParentRootPathMissing() throws Exception {
+    @Override public void testUpdateParentRootPathMissing() throws Exception {
         doUpdateParentRootPathMissing(properties(TEST_GROUP, "0555"));
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/service/IgniteServiceConfigVariationsFullApiTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/service/IgniteServiceConfigVariationsFullApiTest.java
index 825e561..160014c 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/service/IgniteServiceConfigVariationsFullApiTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/service/IgniteServiceConfigVariationsFullApiTest.java
@@ -322,7 +322,7 @@
         }
 
         /** {@inheritDoc} */
-        public void setValue(Object val) {
+        @Override public void setValue(Object val) {
             this.val = val;
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/service/ServicePredicateAccessCacheTest.java b/modules/core/src/test/java/org/apache/ignite/internal/processors/service/ServicePredicateAccessCacheTest.java
index 02ba65e..33a1993 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/processors/service/ServicePredicateAccessCacheTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/processors/service/ServicePredicateAccessCacheTest.java
@@ -131,17 +131,17 @@
      */
     public static class TestService implements Service {
         /** {@inheritDoc} */
-        public void execute(ServiceContext ctx) {
+        @Override public void execute(ServiceContext ctx) {
             // No-op.
         }
 
         /** {@inheritDoc} */
-        public void init(ServiceContext ctx) {
+        @Override public void init(ServiceContext ctx) {
             // No-op.
         }
 
         /** {@inheritDoc} */
-        public void cancel(ServiceContext ctx) {
+        @Override public void cancel(ServiceContext ctx) {
             // No-op.
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserTransactionalKeywordsSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserTransactionalKeywordsSelfTest.java
new file mode 100644
index 0000000..103bb97
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/internal/sql/SqlParserTransactionalKeywordsSelfTest.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.sql;
+
+import org.apache.ignite.internal.sql.command.SqlBeginTransactionCommand;
+import org.apache.ignite.internal.sql.command.SqlCommand;
+import org.apache.ignite.internal.sql.command.SqlCommitTransactionCommand;
+import org.apache.ignite.internal.sql.command.SqlRollbackTransactionCommand;
+
+/**
+ * Tests for processing of keywords BEGIN, COMMIT, ROLLBACK, START.
+ */
+public class SqlParserTransactionalKeywordsSelfTest extends SqlParserAbstractSelfTest {
+    /**
+     * Test parsing of different forms of BEGIN/START.
+     */
+    public void testBegin() {
+        assertBegin("begin");
+        assertBegin("BEGIN");
+        assertBegin("BEGIN work");
+        assertBegin("begin Transaction");
+        assertBegin("StarT TransactioN");
+
+        assertParseError(null, "begin index", "Unexpected token: \"INDEX\"");
+        assertParseError(null, "start work", "Unexpected token: \"WORK\" (expected: \"TRANSACTION\")");
+        assertParseError(null, "start", "Unexpected end of command (expected: \"TRANSACTION\")");
+    }
+
+    /**
+     * Test parsing of different forms of COMMIT.
+     */
+    public void testCommit() {
+        assertCommit("commit");
+        assertCommit("COMMIT transaction");
+
+        assertParseError(null, "commit index", "Unexpected token: \"INDEX\"");
+    }
+
+    /**
+     * Test parsing of different forms of ROLLBACK.
+     */
+    public void testRollback() {
+        assertRollback("rollback");
+        assertRollback("ROLLBACK transaction");
+
+        assertParseError(null, "rollback index", "Unexpected token: \"INDEX\"");
+    }
+
+    /**
+     * Test that given SQL is parsed as a BEGIN command.
+     * @param sql command.
+     */
+    private static void assertBegin(String sql) {
+        assertTrue(parse(sql) instanceof SqlBeginTransactionCommand);
+    }
+
+    /**
+     * Test that given SQL is parsed as a BEGIN command.
+     * @param sql command.
+     */
+    private static void assertCommit(String sql) {
+        assertTrue(parse(sql) instanceof SqlCommitTransactionCommand);
+    }
+
+    /**
+     * Test that given SQL is parsed as a BEGIN command.
+     * @param sql command.
+     */
+    private static void assertRollback(String sql) {
+        assertTrue(parse(sql) instanceof SqlRollbackTransactionCommand);
+    }
+
+    /**
+     * Parse single SQL command.
+     * @param sql command.
+     * @return parsed command.
+     */
+    private static SqlCommand parse(String sql) {
+        return new SqlParser(null, sql).nextCommand();
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteDevOnlyLogTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteDevOnlyLogTest.java
index 1f4e6d2..9a82947 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteDevOnlyLogTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/util/IgniteDevOnlyLogTest.java
@@ -17,47 +17,73 @@
 
 package org.apache.ignite.internal.util;
 
-import java.io.IOException;
 import java.util.Collections;
-import junit.framework.TestCase;
+import java.util.List;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteSystemProperties;
-import org.apache.ignite.Ignition;
-import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
-import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.lang.IgniteRunnable;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.testframework.GridStringLogger;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 
 /**
  * Testing logging via {@link IgniteUtils#warnDevOnly(IgniteLogger, Object)}.
  */
-public class IgniteDevOnlyLogTest extends TestCase {
-    /** Check that dev-only messages appear in the log. */
-    public void testDevOnlyQuietMessage() throws IOException {
-        String oldQuietVal = System.setProperty(IgniteSystemProperties.IGNITE_QUIET, "true");
+public class IgniteDevOnlyLogTest extends GridCommonAbstractTest {
+    /** */
+    private List<String> additionalArgs;
 
-        try (Ignite ignite = startNode()) {
-            String msg = getMessage(ignite);
-            IgniteUtils.warnDevOnly(ignite.log(), msg);
-            assertTrue(readLog(ignite).contains(msg));
-        }
-        finally {
-            setOrClearProperty(IgniteSystemProperties.IGNITE_QUIET, oldQuietVal);
-        }
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        startGrid(0);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean isMultiJvm() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected List<String> additionalRemoteJvmArgs() {
+        return additionalArgs;
     }
 
     /** Check that dev-only messages appear in the log. */
-    public void testDevOnlyVerboseMessage() throws IOException {
-        String oldQuietVal = System.setProperty(IgniteSystemProperties.IGNITE_QUIET, "false");
+    public void testDevOnlyQuietMessage() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9328");
 
-        try (Ignite ignite = startNode()) {
-            String msg = getMessage(ignite);
-            IgniteUtils.warnDevOnly(ignite.log(), msg);
-            assertTrue(readLog(ignite).contains(msg));
-        }
-        finally {
-            setOrClearProperty(IgniteSystemProperties.IGNITE_QUIET, oldQuietVal);
-        }
+        additionalArgs = Collections.singletonList("-D" + IgniteSystemProperties.IGNITE_QUIET + "=true");
+
+        log = new GridStringLogger(false, grid(0).log());
+
+        Ignite ignite = startGrid(1);
+
+        String msg = getMessage(ignite);
+
+        warnDevOnly(msg);
+
+        assertTrue(log.toString().contains(msg));
+    }
+
+    /** Check that dev-only messages appear in the log. */
+    public void testDevOnlyVerboseMessage() throws Exception {
+        additionalArgs = Collections.singletonList("-D" + IgniteSystemProperties.IGNITE_QUIET + "=false");
+
+        log = new GridStringLogger(false, grid(0).log());
+
+        Ignite ignite = startGrid(1);
+
+        String msg = getMessage(ignite);
+
+        warnDevOnly(msg);
+
+        assertTrue(log.toString().contains(msg));
     }
 
     /**
@@ -65,44 +91,31 @@
      * doesn't print anything if {@link org.apache.ignite.IgniteSystemProperties#IGNITE_DEV_ONLY_LOGGING_DISABLED}
      * is set to {@code true}.
      */
-    public void testDevOnlyDisabledProperty() throws IOException {
-        String oldDevOnlyVal = System.setProperty(IgniteSystemProperties.IGNITE_DEV_ONLY_LOGGING_DISABLED, "true");
+    public void testDevOnlyDisabledProperty() throws Exception {
+        additionalArgs = Collections.singletonList("-D" +
+            IgniteSystemProperties.IGNITE_DEV_ONLY_LOGGING_DISABLED + "=true");
 
-        try (Ignite ignite = startNode()) {
-            String msg = getMessage(ignite);
-            IgniteUtils.warnDevOnly(ignite.log(), msg);
-            assertFalse(readLog(ignite).contains(msg));
-        }
-        finally {
-            setOrClearProperty(IgniteSystemProperties.IGNITE_DEV_ONLY_LOGGING_DISABLED, oldDevOnlyVal);
-        }
+        log = new GridStringLogger(false, grid(0).log());
 
+        Ignite ignite = startGrid(1);
+
+        String msg = getMessage(ignite);
+
+        warnDevOnly(msg);
+
+        assertFalse(log.toString().contains(msg));
     }
 
-    /** Sets a system property if the value is not null, or clears it if the value is null. */
-    private void setOrClearProperty(String key, String val) {
-        if (val != null)
-            System.setProperty(key, val);
-        else
-            System.clearProperty(IgniteSystemProperties.IGNITE_QUIET);
-    }
+    /** */
+    private void warnDevOnly(final String msg) {
+        grid(0).compute(grid(0).cluster().forRemotes()).broadcast(new IgniteRunnable() {
+            @IgniteInstanceResource
+            private Ignite ignite;
 
-    /** Starts an Ignite node. */
-    private Ignite startNode() throws IOException {
-        IgniteConfiguration configuration = new IgniteConfiguration()
-            .setIgniteInstanceName(IgniteDevOnlyLogTest.class.getName() + "Instance")
-            .setDiscoverySpi(new TcpDiscoverySpi()
-                .setIpFinder(new TcpDiscoveryVmIpFinder()
-                    .setAddresses(Collections.singletonList("127.0.0.1:47500..47509"))
-                )
-            );
-
-        return Ignition.start(configuration);
-    }
-
-    /** Reads log of the given node to a string. */
-    private String readLog(Ignite ignite) throws IOException {
-        return IgniteUtils.readFileToString(ignite.log().fileName(), "UTF-8");
+            @Override public void run() {
+                IgniteUtils.warnDevOnly(ignite.log(), msg);
+            }
+        });
     }
 
     /** Returns a test message. */
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/nio/impl/GridNioFilterChainSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/nio/impl/GridNioFilterChainSelfTest.java
index e6aab9f..5888575 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/util/nio/impl/GridNioFilterChainSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/util/nio/impl/GridNioFilterChainSelfTest.java
@@ -247,7 +247,7 @@
         }
 
         /** {@inheritDoc} */
-        public String toString() {
+        @Override public String toString() {
             return "AppendingFilter [param=" + param + ']';
         }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/util/tostring/GridToStringBuilderSelfTest.java b/modules/core/src/test/java/org/apache/ignite/internal/util/tostring/GridToStringBuilderSelfTest.java
index eff3349..d249914 100644
--- a/modules/core/src/test/java/org/apache/ignite/internal/util/tostring/GridToStringBuilderSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/internal/util/tostring/GridToStringBuilderSelfTest.java
@@ -31,6 +31,8 @@
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.cache.KeyCacheObjectImpl;
+import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey;
 import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
@@ -302,8 +304,8 @@
      * @param limit Array limit.
      */
     private void checkArrayOverflow(Object[] arrOf, Object[] arr, int limit) {
-        String arrStr = GridToStringBuilder.arrayToString(arr.getClass(), arr);
-        String arrOfStr = GridToStringBuilder.arrayToString(arrOf.getClass(), arrOf);
+        String arrStr = GridToStringBuilder.arrayToString(arr);
+        String arrOfStr = GridToStringBuilder.arrayToString(arrOf);
 
         // Simulate overflow
         StringBuilder resultSB = new StringBuilder(arrStr);
@@ -330,75 +332,97 @@
         for (Object val : vals)
             testArr(val, limit);
 
+        int[] intArr1 = new int[0];
+
+        assertEquals("[]", GridToStringBuilder.arrayToString(intArr1));
+        assertEquals("null", GridToStringBuilder.arrayToString(null));
+
+        int[] intArr2 = {1, 2, 3};
+
+        assertEquals("[1, 2, 3]", GridToStringBuilder.arrayToString(intArr2));
+
+        Object[] intArr3 = {2, 3, 4};
+
+        assertEquals("[2, 3, 4]", GridToStringBuilder.arrayToString(intArr3));
+
         byte[] byteArr = new byte[1];
+
         byteArr[0] = 1;
-        assertEquals(Arrays.toString(byteArr), GridToStringBuilder.arrayToString(byteArr.getClass(), byteArr));
+        assertEquals(Arrays.toString(byteArr), GridToStringBuilder.arrayToString(byteArr));
         byteArr = Arrays.copyOf(byteArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(byteArr.getClass(), byteArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(byteArr).contains("... and 1 more"));
 
         boolean[] boolArr = new boolean[1];
+
         boolArr[0] = true;
-        assertEquals(Arrays.toString(boolArr), GridToStringBuilder.arrayToString(boolArr.getClass(), boolArr));
+        assertEquals(Arrays.toString(boolArr), GridToStringBuilder.arrayToString(boolArr));
         boolArr = Arrays.copyOf(boolArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(boolArr.getClass(), boolArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(boolArr).contains("... and 1 more"));
 
         short[] shortArr = new short[1];
+
         shortArr[0] = 100;
-        assertEquals(Arrays.toString(shortArr), GridToStringBuilder.arrayToString(shortArr.getClass(), shortArr));
+        assertEquals(Arrays.toString(shortArr), GridToStringBuilder.arrayToString(shortArr));
         shortArr = Arrays.copyOf(shortArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(shortArr.getClass(), shortArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(shortArr).contains("... and 1 more"));
 
         int[] intArr = new int[1];
+
         intArr[0] = 10000;
-        assertEquals(Arrays.toString(intArr), GridToStringBuilder.arrayToString(intArr.getClass(), intArr));
+        assertEquals(Arrays.toString(intArr), GridToStringBuilder.arrayToString(intArr));
         intArr = Arrays.copyOf(intArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(intArr.getClass(), intArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(intArr).contains("... and 1 more"));
 
         long[] longArr = new long[1];
+
         longArr[0] = 10000000;
-        assertEquals(Arrays.toString(longArr), GridToStringBuilder.arrayToString(longArr.getClass(), longArr));
+        assertEquals(Arrays.toString(longArr), GridToStringBuilder.arrayToString(longArr));
         longArr = Arrays.copyOf(longArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(longArr.getClass(), longArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(longArr).contains("... and 1 more"));
 
         float[] floatArr = new float[1];
+
         floatArr[0] = 1.f;
-        assertEquals(Arrays.toString(floatArr), GridToStringBuilder.arrayToString(floatArr.getClass(), floatArr));
+        assertEquals(Arrays.toString(floatArr), GridToStringBuilder.arrayToString(floatArr));
         floatArr = Arrays.copyOf(floatArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(floatArr.getClass(), floatArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(floatArr).contains("... and 1 more"));
 
         double[] doubleArr = new double[1];
+
         doubleArr[0] = 1.;
-        assertEquals(Arrays.toString(doubleArr), GridToStringBuilder.arrayToString(doubleArr.getClass(), doubleArr));
+        assertEquals(Arrays.toString(doubleArr), GridToStringBuilder.arrayToString(doubleArr));
         doubleArr = Arrays.copyOf(doubleArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(doubleArr.getClass(), doubleArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(doubleArr).contains("... and 1 more"));
 
-        char[] charArr = new char[1];
-        charArr[0] = 'a';
-        assertEquals(Arrays.toString(charArr), GridToStringBuilder.arrayToString(charArr.getClass(), charArr));
-        charArr = Arrays.copyOf(charArr, 101);
+        char[] cArr = new char[1];
+
+        cArr[0] = 'a';
+        assertEquals(Arrays.toString(cArr), GridToStringBuilder.arrayToString(cArr));
+        cArr = Arrays.copyOf(cArr, 101);
         assertTrue("Can't find \"... and 1 more\" in overflowed array string!",
-            GridToStringBuilder.arrayToString(charArr.getClass(), charArr).contains("... and 1 more"));
+            GridToStringBuilder.arrayToString(cArr).contains("... and 1 more"));
 
         Map<String, String> strMap = new TreeMap<>();
         List<String> strList = new ArrayList<>(limit+1);
 
-        TestClass1 testClass = new TestClass1();
-        testClass.strMap = strMap;
-        testClass.strListIncl = strList;
+        TestClass1 testCls = new TestClass1();
+
+        testCls.strMap = strMap;
+        testCls.strListIncl = strList;
 
         for (int i = 0; i < limit; i++) {
             strMap.put("k" + i, "v");
             strList.add("e");
         }
 
-        checkColAndMap(testClass);
+        checkColAndMap(testCls);
     }
 
     /**
@@ -457,26 +481,42 @@
     public void testToStringSizeLimits() throws Exception {
         int limit = IgniteSystemProperties.getInteger(IGNITE_TO_STRING_MAX_LENGTH, 10_000);
         int tailLen = limit / 10 * 2;
-        StringBuilder sb = new StringBuilder(limit + 10);
-        for (int i = 0; i < limit - 100; i++) {
-            sb.append('a');
-        }
-        String actual = GridToStringBuilder.toString(TestClass2.class, new TestClass2(sb.toString()));
-        String expected = "TestClass2 [str=" + sb.toString() + ", nullArr=null]";
-        assertEquals(expected, actual);
 
-        for (int i = 0; i < 110; i++) {
+        StringBuilder sb = new StringBuilder(limit + 10);
+
+        for (int i = 0; i < limit - 100; i++)
+            sb.append('a');
+
+        String actual = GridToStringBuilder.toString(TestClass2.class, new TestClass2(sb.toString()));
+        String exp = "TestClass2 [str=" + sb + ", nullArr=null]";
+
+        assertEquals(exp, actual);
+
+        for (int i = 0; i < 110; i++)
             sb.append('b');
-        }
+
         actual = GridToStringBuilder.toString(TestClass2.class, new TestClass2(sb.toString()));
-        expected = "TestClass2 [str=" + sb.toString() + ", nullArr=null]";
-        assertEquals(expected.substring(0, limit - tailLen), actual.substring(0, limit - tailLen));
-        assertEquals(expected.substring(expected.length() - tailLen), actual.substring(actual.length() - tailLen));
+        exp = "TestClass2 [str=" + sb + ", nullArr=null]";
+
+        assertEquals(exp.substring(0, limit - tailLen), actual.substring(0, limit - tailLen));
+        assertEquals(exp.substring(exp.length() - tailLen), actual.substring(actual.length() - tailLen));
+
         assertTrue(actual.contains("... and"));
         assertTrue(actual.contains("skipped ..."));
     }
 
     /**
+     *
+     */
+    public void testObjectPlusStringToString() {
+        IgniteTxKey k = new IgniteTxKey(new KeyCacheObjectImpl(1, null, 1), 123);
+
+        info(k.toString());
+
+        assertTrue("Wrong string: " + k, k.toString().startsWith("IgniteTxKey ["));
+    }
+
+    /**
      * Test class.
      */
     private static class TestClass1 {
@@ -607,7 +647,7 @@
         /**
          * @param str String.
          */
-        public TestClass2(String str) {
+        TestClass2(String str) {
             this.str = str;
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/lang/utils/GridConsistentHashSelfTest.java b/modules/core/src/test/java/org/apache/ignite/lang/utils/GridConsistentHashSelfTest.java
index 3897060..98d7b98 100644
--- a/modules/core/src/test/java/org/apache/ignite/lang/utils/GridConsistentHashSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/lang/utils/GridConsistentHashSelfTest.java
@@ -160,8 +160,8 @@
 
         try {
             // Add several objects with the same hash without neigther natural ordering nor comparator.
-            hash.addNode(new Object() { public int hashCode() { return 0; } }, 1);
-            hash.addNode(new Object() { public int hashCode() { return 0; } }, 1);
+            hash.addNode(new Object() { @Override public int hashCode() { return 0; } }, 1);
+            hash.addNode(new Object() { @Override public int hashCode() { return 0; } }, 1);
 
             fail("Expects failed due to internal TreeSet requires comparator or natural ordering.");
         }
@@ -178,8 +178,8 @@
         }, null);
 
         // Add several objects with the same hash into consistent hash with explicit comparator.
-        hash.addNode(new Object() { public int hashCode() { return 0; } }, 1);
-        hash.addNode(new Object() { public int hashCode() { return 0; } }, 1);
+        hash.addNode(new Object() { @Override public int hashCode() { return 0; } }, 1);
+        hash.addNode(new Object() { @Override public int hashCode() { return 0; } }, 1);
 
         info("Expected pass due to internal TreeSet has explicit comparator.");
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/lang/utils/IgniteOffheapReadWriteLockSelfTest.java b/modules/core/src/test/java/org/apache/ignite/lang/utils/IgniteOffheapReadWriteLockSelfTest.java
index 7a38548..c5ebe6a 100644
--- a/modules/core/src/test/java/org/apache/ignite/lang/utils/IgniteOffheapReadWriteLockSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/lang/utils/IgniteOffheapReadWriteLockSelfTest.java
@@ -38,6 +38,9 @@
     /** */
     private static final int TAG_0 = 1;
 
+    /** Number of 1-second iterations in every test. */
+    public static final int ROUNDS_PER_TEST = 5;
+
     /**
      * @throws Exception if failed.
      */
@@ -60,7 +63,7 @@
 
         IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
             /** {@inheritDoc} */
-            @Override public Object call() throws Exception {
+            @Override public Object call() {
                 try {
                     ThreadLocalRandom rnd = ThreadLocalRandom.current();
 
@@ -120,7 +123,7 @@
             }
         }, 32, "tester");
 
-        for (int i = 0; i < 30; i++) {
+        for (int i = 0; i < ROUNDS_PER_TEST; i++) {
             Thread.sleep(1_000);
 
             info("Reads: " + reads.getAndSet(0) + ", writes=" + writes.getAndSet(0));
@@ -156,7 +159,7 @@
 
         IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
             /** {@inheritDoc} */
-            @Override public Object call() throws Exception {
+            @Override public Object call() {
                 ThreadLocalRandom rnd = ThreadLocalRandom.current();
 
                 while (!done.get()) {
@@ -206,7 +209,7 @@
             }
         }, 32, "tester");
 
-        for (int i = 0; i < 30; i++) {
+        for (int i = 0; i < ROUNDS_PER_TEST; i++) {
             Thread.sleep(1_000);
 
             info("Reads: " + reads.getAndSet(0) + ", writes=" + writes.getAndSet(0));
@@ -243,7 +246,7 @@
 
         IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
             /** {@inheritDoc} */
-            @Override public Object call() throws Exception {
+            @Override public Object call() {
                 ThreadLocalRandom rnd = ThreadLocalRandom.current();
 
                 while (!done.get()) {
@@ -293,7 +296,7 @@
             }
         }, 32, "tester");
 
-        for (int i = 0; i < 30; i++) {
+        for (int i = 0; i < ROUNDS_PER_TEST; i++) {
             Thread.sleep(1_000);
 
             info("Reads=" + reads.getAndSet(0) + ", writes=" + writes.getAndSet(0) + ", upgrades=" + successfulUpgrades.getAndSet(0));
@@ -339,14 +342,15 @@
         final AtomicInteger reads = new AtomicInteger();
         final AtomicInteger writes = new AtomicInteger();
         final AtomicBoolean done = new AtomicBoolean(false);
+        final AtomicBoolean run = new AtomicBoolean(true);
 
         final int threadCnt = 32;
 
-        final CyclicBarrier barr = new CyclicBarrier(threadCnt);
+        final CyclicBarrier barr = new CyclicBarrier(threadCnt, () -> {if (done.get()) run.set(false);});
 
         IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
             /** {@inheritDoc} */
-            @Override public Object call() throws Exception {
+            @Override public Object call() {
                 try {
                     ThreadLocalRandom rnd = ThreadLocalRandom.current();
 
@@ -354,7 +358,7 @@
 
                     long lastSwitch = System.currentTimeMillis();
 
-                    while (true) {
+                    while (run.get()) {
                         boolean write = rnd.nextInt(10) < 2;
 
                         boolean locked;
@@ -420,8 +424,10 @@
                             try {
                                 barr.await();
                             }
-                            catch (BrokenBarrierException ignore) {
+                            catch (BrokenBarrierException e) {
                                 // Done.
+                                e.printStackTrace();
+
                                 return null;
                             }
 
@@ -433,12 +439,6 @@
                             if (waitBeforeSwitch || (!waitBeforeSwitch && tag == 1))
                                 info("Switch to a new tag: " + tag);
 
-                            if (done.get()) {
-                                barr.reset();
-
-                                return null;
-                            }
-
                             lastSwitch = System.currentTimeMillis();
                         }
                     }
@@ -451,7 +451,7 @@
             }
         }, threadCnt, "tester");
 
-        for (int i = 0; i < 30; i++) {
+        for (int i = 0; i < ROUNDS_PER_TEST; i++) {
             Thread.sleep(1_000);
 
             info("Reads: " + reads.getAndSet(0) + ", writes=" + writes.getAndSet(0));
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/GridCacheLoadPopulationTask.java b/modules/core/src/test/java/org/apache/ignite/loadtests/GridCacheLoadPopulationTask.java
index 232eef9..0ee8300 100644
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/GridCacheLoadPopulationTask.java
+++ b/modules/core/src/test/java/org/apache/ignite/loadtests/GridCacheLoadPopulationTask.java
@@ -138,7 +138,7 @@
     }
 
     /** {@inheritDoc} */
-    public String toString() {
+    @Override public String toString() {
         return S.toString(TestValue.class, this);
     }
 
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/hashmap/GridCacheTestContext.java b/modules/core/src/test/java/org/apache/ignite/loadtests/hashmap/GridCacheTestContext.java
index 3933953..64d29e6 100644
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/hashmap/GridCacheTestContext.java
+++ b/modules/core/src/test/java/org/apache/ignite/loadtests/hashmap/GridCacheTestContext.java
@@ -37,6 +37,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheTtlManager;
 import org.apache.ignite.internal.processors.cache.WalStateManager;
 import org.apache.ignite.internal.processors.cache.datastructures.CacheDataStructuresManager;
+import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionsEvictManager;
 import org.apache.ignite.internal.processors.cache.dr.GridOsCacheDrManager;
 import org.apache.ignite.internal.processors.cache.jta.CacheNoopJtaManager;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
@@ -78,6 +79,7 @@
                 new CacheAffinitySharedManager<K, V>(),
                 new GridCacheIoManager(),
                 new GridCacheSharedTtlCleanupManager(),
+                new PartitionsEvictManager(),
                 new CacheNoopJtaManager(),
                 null
             ),
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest1.java b/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest1.java
deleted file mode 100644
index e7f63dd..0000000
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest1.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.mapper;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteDataStreamer;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.X;
-
-/**
- * Continuous mapper load test.
- */
-public class GridContinuousMapperLoadTest1 {
-    /**
-     * Main method.
-     *
-     * @param args Parameters.
-     */
-    public static void main(String[] args) {
-        try (Ignite g = G.start("examples/config/example-cache.xml")) {
-            int max = 30000;
-
-            IgniteDataStreamer<Integer, TestObject> ldr = g.dataStreamer("replicated");
-
-            for (int i = 0; i < max; i++)
-                ldr.addData(i, new TestObject(i, "Test object: " + i));
-
-            // Wait for loader to complete.
-            ldr.close(false);
-
-            X.println("Populated replicated cache.");
-
-            g.compute().execute(new GridContinuousMapperTask1(), max);
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest2.java b/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest2.java
deleted file mode 100644
index 68c36ce..0000000
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperLoadTest2.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.mapper;
-
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteDataStreamer;
-import org.apache.ignite.internal.util.typedef.G;
-import org.apache.ignite.internal.util.typedef.X;
-
-/**
- * Continuous mapper load test.
- */
-public class GridContinuousMapperLoadTest2 {
-    /**
-     * Main method.
-     *
-     * @param args Parameters.
-     * @throws Exception If failed.
-     */
-    public static void main(String[] args) throws Exception {
-        final AtomicInteger jobIdGen = new AtomicInteger();
-        final AtomicInteger sentJobs = new AtomicInteger();
-
-        final LinkedBlockingQueue<Integer> queue = new LinkedBlockingQueue<>(10);
-
-        /** Worker thread. */
-        Thread t = new Thread("mapper-worker") {
-            @Override public void run() {
-                try {
-                    while (!Thread.currentThread().isInterrupted())
-                        queue.put(jobIdGen.incrementAndGet());
-                }
-                catch (InterruptedException ignore) {
-                    // No-op.
-                }
-            }
-        };
-
-        Ignite g = G.start("examples/config/example-cache.xml");
-
-        try {
-            int max = 20000;
-
-            IgniteDataStreamer<Integer, TestObject> ldr = g.dataStreamer("replicated");
-
-            for (int i = 0; i < max; i++)
-                ldr.addData(i, new TestObject(i, "Test object: " + i));
-
-            // Wait for loader to complete.
-            ldr.close(false);
-
-            X.println("Populated replicated cache.");
-
-            t.start();
-
-            while (sentJobs.get() < max) {
-                int[] jobIds = new int[10];
-
-                for (int i = 0; i < jobIds.length; i++)
-                    jobIds[i] = queue.take();
-
-                sentJobs.addAndGet(10);
-
-                g.compute().execute(new GridContinuousMapperTask2(), jobIds);
-            }
-        }
-        finally {
-            t.interrupt();
-
-            t.join();
-
-            G.stopAll(false);
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask1.java b/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask1.java
deleted file mode 100644
index 4577806..0000000
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask1.java
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.mapper;
-
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.LinkedBlockingQueue;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteException;
-import org.apache.ignite.cache.CachePeekMode;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.compute.ComputeJob;
-import org.apache.ignite.compute.ComputeJobAdapter;
-import org.apache.ignite.compute.ComputeJobResult;
-import org.apache.ignite.compute.ComputeJobResultPolicy;
-import org.apache.ignite.compute.ComputeTaskAdapter;
-import org.apache.ignite.compute.ComputeTaskContinuousMapper;
-import org.apache.ignite.compute.ComputeTaskNoResultCache;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.apache.ignite.resources.TaskContinuousMapperResource;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Test task.
- */
-@SuppressWarnings("TransientFieldNotInitialized")
-@ComputeTaskNoResultCache
-public class GridContinuousMapperTask1 extends ComputeTaskAdapter<Integer, Integer> {
-    /** Job ID generator. */
-    private final transient AtomicInteger jobIdGen = new AtomicInteger();
-
-    /** Mapper. */
-    @TaskContinuousMapperResource
-    private ComputeTaskContinuousMapper mapper;
-
-    /** Grid. */
-    @IgniteInstanceResource
-    private Ignite g;
-
-    /** Blocking queue. */
-    private final transient LinkedBlockingQueue<Integer> queue = new LinkedBlockingQueue<>(10);
-
-    /** Sent jobs count. */
-    private final transient AtomicInteger sentJobs = new AtomicInteger();
-
-    /** Maximum number of executions. */
-    private transient int maxExecs;
-
-    /** Worker thread. */
-    private transient Thread t = new Thread("mapper-worker") {
-        @Override public void run() {
-            try {
-                while (!Thread.currentThread().isInterrupted())
-                    queue.put(jobIdGen.getAndIncrement());
-            }
-            catch (InterruptedException ignore) {
-                // No-op.
-            }
-        }
-    };
-
-    /**
-     * Sends job to node.
-     *
-     * @param n Node.
-     * @throws IgniteException If failed.
-     */
-    private void sendJob(ClusterNode n) {
-        try {
-            int jobId = queue.take();
-
-            sentJobs.incrementAndGet();
-
-            mapper.send(new ComputeJobAdapter(jobId) {
-                @IgniteInstanceResource
-                private Ignite g;
-
-                @Override public Object execute() {
-                    Integer jobId = argument(0);
-
-                    X.println(">>> Received job for ID: " + jobId);
-
-                    return g.cache("replicated").localPeek(jobId, CachePeekMode.ONHEAP);
-                }
-            }, n);
-        }
-        catch (InterruptedException e) {
-            throw new IgniteException(e);
-        }
-    }
-
-    /** {@inheritDoc} */
-    @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable Integer arg) {
-        maxExecs = arg;
-
-        // Start worker thread.
-        t.start();
-
-        if (g.cluster().nodes().size() == 1)
-            sendJob(g.cluster().localNode());
-        else
-            for (ClusterNode n : g.cluster().forRemotes().nodes())
-                sendJob(n);
-
-        return null;
-    }
-
-    /** {@inheritDoc} */
-    @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
-        if (res.getException() != null)
-            throw new IgniteException(res.getException());
-
-        TestObject o = res.getData();
-
-        assert o != null;
-
-        X.println("Received job result from node [resId=" + o.getId() + ", node=" + res.getNode().id() + ']');
-
-        if (sentJobs.get() < maxExecs)
-            sendJob(res.getNode());
-
-        return ComputeJobResultPolicy.WAIT;
-    }
-
-    /** {@inheritDoc} */
-    @Override public Integer reduce(List<ComputeJobResult> results) {
-        X.println(">>> Reducing task...");
-
-        t.interrupt();
-
-        try {
-            t.join();
-        }
-        catch (InterruptedException e) {
-            throw new IgniteException(e);
-        }
-
-        return null;
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask2.java b/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask2.java
deleted file mode 100644
index 9a795c4..0000000
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/GridContinuousMapperTask2.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.mapper;
-
-import java.util.HashMap;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.cache.CachePeekMode;
-import org.apache.ignite.cluster.ClusterNode;
-import org.apache.ignite.compute.ComputeJob;
-import org.apache.ignite.compute.ComputeJobAdapter;
-import org.apache.ignite.compute.ComputeJobResult;
-import org.apache.ignite.compute.ComputeJobResultPolicy;
-import org.apache.ignite.compute.ComputeTaskAdapter;
-import org.apache.ignite.internal.util.typedef.X;
-import org.apache.ignite.resources.IgniteInstanceResource;
-import org.jetbrains.annotations.Nullable;
-
-/**
- * Test task.
- */
-public class GridContinuousMapperTask2 extends ComputeTaskAdapter<int[], Integer> {
-    /** Grid. */
-    @IgniteInstanceResource
-    private Ignite g;
-
-    /** {@inheritDoc} */
-    @Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable int[] jobIds) {
-        Map<ComputeJob, ClusterNode> mappings = new HashMap<>(jobIds.length);
-
-        Iterator<ClusterNode> nodeIter = g.cluster().forRemotes().nodes().iterator();
-
-        for (int jobId : jobIds) {
-            ComputeJob job = new ComputeJobAdapter(jobId) {
-                @IgniteInstanceResource
-                private Ignite g;
-
-                @Override public Object execute() {
-                    Integer jobId = argument(0);
-
-                    X.println(">>> Received job for ID: " + jobId);
-
-                    return g.cache("replicated").localPeek(jobId, CachePeekMode.ONHEAP);
-                }
-            };
-
-            // If only local node in the grid.
-            if (g.cluster().nodes().size() == 1)
-                mappings.put(job, g.cluster().localNode());
-            else {
-                ClusterNode n = nodeIter.hasNext() ? nodeIter.next() :
-                    (nodeIter = g.cluster().forRemotes().nodes().iterator()).next();
-
-                mappings.put(job, n);
-            }
-        }
-
-        return mappings;
-    }
-
-    /** {@inheritDoc} */
-    @Override public ComputeJobResultPolicy result(ComputeJobResult res, List<ComputeJobResult> rcvd) {
-        TestObject o = res.getData();
-
-        X.println("Received job result from node [resId=" + o.getId() + ", node=" + res.getNode().id() + ']');
-
-        return super.result(res, rcvd);
-    }
-
-    /** {@inheritDoc} */
-    @Override public Integer reduce(List<ComputeJobResult> results) {
-        X.println(">>> Reducing task...");
-
-        return null;
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/TestObject.java b/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/TestObject.java
deleted file mode 100644
index 9a17e0e..0000000
--- a/modules/core/src/test/java/org/apache/ignite/loadtests/mapper/TestObject.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.mapper;
-
-import java.io.Serializable;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-
-/**
- * Test object.
- */
-public class TestObject implements Serializable {
-    /** ID. */
-    @QuerySqlField(index = true)
-    private int id;
-
-    /** Text. */
-    @QuerySqlField
-    private String txt;
-
-    /**
-     * @param id ID.
-     * @param txt Text.
-     */
-    public TestObject(int id, String txt) {
-        this.id = id;
-        this.txt = txt;
-    }
-
-    /**
-     * @return ID.
-     */
-    public int getId() {
-        return id;
-    }
-
-    /**
-     * @return Text.
-     */
-    public String getText() {
-        return txt;
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PJobClassLoaderSelfTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PJobClassLoaderSelfTest.java
index d67e294..0120a31 100644
--- a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PJobClassLoaderSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PJobClassLoaderSelfTest.java
@@ -129,7 +129,7 @@
 
             return Collections.singletonList(new ComputeJobAdapter() {
                     /** {@inheritDoc} */
-                    @SuppressWarnings({"ObjectEquality"})
+                    @Override @SuppressWarnings({"ObjectEquality"})
                     public Serializable execute() {
                         assert getClass().getClassLoader() == ldr;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PLocalDeploymentSelfTest.java b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PLocalDeploymentSelfTest.java
index 94739bb..ca2aeb6 100644
--- a/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PLocalDeploymentSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/p2p/GridP2PLocalDeploymentSelfTest.java
@@ -273,17 +273,17 @@
         }
 
         /** {@inheritDoc} */
-        public Object call() throws Exception {
+        @Override public Object call() throws Exception {
             return null;
         }
 
         /** {@inheritDoc} */
-        public Class<?> deployClass() {
+        @Override public Class<?> deployClass() {
             return this.getClass();
         }
 
         /** {@inheritDoc} */
-        public ClassLoader classLoader() {
+        @Override public ClassLoader classLoader() {
             return clsLdr;
         }
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetFutureAttributeWaitListenerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetFutureAttributeWaitListenerSelfTest.java
index f5c13fe..d354839 100644
--- a/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetFutureAttributeWaitListenerSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetFutureAttributeWaitListenerSelfTest.java
@@ -156,7 +156,7 @@
 
             for (int i = 1; i <= SPLIT_COUNT; i++) {
                 jobs.add(new ComputeJobAdapter(i) {
-                    @SuppressWarnings({"UnconditionalWait"})
+                    @Override @SuppressWarnings({"UnconditionalWait"})
                     public Serializable execute() {
                         assert taskSes != null;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetJobAttributeWaitListenerSelfTest.java b/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetJobAttributeWaitListenerSelfTest.java
index 5c7e6ec..c9e0537 100644
--- a/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetJobAttributeWaitListenerSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/session/GridSessionSetJobAttributeWaitListenerSelfTest.java
@@ -140,7 +140,7 @@
 
             for (int i = 1; i <= SPLIT_COUNT; i++) {
                 jobs.add(new ComputeJobAdapter(i) {
-                    @SuppressWarnings({"UnconditionalWait"})
+                    @Override @SuppressWarnings({"UnconditionalWait"})
                     public Serializable execute() {
                         assert taskSes != null;
 
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiSkipMessageSendTest.java b/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiSkipMessageSendTest.java
index c4bc8f2..2c17f95 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiSkipMessageSendTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/communication/tcp/TcpCommunicationSpiSkipMessageSendTest.java
@@ -20,6 +20,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.Socket;
+import java.net.SocketTimeoutException;
 import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentMap;
@@ -29,18 +30,17 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteCompute;
 import org.apache.ignite.IgniteException;
-import org.apache.ignite.Ignition;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.events.Event;
 import org.apache.ignite.events.EventType;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.util.nio.GridCommunicationClient;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteCallable;
 import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.segmentation.SegmentationPolicy;
 import org.apache.ignite.spi.IgniteSpiException;
 import org.apache.ignite.spi.IgniteSpiOperationTimeoutHelper;
 import org.apache.ignite.spi.collision.fifoqueue.FifoQueueCollisionSpi;
@@ -57,80 +57,112 @@
     private static final CountDownLatch COMPUTE_JOB_STARTED = new CountDownLatch(1);
 
     /** */
-    private static final long FAILURE_DETECTION_TIMEOUT = 10000;
+    private static final long FAILURE_DETECTION_TIMEOUT = 1_000;
 
     /** */
-    private static final long JOIN_TIMEOUT = 10000;
+    private static final long JOIN_TIMEOUT = 5_000;
 
     /** */
-    private static final long START_JOB_TIMEOUT = 10000;
-
-    /** */
-    private static final long DISABLE_NETWORK_DELAY = 2000;
+    private static final long START_JOB_TIMEOUT = 10_000;
 
     /** {@inheritDoc} */
     @Override protected long getTestTimeout() {
         return 2 * 60 * 1000;
     }
 
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        if (igniteInstanceName.contains("client"))
+            cfg.setClientMode(true);
+        else {
+            FifoQueueCollisionSpi collisionSpi = new FifoQueueCollisionSpi();
+
+            collisionSpi.setParallelJobsNumber(1);
+
+            cfg.setCollisionSpi(collisionSpi);
+        }
+
+        cfg.setFailureDetectionTimeout(FAILURE_DETECTION_TIMEOUT);
+
+        cfg.setSegmentationPolicy(SegmentationPolicy.NOOP);
+
+        TcpCommunicationSpi commSpi = new CustomCommunicationSpi();
+
+        cfg.setCommunicationSpi(commSpi);
+
+        TcpDiscoverySpi discoSpi = new CustomDiscoverySpi();
+
+        discoSpi.setIpFinder(LOCAL_IP_FINDER);
+        discoSpi.setJoinTimeout(JOIN_TIMEOUT);
+
+        cfg.setDiscoverySpi(discoSpi);
+
+        return cfg;
+    }
+
     /**
      * @throws Exception If failed.
      */
     public void testClientSegmented() throws Exception {
-        Ignite server = null;
-        Ignite client = null;
+        startGrid("server");
 
-        try {
-            server = Ignition.start(getConfig(false));
+        Ignite client = startGrid("client");
 
-            final CountDownLatch clientDisconnected = new CountDownLatch(1);
-            final CountDownLatch clientSegmented = new CountDownLatch(1);
+        CountDownLatch clientDisconnected = new CountDownLatch(1);
+        CountDownLatch clientSegmented = new CountDownLatch(1);
 
-            client = startClient(clientDisconnected, clientSegmented);
+        IgnitePredicate<Event> locLsnr = new IgnitePredicate<Event>() {
+            @Override public boolean apply(Event evt) {
+                log.info("Client node received event: " + evt.name());
 
-            final IgniteCompute compute = client.compute();
+                if (evt.type() == EventType.EVT_CLIENT_NODE_DISCONNECTED)
+                    clientDisconnected.countDown();
 
-            runJobAsync(compute);
+                if (evt.type() == EventType.EVT_NODE_SEGMENTED)
+                    clientSegmented.countDown();
 
-            if (!COMPUTE_JOB_STARTED.await(START_JOB_TIMEOUT, TimeUnit.MILLISECONDS))
-                fail("Compute job wasn't started.");
+                return true;
+            }
+        };
 
-            disableNetwork(client);
+        client.events().localListen(locLsnr,
+            EventType.EVT_NODE_SEGMENTED,
+            EventType.EVT_CLIENT_NODE_DISCONNECTED);
 
-            if (!clientDisconnected.await(FAILURE_DETECTION_TIMEOUT * 3, TimeUnit.MILLISECONDS))
-                fail("Client wasn't disconnected.");
+        IgniteCompute compute = client.compute();
 
-            if (!clientSegmented.await(JOIN_TIMEOUT * 2, TimeUnit.MILLISECONDS))
-                fail("Client wasn't segmented.");
-        }
-        finally {
-            if (client != null)
-                client.close();
+        runJobAsync(compute);
 
-            if (server != null)
-                server.close();
-        }
+        if (!COMPUTE_JOB_STARTED.await(START_JOB_TIMEOUT, TimeUnit.MILLISECONDS))
+            fail("Compute job wasn't started.");
+
+        disableNetwork(client);
+
+        if (!clientDisconnected.await(JOIN_TIMEOUT * 2, TimeUnit.MILLISECONDS))
+            fail("Client wasn't disconnected.");
+
+        if (!clientSegmented.await(JOIN_TIMEOUT * 2, TimeUnit.MILLISECONDS))
+            fail("Client wasn't segmented.");
     }
 
     /**
      * Simulate network disabling.
      *
      * @param ignite Ignite instance.
-     * @throws IgniteInterruptedCheckedException If thread sleep interrupted.
      * @throws InterruptedException If waiting for network disabled failed (interrupted).
      */
-    private void disableNetwork(Ignite ignite) throws IgniteInterruptedCheckedException, InterruptedException {
-        U.sleep(DISABLE_NETWORK_DELAY);
-
-        CustomCommunicationSpi communicationSpi = (CustomCommunicationSpi)ignite.configuration().getCommunicationSpi();
+    private void disableNetwork(Ignite ignite) throws InterruptedException {
+        CustomCommunicationSpi commSpi = (CustomCommunicationSpi)ignite.configuration().getCommunicationSpi();
 
         CustomDiscoverySpi discoverySpi = (CustomDiscoverySpi)ignite.configuration().getDiscoverySpi();
 
         discoverySpi.disableNetwork();
 
-        communicationSpi.disableNetwork();
+        commSpi.disableNetwork();
 
-        if (!discoverySpi.awaitNetworkDisabled(FAILURE_DETECTION_TIMEOUT * 2))
+        if (!discoverySpi.awaitNetworkDisabled())
             fail("Network wasn't disabled.");
     }
 
@@ -162,144 +194,40 @@
     }
 
     /**
-     * Create Communication Spi instance.
-     *
-     * @param client Is a client node.
-     * @return Communication Spi.
-     */
-    private TcpCommunicationSpi getCommunicationSpi(boolean client) {
-        TcpCommunicationSpi spi = new CustomCommunicationSpi(client);
-
-        spi.setName("CustomCommunicationSpi");
-
-        return spi;
-    }
-
-    /**
-     * Create Discovery Spi instance.
-     *
-     * @return Discovery Spi.
-     */
-    private TcpDiscoverySpi getDiscoverySpi() {
-        TcpDiscoverySpi spi = new CustomDiscoverySpi();
-
-        spi.setName("CustomDiscoverySpi");
-
-        spi.setIpFinder(LOCAL_IP_FINDER);
-
-        return spi;
-    }
-
-    /**
-     * Create Ignite configuration.
-     *
-     * @param clientMode Client mode.
-     * @return Ignite configuration.
-     */
-    private IgniteConfiguration getConfig(boolean clientMode) {
-        IgniteConfiguration cfg = new IgniteConfiguration();
-
-        cfg.setIgniteInstanceName(clientMode ? "client-node" : "server-node");
-
-        cfg.setClientMode(clientMode);
-
-        cfg.setCommunicationSpi(getCommunicationSpi(clientMode));
-
-        if (!clientMode) {
-            cfg.setDiscoverySpi(getDiscoverySpi());
-
-            FifoQueueCollisionSpi collisionSpi = new FifoQueueCollisionSpi();
-
-            collisionSpi.setParallelJobsNumber(1);
-
-            cfg.setCollisionSpi(collisionSpi);
-        }
-        else {
-            cfg.setFailureDetectionTimeout(FAILURE_DETECTION_TIMEOUT);
-
-            cfg.setDiscoverySpi(getDiscoverySpi().setJoinTimeout(JOIN_TIMEOUT));
-        }
-
-        return cfg;
-    }
-
-    /**
-     * Start client node.
-     *
-     * @param clientDisconnected Client is disconnected.
-     * @param clientSegmented Client is segmented.
-     * @return Ignite instance.
-     */
-    private Ignite startClient(final CountDownLatch clientDisconnected, final CountDownLatch clientSegmented) {
-        Ignite ignite = Ignition.start(getConfig(true));
-
-        IgnitePredicate<Event> locLsnr = new IgnitePredicate<Event>() {
-            @Override public boolean apply(Event event) {
-                log.info("Client node received event: " + event.name());
-
-                if (event.type() == EventType.EVT_CLIENT_NODE_DISCONNECTED)
-                    clientDisconnected.countDown();
-
-                if (event.type() == EventType.EVT_NODE_SEGMENTED)
-                    clientSegmented.countDown();
-
-                return true;
-            }
-        };
-
-        ignite.events().localListen(locLsnr,
-            EventType.EVT_NODE_SEGMENTED,
-            EventType.EVT_CLIENT_NODE_DISCONNECTED);
-
-        return ignite;
-    }
-
-    /**
      * Communication Spi that emulates connection troubles.
      */
     class CustomCommunicationSpi extends TcpCommunicationSpi {
         /** Network is disabled. */
-        private volatile boolean networkDisabled = false;
-
-        /** Additional logging is enabled. */
-        private final boolean logEnabled;
-
-        /**
-         * @param enableLogs Enable additional logging.
-         */
-        CustomCommunicationSpi(boolean enableLogs) {
-            super();
-            this.logEnabled = enableLogs;
-        }
+        private volatile boolean netDisabled;
 
         /** {@inheritDoc} */
         @Override public void sendMessage(ClusterNode node, Message msg,
             IgniteInClosure<IgniteException> ackC) throws IgniteSpiException {
-            String message = msg.toString();
+            String msgStr = msg.toString();
 
-            if (logEnabled)
-                log.info("CustomCommunicationSpi.sendMessage: " + message);
+            log.info("CustomCommunicationSpi.sendMessage: " + msgStr);
 
-            if (message.contains("TOPIC_JOB_CANCEL"))
+            if (msgStr.contains("TOPIC_JOB_CANCEL"))
                 closeTcpConnections();
 
             super.sendMessage(node, msg, ackC);
         }
 
         /** {@inheritDoc} */
-        @Override protected GridCommunicationClient createTcpClient(ClusterNode node, int connIdx) throws IgniteCheckedException {
-            if (logEnabled)
-                log.info(String.format("CustomCommunicationSpi.createTcpClient [networkDisabled=%s, node=%s]", networkDisabled, node));
+        @Override protected GridCommunicationClient createTcpClient(ClusterNode node,
+            int connIdx) throws IgniteCheckedException {
+            log.info(String.format("CustomCommunicationSpi.createTcpClient [networkDisabled=%s, node=%s]",
+                netDisabled, node));
 
-            if (networkDisabled) {
-                IgniteSpiOperationTimeoutHelper timeoutHelper = new IgniteSpiOperationTimeoutHelper(this, !node.isClient());
+            if (netDisabled) {
+                IgniteSpiOperationTimeoutHelper timeoutHelper = new IgniteSpiOperationTimeoutHelper(this,
+                    !node.isClient());
 
                 long timeout = timeoutHelper.nextTimeoutChunk(getConnectTimeout());
 
-                if (logEnabled)
-                    log.info("CustomCommunicationSpi.createTcpClient [timeoutHelper.nextTimeoutChunk=" + timeout + "]");
+                log.info("CustomCommunicationSpi.createTcpClient [timeoutHelper.nextTimeoutChunk=" + timeout + "]");
 
-                sleep(timeout);
+                U.sleep(timeout);
 
                 return null;
             }
@@ -311,7 +239,7 @@
          * Simulate network disabling.
          */
         void disableNetwork() {
-            networkDisabled = true;
+            netDisabled = true;
         }
 
         /**
@@ -322,7 +250,7 @@
 
             Set<UUID> ids = clients.keySet();
 
-            if (ids.size() > 0) {
+            if (!ids.isEmpty()) {
                 log.info("Close TCP clients: " + ids);
 
                 for (UUID nodeId : ids) {
@@ -346,25 +274,18 @@
      */
     class CustomDiscoverySpi extends TcpDiscoverySpi {
         /** Network is disabled. */
-        private volatile boolean networkDisabled = false;
+        private volatile boolean netDisabled;
 
         /** */
-        private final CountDownLatch networkDisabledLatch = new CountDownLatch(1);
-
-        /** */
-        CustomDiscoverySpi() {
-            super();
-
-            setName("CustomDiscoverySpi");
-        }
+        private final CountDownLatch netDisabledLatch = new CountDownLatch(1);
 
         /** {@inheritDoc} */
         @Override protected <T> T readMessage(Socket sock, @Nullable InputStream in,
             long timeout) throws IOException, IgniteCheckedException {
-            if (networkDisabled) {
-                sleep(timeout);
+            if (netDisabled) {
+                U.sleep(timeout);
 
-                return null;
+                throw new SocketTimeoutException("CustomDiscoverySpi: network is disabled.");
             }
             else
                 return super.readMessage(sock, in, timeout);
@@ -373,12 +294,10 @@
         /** {@inheritDoc} */
         @Override protected void writeToSocket(Socket sock, TcpDiscoveryAbstractMessage msg,
             long timeout) throws IOException, IgniteCheckedException {
-            if (networkDisabled) {
-                sleep(timeout);
+            if (netDisabled) {
+                netDisabledLatch.countDown();
 
-                networkDisabledLatch.countDown();
-
-                throw new IgniteCheckedException("CustomDiscoverySpi: network is disabled.");
+                throw new SocketTimeoutException("CustomDiscoverySpi: network is disabled.");
             }
             else
                 super.writeToSocket(sock, msg, timeout);
@@ -388,27 +307,19 @@
          * Simulate network disabling.
          */
         void disableNetwork() {
-            networkDisabled = true;
+            netDisabled = true;
         }
 
         /**
          * Wait until the network is disabled.
          */
-        boolean awaitNetworkDisabled(long timeout) throws InterruptedException {
-            return networkDisabledLatch.await(timeout, TimeUnit.MILLISECONDS);
+        boolean awaitNetworkDisabled() throws InterruptedException {
+            return netDisabledLatch.await(FAILURE_DETECTION_TIMEOUT * 2, TimeUnit.MILLISECONDS);
         }
     }
 
-    /**
-     * Sleeps for given number of milliseconds.
-     *
-     * @param timeout Time to sleep (2000 ms by default).
-     * @throws IgniteInterruptedCheckedException If current thread interrupted.
-     */
-    static void sleep(long timeout) throws IgniteInterruptedCheckedException {
-        if (timeout > 0)
-            U.sleep(timeout);
-        else
-            U.sleep(2000);
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() {
+        stopAllGrids();
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/AbstractDiscoverySelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/AbstractDiscoverySelfTest.java
index fa1a2ae..e59d24a 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/AbstractDiscoverySelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/AbstractDiscoverySelfTest.java
@@ -34,6 +34,8 @@
 import mx4j.tools.adaptor.http.HttpAdaptor;
 import org.apache.ignite.cluster.ClusterNode;
 import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.util.future.GridFinishedFuture;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.marshaller.Marshaller;
 import org.apache.ignite.spi.IgniteSpi;
@@ -160,10 +162,17 @@
         }
 
         /** {@inheritDoc} */
-        @Override public void onDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot,
-            Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage data) {
+        @Override public IgniteInternalFuture onDiscovery(
+            int type,
+            long topVer,
+            ClusterNode node,
+            Collection<ClusterNode> topSnapshot,
+            Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage data
+        ) {
             if (type == EVT_NODE_METRICS_UPDATED)
                 isMetricsUpdate = true;
+
+            return new GridFinishedFuture();
         }
     }
 
@@ -237,13 +246,15 @@
                     // No-op.
                 }
 
-                @Override public void onDiscovery(int type, long topVer, ClusterNode node,
+                @Override public IgniteInternalFuture onDiscovery(int type, long topVer, ClusterNode node,
                     Collection<ClusterNode> topSnapshot, Map<Long, Collection<ClusterNode>> topHist,
                     @Nullable DiscoverySpiCustomMessage data) {
                     // If METRICS_UPDATED came from local node
                     if (type == EVT_NODE_METRICS_UPDATED
                         && node.id().equals(spi.getLocalNode().id()))
                         spiCnt.addAndGet(1);
+
+                    return new GridFinishedFuture();
                 }
             };
 
@@ -405,7 +416,7 @@
                     }
 
                     @SuppressWarnings({"NakedNotify"})
-                    @Override public void onDiscovery(int type, long topVer, ClusterNode node,
+                    @Override public IgniteInternalFuture onDiscovery(int type, long topVer, ClusterNode node,
                         Collection<ClusterNode> topSnapshot, Map<Long, Collection<ClusterNode>> topHist,
                         @Nullable DiscoverySpiCustomMessage data) {
                         info("Discovery event [type=" + type + ", node=" + node + ']');
@@ -413,6 +424,8 @@
                         synchronized (mux) {
                             mux.notifyAll();
                         }
+
+                        return new GridFinishedFuture();
                     }
                 });
 
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/IgniteClientConnectTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/IgniteClientConnectTest.java
index 1a89987..2ed55a1 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/IgniteClientConnectTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/IgniteClientConnectTest.java
@@ -135,7 +135,7 @@
      */
     class TestTcpDiscoverySpi extends TcpDiscoverySpi {
         /** {@inheritDoc} */
-        protected void writeToSocket(Socket sock, OutputStream out, TcpDiscoveryAbstractMessage msg, long timeout) throws IOException,
+        @Override protected void writeToSocket(Socket sock, OutputStream out, TcpDiscoveryAbstractMessage msg, long timeout) throws IOException,
                 IgniteCheckedException {
             if (msg instanceof TcpDiscoveryNodeAddFinishedMessage) {
                 if (msg.senderNodeId() != null && clientJustStarted.get())
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java
index 2d130e1..c85e94e 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpClientDiscoverySpiSelfTest.java
@@ -148,7 +148,7 @@
     private boolean longSockTimeouts;
 
     /** */
-    protected long clientFailureDetectionTimeout = 1000;
+    protected long clientFailureDetectionTimeout = 5000;
 
     /** */
     private IgniteInClosure2X<TcpDiscoveryAbstractMessage, Socket> afterWrite;
@@ -263,7 +263,7 @@
         clientIpFinder = null;
         joinTimeout = TcpDiscoverySpi.DFLT_JOIN_TIMEOUT;
         netTimeout = TcpDiscoverySpi.DFLT_NETWORK_TIMEOUT;
-        clientFailureDetectionTimeout = 1000;
+        clientFailureDetectionTimeout = 5000;
         longSockTimeouts = false;
 
         assert G.allGrids().isEmpty();
@@ -538,6 +538,8 @@
 
         ((TestTcpDiscoverySpi)client.configuration().getDiscoverySpi()).resumeAll();
 
+        Thread.sleep(2000);
+
         assert ((IgniteEx)srv1).context().discovery().pingNode(client.cluster().localNode().id());
         assert ((IgniteEx)srv0).context().discovery().pingNode(client.cluster().localNode().id());
     }
@@ -583,6 +585,8 @@
      * @throws Exception If failed.
      */
     public void testClientReconnectOnRouterSuspendTopologyChange() throws Exception {
+        clientFailureDetectionTimeout = 20_000;
+
         reconnectAfterSuspend(true);
     }
 
@@ -1266,6 +1270,8 @@
     public void testTimeoutWaitingNodeAddedMessage() throws Exception {
         longSockTimeouts = true;
 
+        clientFailureDetectionTimeout = 20_000;
+
         startServerNodes(2);
 
         final CountDownLatch cnt = new CountDownLatch(1);
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryClientSuspensionSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryClientSuspensionSelfTest.java
new file mode 100644
index 0000000..a519d25
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoveryClientSuspensionSelfTest.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.spi.discovery.tcp;
+
+import java.util.Timer;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Test for missed client metrics update messages.
+ */
+public class TcpDiscoveryClientSuspensionSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        TcpDiscoverySpi disco = new TcpDiscoverySpi();
+
+        disco.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(disco);
+
+        cfg.setMetricsUpdateFrequency(100);
+
+        cfg.setClientFailureDetectionTimeout(1000);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        System.setProperty(IgniteSystemProperties.IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY, "10000");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        System.clearProperty(IgniteSystemProperties.IGNITE_DISCO_FAILED_CLIENT_RECONNECT_DELAY);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOneServer() throws Exception {
+        doTestClientSuspension(1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTwoServers() throws Exception {
+        doTestClientSuspension(2);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testThreeServers() throws Exception {
+        doTestClientSuspension(3);
+    }
+
+    /**
+     * @param serverCnt Servers count.
+     * @throws Exception If failed.
+     */
+    private void doTestClientSuspension(int serverCnt) throws Exception {
+        startGrids(serverCnt);
+
+        Ignition.setClientMode(true);
+
+        Ignite client = startGrid("client");
+
+        for (int i = 0; i < serverCnt; i++)
+            assertEquals(1, grid(i).cluster().forClients().nodes().size());
+
+        Thread.sleep(2000);
+
+        for (int i = 0; i < serverCnt; i++)
+            assertEquals(1, grid(i).cluster().forClients().nodes().size());
+
+        suspendClientMetricsUpdate(client);
+
+        Thread.sleep(2000);
+
+        for (int i = 0; i < serverCnt; i++)
+            assertEquals(0, grid(i).cluster().forClients().nodes().size());
+    }
+
+    /**
+     * @param client Client.
+     */
+    private void suspendClientMetricsUpdate(Ignite client) {
+        assert client.cluster().localNode().isClient();
+
+        ClientImpl impl = U.field(client.configuration().getDiscoverySpi(), "impl");
+
+        Timer timer = U.field(impl, "timer");
+
+        timer.cancel();
+
+        System.out.println("Metrics update message suspended");
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiFailureTimeoutSelfTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiFailureTimeoutSelfTest.java
index b6759e6..a760e2e 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiFailureTimeoutSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySpiFailureTimeoutSelfTest.java
@@ -323,7 +323,7 @@
         }
 
         /** {@inheritDoc} */
-        protected void writeToSocket(TcpDiscoveryAbstractMessage msg, Socket sock, int res, long timeout)
+        @Override protected void writeToSocket(TcpDiscoveryAbstractMessage msg, Socket sock, int res, long timeout)
             throws IOException {
             if (cntConnCheckMsg && msg instanceof TcpDiscoveryConnectionCheckMessage)
                 connCheckStatusMsgCntReceived++;
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySslParametersTest.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySslParametersTest.java
new file mode 100644
index 0000000..f2fc278
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TcpDiscoverySslParametersTest.java
@@ -0,0 +1,274 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.spi.discovery.tcp;
+
+import java.util.concurrent.Callable;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.ssl.SslContextFactory;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests cases when node connects to cluster with different set of cipher suites.
+ */
+public class TcpDiscoverySslParametersTest extends GridCommonAbstractTest {
+
+    /** */
+    private volatile String[] cipherSuites;
+
+    /** */
+    private volatile String[] protocols;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        SslContextFactory factory = (SslContextFactory)GridTestUtils.sslTrustedFactory(
+            "node01", "trustone");
+
+        factory.setCipherSuites(cipherSuites);
+
+        factory.setProtocols(protocols);
+
+        cfg.setSslContextFactory(factory);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSameCipherSuite() throws Exception {
+        checkDiscoverySuccess(
+            new String[][] {
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                },
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOneCommonCipherSuite() throws Exception {
+        checkDiscoverySuccess(
+            new String[][] {
+                new String[] {
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                },
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoCommonCipherSuite() throws Exception {
+        checkDiscoveryFailure(
+            new String[][] {
+                new String[] {
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                },
+                new String[] {
+                    "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNonExistentCipherSuite() throws Exception {
+        checkDiscoveryFailure(
+            new String[][] {
+                new String[] {
+                    "TLS_RSA_WITH_AES_128_GCM_SHA256",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                },
+                new String[] {
+                    "TLC_FAKE_CIPHER",
+                    "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256"
+                }
+            },
+            null,
+            IgniteCheckedException.class,
+            "Unsupported ciphersuite"
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoCommonProtocols() throws Exception {
+        checkDiscoveryFailure(
+            null,
+            new String[][] {
+                new String[] {
+                    "TLSv1.1",
+                    "SSLv3"
+                },
+                new String[] {
+                    "TLSv1",
+                    "TLSv1.2",
+                }
+            }
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNonExistentProtocol() throws Exception {
+        checkDiscoveryFailure(
+            null,
+            new String[][] {
+                new String[] {
+                    "SSLv3"
+                },
+                new String[] {
+                    "SSLv3",
+                    "SSLvDoesNotExist"
+                }
+            },
+            IgniteCheckedException.class,
+            "SSLvDoesNotExist"
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSameProtocols() throws Exception {
+        checkDiscoverySuccess(null,
+            new String[][] {
+                new String[] {
+                    "TLSv1.1",
+                    "TLSv1.2",
+                },
+                new String[] {
+                    "TLSv1.1",
+                    "TLSv1.2",
+                }
+            }
+        );
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testOneCommonProtocol() throws Exception {
+        checkDiscoverySuccess(null,
+            new String[][] {
+                new String[] {
+                    "TLSv1",
+                    "TLSv1.1",
+                    "TLSv1.2",
+                },
+                new String[] {
+                    "TLSv1.1",
+                    "SSLv3"
+                }
+            }
+        );
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @throws Exception If failed.
+     */
+    private void checkDiscoverySuccess(String[][] cipherSuites, String[][] protocols) throws Exception {
+        int n = Math.max(
+            cipherSuites != null ? cipherSuites.length : 0,
+            protocols != null ? protocols.length : 0);
+
+        for (int i = 0; i < n; i++) {
+            this.cipherSuites = cipherSuites != null && i < cipherSuites.length ? cipherSuites[i] : null;
+            this.protocols = protocols != null && i < protocols.length ? protocols[i] : null;
+
+            startGrid(i);
+        }
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @throws Exception If failed.
+     */
+    private void checkDiscoveryFailure(String[][] cipherSuites, String[][] protocols) throws Exception {
+        checkDiscoveryFailure(cipherSuites, protocols, IgniteCheckedException.class, "Unable to establish secure connection.");
+    }
+
+    /**
+     * @param cipherSuites list of cipher suites
+     * @param protocols list of protocols
+     * @param ex expected exception class
+     * @param msg exception message
+     * @throws Exception If failed.
+     */
+    private void checkDiscoveryFailure(String[][] cipherSuites, String[][] protocols, Class<? extends Throwable> ex, String msg) throws Exception {
+        this.cipherSuites = cipherSuites != null ? cipherSuites[0] : null;
+        this.protocols = protocols != null ? protocols[0] : null;
+
+        startGrid(0);
+
+        int n = Math.max(
+            cipherSuites != null ? cipherSuites.length : 0,
+            protocols != null ? protocols.length : 0);
+
+        for (int i = 1; i < n; i++) {
+            this.cipherSuites = cipherSuites != null && i < cipherSuites.length ? cipherSuites[i] : null;
+            this.protocols = protocols != null && i < protocols.length ? protocols[i] : null;
+
+            int finalI = i;
+
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    startGrid(finalI);
+
+                    return null;
+                }
+            }, ex, msg);
+        }
+    }
+
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TestTcpDiscoverySpi.java b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TestTcpDiscoverySpi.java
index 721192f..a9c4e68 100644
--- a/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TestTcpDiscoverySpi.java
+++ b/modules/core/src/test/java/org/apache/ignite/spi/discovery/tcp/TestTcpDiscoverySpi.java
@@ -32,7 +32,7 @@
     public boolean ignorePingResponse;
 
     /** {@inheritDoc} */
-    protected void writeToSocket(Socket sock, OutputStream out, TcpDiscoveryAbstractMessage msg, long timeout) throws IOException,
+    @Override protected void writeToSocket(Socket sock, OutputStream out, TcpDiscoveryAbstractMessage msg, long timeout) throws IOException,
         IgniteCheckedException {
         if (msg instanceof TcpDiscoveryPingResponse && ignorePingResponse)
             return;
diff --git a/modules/core/src/test/java/org/apache/ignite/startup/cmdline/GridCommandLineLoaderTest.java b/modules/core/src/test/java/org/apache/ignite/startup/cmdline/GridCommandLineLoaderTest.java
index b7bc7e8..612479b 100644
--- a/modules/core/src/test/java/org/apache/ignite/startup/cmdline/GridCommandLineLoaderTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/startup/cmdline/GridCommandLineLoaderTest.java
@@ -17,15 +17,21 @@
 
 package org.apache.ignite.startup.cmdline;
 
-import java.util.concurrent.CountDownLatch;
-import org.apache.ignite.IgniteState;
-import org.apache.ignite.IgnitionListener;
-import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lifecycle.LifecycleBean;
+import org.apache.ignite.lifecycle.LifecycleEventType;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.apache.ignite.testframework.junits.common.GridCommonTest;
+import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy;
 
-import static org.apache.ignite.IgniteState.STARTED;
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_RESTART_CODE;
 
 /**
  * Command line loader test.
@@ -35,34 +41,71 @@
     /** */
     private static final String GRID_CFG_PATH = "/modules/core/src/test/config/loaders/grid-cfg.xml";
 
-    /** */
-    private final CountDownLatch latch = new CountDownLatch(2);
-
-    /** */
-    public GridCommandLineLoaderTest() {
-        super(false);
-    }
-
     /**
      * @throws Exception If failed.
      */
     public void testLoader() throws Exception {
         String path = U.getIgniteHome() + GRID_CFG_PATH;
 
-        info("Loading Grid from configuration file: " + path);
+        info("Using Grids from configuration file: " + path);
 
-        G.addListener(new IgnitionListener() {
-            @Override public void onStateChange(String name, IgniteState state) {
-                if (state == STARTED) {
-                    info("Received started notification from grid: " + name);
-
-                    latch.countDown();
-
-                    G.stop(name, true);
+        IgniteProcessProxy proxy = new IgniteProcessProxy(
+            new IgniteConfiguration().setIgniteInstanceName("fake"), log, null) {
+                @Override protected String igniteNodeRunnerClassName() throws Exception {
+                    return CommandLineStartup.class.getCanonicalName();
                 }
-            }
-        });
 
-        CommandLineStartup.main(new String[]{path});
+                @Override protected String params(IgniteConfiguration cfg, boolean resetDiscovery) throws Exception {
+                    return path;
+                }
+            };
+
+        try {
+            GridTestUtils.waitForCondition(new GridAbsPredicate() {
+                @Override public boolean apply() {
+                    return !proxy.getProcess().getProcess().isAlive();
+                }
+            }, 150_000);
+        }
+        finally {
+            if (proxy.getProcess().getProcess().isAlive())
+                proxy.kill();
+        }
+
+        assertEquals(2, proxy.getProcess().getProcess().exitValue());
     }
-}
\ No newline at end of file
+
+    /**
+     * Kills node after it is started.
+     */
+    public static class KillerLifecycleBean implements LifecycleBean {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** */
+        @Override public void onLifecycleEvent(LifecycleEventType evt) throws IgniteException {
+            if (evt == LifecycleEventType.AFTER_NODE_START) {
+                System.setProperty(IGNITE_RESTART_CODE, Integer.toString(
+                    1 + IgniteSystemProperties.getInteger(IGNITE_RESTART_CODE, 0)));
+
+                System.out.println("Ignite instance seen, will shut it down.");
+
+                new Thread(new Runnable() {
+                    @Override public void run() {
+                        try {
+                            Thread.sleep(3000);
+                        }
+                        catch (InterruptedException e) {
+                            e.printStackTrace();
+                        }
+
+                        System.out.println("Shutdown imminent.");
+
+                        ignite.close();
+                    }
+                }).start();
+            }
+        }
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
index 9390d6b..77f5324 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/GridTestUtils.java
@@ -115,6 +115,9 @@
     public static final long DFLT_BUSYWAIT_SLEEP_INTERVAL = 200;
 
     /** */
+    public static final long DFLT_TEST_TIMEOUT = 5 * 60 * 1000;
+
+    /** */
     static final String ALPHABETH = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890_";
 
     /**
@@ -156,9 +159,10 @@
         }
 
         /** {@inheritDoc} */
-        @Override public void onDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot, @Nullable Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage spiCustomMsg) {
+        @Override public IgniteInternalFuture onDiscovery(int type, long topVer, ClusterNode node, Collection<ClusterNode> topSnapshot, @Nullable Map<Long, Collection<ClusterNode>> topHist, @Nullable DiscoverySpiCustomMessage spiCustomMsg) {
             hook.handleDiscoveryMessage(spiCustomMsg);
-            delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
+
+            return delegate.onDiscovery(type, topVer, node, topSnapshot, topHist, spiCustomMsg);
         }
 
         /** {@inheritDoc} */
@@ -407,6 +411,24 @@
      * Checks whether callable throws exception, which is itself of a specified
      * class, or has a cause of the specified class.
      *
+     * @param runnable Runnable.
+     * @param cls Expected class.
+     * @return Thrown throwable.
+     */
+    @Nullable public static Throwable assertThrowsWithCause(Runnable runnable, Class<? extends Throwable> cls) {
+        return assertThrowsWithCause(new Callable<Integer>() {
+            @Override public Integer call() throws Exception {
+                runnable.run();
+
+                return 0;
+            }
+        }, cls);
+    }
+
+    /**
+     * Checks whether callable throws exception, which is itself of a specified
+     * class, or has a cause of the specified class.
+     *
      * @param call Callable.
      * @param cls Expected class.
      * @return Thrown throwable.
@@ -1665,7 +1687,7 @@
     public static SSLContext sslContext() throws GeneralSecurityException, IOException {
         SSLContext ctx = SSLContext.getInstance("TLS");
 
-        char[] storePass = GridTestProperties.getProperty("ssl.keystore.password").toCharArray();
+        char[] storePass = keyStorePassword().toCharArray();
 
         KeyManagerFactory keyMgrFactory = KeyManagerFactory.getInstance("SunX509");
 
@@ -1692,7 +1714,7 @@
 
         factory.setKeyStoreFilePath(
             U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
-        factory.setKeyStorePassword(GridTestProperties.getProperty("ssl.keystore.password").toCharArray());
+        factory.setKeyStorePassword(keyStorePassword().toCharArray());
 
         factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager());
 
@@ -1710,7 +1732,7 @@
 
         factory.setKeyStoreFilePath(
             U.resolveIgnitePath(GridTestProperties.getProperty("ssl.keystore.path")).getAbsolutePath());
-        factory.setKeyStorePassword(GridTestProperties.getProperty("ssl.keystore.password").toCharArray());
+        factory.setKeyStorePassword(keyStorePassword().toCharArray());
 
         factory.setTrustManagers(SslContextFactory.getDisabledTrustManager());
 
@@ -1727,16 +1749,23 @@
     public static Factory<SSLContext> sslTrustedFactory(String keyStore, String trustStore) {
         SslContextFactory factory = new SslContextFactory();
 
-        factory.setKeyStoreFilePath(U.resolveIgnitePath(GridTestProperties.getProperty(
-            "ssl.keystore." + keyStore + ".path")).getAbsolutePath());
-        factory.setKeyStorePassword(GridTestProperties.getProperty("ssl.keystore.password").toCharArray());
-        factory.setTrustStoreFilePath(U.resolveIgnitePath(GridTestProperties.getProperty(
-            "ssl.keystore." + trustStore + ".path")).getAbsolutePath());
-        factory.setTrustStorePassword(GridTestProperties.getProperty("ssl.keystore.password").toCharArray());
+        factory.setKeyStoreFilePath(keyStorePath(keyStore));
+        factory.setKeyStorePassword(keyStorePassword().toCharArray());
+        factory.setTrustStoreFilePath(keyStorePath(trustStore));
+        factory.setTrustStorePassword(keyStorePassword().toCharArray());
 
         return factory;
     }
 
+    public static String keyStorePassword() {
+        return GridTestProperties.getProperty("ssl.keystore.password");
+    }
+
+    @NotNull public static String keyStorePath(String keyStore) {
+        return U.resolveIgnitePath(GridTestProperties.getProperty(
+            "ssl.keystore." + keyStore + ".path")).getAbsolutePath();
+    }
+
     /**
      * @param o1 Object 1.
      * @param o2 Object 2.
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/configvariations/ConfigVariations.java b/modules/core/src/test/java/org/apache/ignite/testframework/configvariations/ConfigVariations.java
index 8e7e554..ca22b56b 100644
--- a/modules/core/src/test/java/org/apache/ignite/testframework/configvariations/ConfigVariations.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/configvariations/ConfigVariations.java
@@ -92,7 +92,7 @@
     @SuppressWarnings("unchecked")
     private static final ConfigParameter<CacheConfiguration>[][] BASIC_CACHE_SET = new ConfigParameter[][] {
         Parameters.objectParameters("setCacheMode", CacheMode.REPLICATED, CacheMode.PARTITIONED),
-        Parameters.enumParameters("setAtomicityMode", CacheAtomicityMode.class),
+        Parameters.objectParameters("setAtomicityMode", CacheAtomicityMode.ATOMIC, CacheAtomicityMode.TRANSACTIONAL),
         // Set default parameters.
         Parameters.objectParameters("setLoadPreviousValue", true),
         asArray(SIMPLE_CACHE_STORE_PARAM),
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
index 45833fb..ee0dfa4 100755
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/GridAbstractTest.java
@@ -160,9 +160,6 @@
     }};
 
     /** */
-    private static final long DFLT_TEST_TIMEOUT = 5 * 60 * 1000;
-
-    /** */
     private static final int DFLT_TOP_WAIT_TIMEOUT = 2000;
 
     /** */
@@ -1528,6 +1525,9 @@
             }
         }
 
+        if (cfg.getDiscoverySpi() instanceof TcpDiscoverySpi)
+            ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setJoinTimeout(getTestTimeout());
+
         if (isMultiJvm())
             ((TcpDiscoverySpi)cfg.getDiscoverySpi()).setIpFinder(LOCAL_IP_FINDER);
 
@@ -1580,6 +1580,14 @@
     }
 
     /**
+     * @param idx Index of the Ignite instance.
+     * @return Indexed Ignite instance name.
+     */
+    protected String testNodeName(int idx) {
+        return getTestIgniteInstanceName(idx);
+    }
+
+    /**
      * Parses test Ignite instance index from test Ignite instance name.
      *
      * @param testIgniteInstanceName Test Ignite instance name, returned by {@link #getTestIgniteInstanceName(int)}.
@@ -1765,6 +1773,8 @@
         finally {
             serializedObj.clear();
 
+            Exception err = null;
+
             if (isLastTest()) {
                 info(">>> Stopping test class: " + testClassDescription() + " <<<");
 
@@ -1779,11 +1789,22 @@
                 // Set reset flags, so counters will be reset on the next setUp.
                 counters.setReset(true);
 
-                afterTestsStopped();
+                try {
+                    afterTestsStopped();
+                }
+                catch (Exception e) {
+                    err = e;
+                }
 
-                if(isSafeTopology())
+                if (isSafeTopology()) {
                     stopAllGrids(false);
 
+                    if (stopGridErr) {
+                        err = new RuntimeException("Not all Ignite instances has been stopped. " +
+                            "Please, see log for details.", err);
+                    }
+                }
+
                 // Remove counters.
                 tests.remove(getClass());
 
@@ -1800,8 +1821,8 @@
 
             cleanReferences();
 
-           if (isLastTest() && isSafeTopology() && stopGridErr)
-               throw new RuntimeException("Not all Ignite instances has been stopped. Please, see log for details.");
+            if (err != null)
+                throw err;
         }
     }
 
@@ -2172,7 +2193,7 @@
         if (timeout != null)
             return Long.parseLong(timeout);
 
-        return DFLT_TEST_TIMEOUT;
+        return GridTestUtils.DFLT_TEST_TIMEOUT;
     }
 
     /**
diff --git a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
index 313cd71..273456a 100755
--- a/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/testframework/junits/common/GridCommonAbstractTest.java
@@ -19,6 +19,7 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.BitSet;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
@@ -75,13 +76,13 @@
 import org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager;
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
+import org.apache.ignite.internal.processors.cache.IgniteCacheProxyImpl;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedCache;
-import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionDemander;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap;
 import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter;
 import org.apache.ignite.internal.processors.cache.local.GridLocalCache;
@@ -89,9 +90,6 @@
 import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx;
 import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager;
 import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2;
-import org.apache.ignite.internal.processors.cache.verify.PartitionHashRecord;
-import org.apache.ignite.internal.processors.cache.verify.PartitionKey;
-import org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTask;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.G;
@@ -100,11 +98,11 @@
 import org.apache.ignite.internal.util.typedef.internal.LT;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.internal.visor.VisorTaskArgument;
-import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTask;
 import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskArg;
-import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskResult;
 import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskV2;
+import org.apache.ignite.lang.IgniteBiInClosure;
 import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.lang.IgniteInClosure;
 import org.apache.ignite.lang.IgnitePredicate;
 import org.apache.ignite.lang.IgniteRunnable;
 import org.apache.ignite.resources.IgniteInstanceResource;
@@ -1173,37 +1171,37 @@
      * @return List of keys.
      */
     protected final List<Integer> movingKeysAfterJoin(Ignite ign, String cacheName, int size) {
-        assertEquals("Expected consistentId is set to node name", ign.name(), ign.cluster().localNode().consistentId());
+        return movingKeysAfterJoin(ign, cacheName, size, null);
+    }
 
-        GridCacheContext<Object, Object> cctx = ((IgniteKernal)ign).context().cache().internalCache(cacheName).context();
+    /**
+     * Return list of keys that are primary for given node on current topology,
+     * but primary node will change after new node will be added.
+     *
+     * @param ign Ignite.
+     * @param cacheName Cache name.
+     * @param size Number of keys.
+     * @param nodeInitializer Node initializer closure.
+     * @return List of keys.
+     */
+    protected final List<Integer> movingKeysAfterJoin(Ignite ign, String cacheName, int size,
+        @Nullable IgniteInClosure<ClusterNode> nodeInitializer) {
+        assertEquals("Expected consistentId is set to node name", ign.name(), ign.cluster().localNode().consistentId());
 
         ArrayList<ClusterNode> nodes = new ArrayList<>(ign.cluster().nodes());
 
-        AffinityFunction func = cctx.config().getAffinity();
-
-        AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
-            nodes,
-            null,
-            null,
-            AffinityTopologyVersion.NONE,
-            cctx.config().getBackups());
-
-        List<List<ClusterNode>> calcAff = func.assignPartitions(ctx);
+        List<List<ClusterNode>> calcAff = calcAffinity(ign.cache(cacheName), nodes);
 
         GridTestNode fakeNode = new GridTestNode(UUID.randomUUID(), null);
 
+        if (nodeInitializer != null)
+            nodeInitializer.apply(fakeNode);
+
         fakeNode.consistentId(getTestIgniteInstanceName(nodes.size()));
 
         nodes.add(fakeNode);
 
-        ctx = new GridAffinityFunctionContextImpl(
-            nodes,
-            null,
-            null,
-            AffinityTopologyVersion.NONE,
-            cctx.config().getBackups());
-
-        List<List<ClusterNode>> calcAff2 = func.assignPartitions(ctx);
+        List<List<ClusterNode>> calcAff2 = calcAffinity(ign.cache(cacheName), nodes);
 
         Set<Integer> movedParts = new HashSet<>();
 
@@ -1235,6 +1233,92 @@
     }
 
     /**
+     * Returns list of partitions what will be evicted after new node's join.
+     *
+     * @param ign Node to find evicting partitions.
+     * @param cache Cach.
+     * @param size Size.
+     * @return List of moving partition
+     */
+    protected List<Integer> evictingPartitionsAfterJoin(Ignite ign, IgniteCache<?, ?> cache, int size) {
+        ArrayList<ClusterNode> nodes = new ArrayList<>(ign.cluster().nodes());
+
+        List<List<ClusterNode>> ideal1 = calcAffinity(cache, nodes);
+
+        GridTestNode fakeNode = new GridTestNode(UUID.randomUUID(), null);
+
+        fakeNode.consistentId(getTestIgniteInstanceName(nodes.size()));
+
+        nodes.add(fakeNode);
+
+        List<List<ClusterNode>> ideal2 = calcAffinity(cache, nodes);
+
+        Map<ClusterNode, BitSet> m1 = U.newHashMap(nodes.size());
+        Map<ClusterNode, BitSet> m2 = U.newHashMap(nodes.size());
+
+        int parts = cache.getConfiguration(CacheConfiguration.class).getAffinity().partitions();
+
+        for (int p = 0; p < parts; p++) {
+            List<ClusterNode> assign1 = new ArrayList<>(ideal1.get(p));
+            List<ClusterNode> assign2 = new ArrayList<>(ideal2.get(p));
+
+            final int finalP = p;
+
+            IgniteBiInClosure<Map<ClusterNode, BitSet>, ClusterNode> updater = (map, node) -> {
+                BitSet set = map.get(node);
+
+                if (set == null)
+                    map.put(node, (set = new BitSet(parts)));
+
+                set.set(finalP);
+            };
+
+            for (ClusterNode node : assign1)
+                updater.apply(m1, node);
+
+            for (ClusterNode node : assign2)
+                updater.apply(m2, node);
+        }
+
+        List<Integer> partsToRet = new ArrayList<>(size);
+
+        BitSet before = m1.get(ign.cluster().localNode());
+        BitSet after = m2.get(ign.cluster().localNode());
+
+        for (int p = before.nextSetBit(0); p >= 0; p = before.nextSetBit(p+1)) {
+            if (!after.get(p)) {
+                partsToRet.add(p);
+
+                if (partsToRet.size() == size)
+                    break;
+            }
+        }
+
+        return partsToRet;
+    }
+
+    /**
+     * @param cache Cache.
+     * @param nodes Nodes.
+     */
+    private List<List<ClusterNode>> calcAffinity(IgniteCache<?, ?> cache, List<ClusterNode> nodes) {
+        IgniteCacheProxyImpl proxy = cache.unwrap(IgniteCacheProxyImpl.class);
+
+        GridCacheContext<?, ?> cctx = proxy.context();
+
+        AffinityFunction func = cctx.config().getAffinity();
+
+        AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl(
+            nodes,
+            null,
+            null,
+            AffinityTopologyVersion.NONE,
+            cctx.config().getBackups());
+
+        return func.assignPartitions(ctx);
+    }
+
+    /**
      * @param cache Cache.
      * @return Collection of keys for which given cache is primary.
      * @throws IgniteCheckedException If failed.
@@ -1902,4 +1986,27 @@
             new VisorTaskArgument<>(node.id(), taskArg, false)
         );
     }
+
+    /**
+     * Checks if all txs and mvcc futures are finished.
+     */
+    protected void checkFutures() {
+        for (Ignite ignite : G.allGrids()) {
+            IgniteEx ig = (IgniteEx)ignite;
+
+            final Collection<GridCacheFuture<?>> futs = ig.context().cache().context().mvcc().activeFutures();
+
+            for (GridCacheFuture<?> fut : futs)
+                log.info("Waiting for future: " + fut);
+
+            assertTrue("Expecting no active futures: node=" + ig.localNode().id(), futs.isEmpty());
+
+            Collection<IgniteInternalTx> txs = ig.context().cache().context().tm().activeTransactions();
+
+            for (IgniteInternalTx tx : txs)
+                log.info("Waiting for tx: " + tx);
+
+            assertTrue("Expecting no active transactions: node=" + ig.localNode().id(), txs.isEmpty());
+        }
+    }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
index 74e23ed..ac2bed3 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicTestSuite.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.testsuites;
 
+import java.util.Set;
 import junit.framework.TestSuite;
 import org.apache.ignite.GridSuppressedExceptionSelfTest;
 import org.apache.ignite.failure.FailureHandlerTriggeredTest;
@@ -55,6 +56,7 @@
 import org.apache.ignite.internal.processors.cache.SetTxTimeoutOnPartitionMapExchangeTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteRejectConnectOnNodeStopTest;
 import org.apache.ignite.internal.processors.cache.transactions.AtomicOperationsInTxTest;
+import org.apache.ignite.internal.processors.cache.transactions.TransactionIntegrityWithSystemWorkerDeathTest;
 import org.apache.ignite.internal.processors.closure.GridClosureProcessorRemoteTest;
 import org.apache.ignite.internal.processors.closure.GridClosureProcessorSelfTest;
 import org.apache.ignite.internal.processors.closure.GridClosureSerializationTest;
@@ -90,8 +92,6 @@
 import org.apache.ignite.util.AttributeNodeFilterSelfTest;
 import org.jetbrains.annotations.Nullable;
 
-import java.util.Set;
-
 /**
  * Basic test suite.
  */
@@ -211,6 +211,7 @@
         suite.addTestSuite(StopNodeFailureHandlerTest.class);
         suite.addTestSuite(StopNodeOrHaltFailureHandlerTest.class);
         suite.addTestSuite(OomFailureHandlerTest.class);
+        suite.addTestSuite(TransactionIntegrityWithSystemWorkerDeathTest.class);
 
         suite.addTestSuite(AtomicOperationsInTxTest.class);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java
index 5f1d18d..3171754 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteBasicWithPersistenceTestSuite.java
@@ -17,8 +17,8 @@
 
 package org.apache.ignite.testsuites;
 
+import java.util.Set;
 import junit.framework.TestSuite;
-import org.apache.ignite.failure.AccountTransferTransactionTest;
 import org.apache.ignite.failure.IoomFailureHandlerTest;
 import org.apache.ignite.failure.SystemWorkersTerminationTest;
 import org.apache.ignite.internal.ClusterBaselineNodesMetricsSelfTest;
@@ -29,8 +29,6 @@
 import org.apache.ignite.util.GridInternalTaskUnusedWalSegmentsTest;
 import org.jetbrains.annotations.Nullable;
 
-import java.util.Set;
-
 /**
  * Basic test suite.
  */
@@ -52,7 +50,6 @@
         TestSuite suite = new TestSuite("Ignite Basic With Persistence Test Suite");
 
         suite.addTestSuite(IoomFailureHandlerTest.class);
-        suite.addTestSuite(AccountTransferTransactionTest.class);
         suite.addTestSuite(ClusterBaselineNodesMetricsSelfTest.class);
         suite.addTestSuite(ServiceDeploymentOnActivationTest.class);
         suite.addTestSuite(ServiceDeploymentOutsideBaselineTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java
index 79d98e4..ad9658d 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheEvictionSelfTestSuite.java
@@ -29,6 +29,7 @@
 import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionFilterSelfTest;
 import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionLockUnlockSelfTest;
 import org.apache.ignite.internal.processors.cache.eviction.GridCacheEvictionTouchSelfTest;
+import org.apache.ignite.internal.processors.cache.eviction.DhtAndNearEvictionTest;
 import org.apache.ignite.internal.processors.cache.eviction.fifo.FifoEvictionPolicyFactorySelfTest;
 import org.apache.ignite.internal.processors.cache.eviction.fifo.FifoEvictionPolicySelfTest;
 import org.apache.ignite.internal.processors.cache.eviction.lru.LruEvictionPolicyFactorySelfTest;
@@ -94,6 +95,8 @@
 
         suite.addTest(new TestSuite(PageEvictionPagesRecyclingAndReusingTest.class));
 
+        suite.addTest(new TestSuite(DhtAndNearEvictionTest.class));
+
         return suite;
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite.java
new file mode 100644
index 0000000..8585ebe
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccTestSuite.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccClusterRestartTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccConfigurationValidationTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccIteratorWithConcurrentTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccLocalEntriesWithConcurrentTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccOperationChecksTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedCoordinatorFailoverTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccProcessorLazyStartTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccProcessorTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedCoordinatorFailoverTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccScanQueryWithConcurrentTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccSizeWithConcurrentTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccTransactionsTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccVacuumTest;
+import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessorMvccSelfTest;
+
+/**
+ *
+ */
+public class IgniteCacheMvccTestSuite extends TestSuite {
+    /**
+     * @return Test suite.
+     */
+    public static TestSuite suite() {
+        TestSuite suite = new TestSuite("IgniteCache MVCC Test Suite");
+
+        // Basic tests.
+        suite.addTestSuite(CacheMvccTransactionsTest.class);
+        suite.addTestSuite(CacheMvccProcessorTest.class);
+        suite.addTestSuite(CacheMvccVacuumTest.class);
+        suite.addTestSuite(CacheMvccConfigurationValidationTest.class);
+
+        suite.addTestSuite(DataStreamProcessorMvccSelfTest.class);
+        suite.addTestSuite(CacheMvccOperationChecksTest.class);
+
+        // Concurrent ops tests.
+        suite.addTestSuite(CacheMvccIteratorWithConcurrentTransactionTest.class);
+        suite.addTestSuite(CacheMvccLocalEntriesWithConcurrentTransactionTest.class);
+        suite.addTestSuite(CacheMvccScanQueryWithConcurrentTransactionTest.class);
+        suite.addTestSuite(CacheMvccSizeWithConcurrentTransactionTest.class);
+
+        // Failover tests.
+        suite.addTestSuite(CacheMvccClusterRestartTest.class);
+        suite.addTestSuite(CacheMvccPartitionedCoordinatorFailoverTest.class);
+        suite.addTestSuite(CacheMvccReplicatedCoordinatorFailoverTest.class);
+        suite.addTestSuite(CacheMvccProcessorLazyStartTest.class);
+
+        return suite;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
index 81c3216..d6c1f96 100755
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite.java
@@ -45,6 +45,7 @@
 import org.apache.ignite.internal.managers.communication.IgniteCommunicationSslBalanceTest;
 import org.apache.ignite.internal.managers.communication.IgniteIoTestMessagesTest;
 import org.apache.ignite.internal.managers.communication.IgniteVariousConnectionNumberTest;
+import org.apache.ignite.internal.processors.cache.BinaryMetadataRegistrationInsideEntryProcessorTest;
 import org.apache.ignite.internal.processors.cache.CacheAffinityCallSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheAtomicSingleMessageCountSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheDeferredDeleteQueueTest;
@@ -388,6 +389,8 @@
         //suite.addTestSuite(IgniteStaticCacheStartSelfTest.class);
         //suite.addTestSuite(InterceptorWithKeepBinaryCacheFullApiTest.class);
 
+        suite.addTestSuite(BinaryMetadataRegistrationInsideEntryProcessorTest.class);
+
         return suite;
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite4.java
index 2cc5f0b..370fa49 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite4.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite4.java
@@ -33,7 +33,6 @@
 import org.apache.ignite.internal.processors.cache.CacheGetEntryPessimisticRepeatableReadSeltTest;
 import org.apache.ignite.internal.processors.cache.CacheGetEntryPessimisticSerializableSeltTest;
 import org.apache.ignite.internal.processors.cache.CacheOffheapMapEntrySelfTest;
-import org.apache.ignite.internal.processors.cache.CachePutIfAbsentTest;
 import org.apache.ignite.internal.processors.cache.CacheReadThroughAtomicRestartSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheReadThroughLocalAtomicRestartSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheReadThroughLocalRestartSelfTest;
@@ -57,25 +56,31 @@
 import org.apache.ignite.internal.processors.cache.GridCacheStoreManagerDeserializationTest;
 import org.apache.ignite.internal.processors.cache.GridCacheVersionMultinodeTest;
 import org.apache.ignite.internal.processors.cache.GridLocalCacheStoreManagerDeserializationTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicCopyOnReadDisabledTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalPeekModesTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicLocalStoreValueTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicNearEnabledStoreValueTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicNearPeekModesTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicPeekModesTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicReplicatedPeekModesTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheAtomicStoreValueTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheConfigurationDefaultTemplateTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheConfigurationTemplateTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheContainsKeyAtomicTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheDynamicStopSelfTest;
-import org.apache.ignite.internal.processors.cache.IgniteCacheGetCustomCollectionsSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheInvokeReadThroughSingleNodeTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheInvokeReadThroughTest;
-import org.apache.ignite.internal.processors.cache.IgniteCacheLoadRebalanceEvictionSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheReadThroughStoreCallTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheStartTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheTxCopyOnReadDisabledTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheTxLocalPeekModesTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheTxLocalStoreValueTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheTxNearEnabledStoreValueTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheTxNearPeekModesTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheTxPeekModesTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheTxPreloadNoWriteTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheTxReplicatedPeekModesTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheTxStoreValueTest;
 import org.apache.ignite.internal.processors.cache.IgniteClientCacheInitializationFailTest;
 import org.apache.ignite.internal.processors.cache.IgniteDynamicCacheFilterTest;
 import org.apache.ignite.internal.processors.cache.IgniteDynamicCacheMultinodeTest;
@@ -93,7 +98,6 @@
 import org.apache.ignite.internal.processors.cache.IgniteSystemCacheOnClientTest;
 import org.apache.ignite.internal.processors.cache.MarshallerCacheJobRunNodeRestartTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheAffinityEarlyTest;
-import org.apache.ignite.internal.processors.cache.distributed.CacheAtomicPrimarySyncBackPressureTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheDiscoveryDataConcurrentJoinTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheGetFutureHangsSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheGroupsPreloadTest;
@@ -102,11 +106,8 @@
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheCreatePutMultiNodeSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheCreatePutTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheFailedUpdateResponseTest;
-import org.apache.ignite.internal.processors.cache.distributed.IgniteCachePrimarySyncTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheReadFromBackupTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheSingleGetMessageTest;
-import org.apache.ignite.internal.processors.cache.distributed.IgniteTxCachePrimarySyncTest;
-import org.apache.ignite.internal.processors.cache.distributed.IgniteTxCacheWriteSynchronizationModesMultithreadedTest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridCacheDhtTxPreloadSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCacheLockFailoverSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCacheMultiTxLockSelfTest;
@@ -223,9 +224,8 @@
         suite.addTestSuite(GridCacheVersionMultinodeTest.class);
 
         suite.addTestSuite(IgniteCacheNearReadCommittedTest.class);
-        // TODO GG-11148 need decide if CopyOnRead flag makes sense.
-//        suite.addTestSuite(IgniteCacheAtomicCopyOnReadDisabledTest.class);
-//        suite.addTestSuite(IgniteCacheTxCopyOnReadDisabledTest.class);
+        suite.addTestSuite(IgniteCacheAtomicCopyOnReadDisabledTest.class);
+        suite.addTestSuite(IgniteCacheTxCopyOnReadDisabledTest.class);
 
         suite.addTestSuite(IgniteCacheTxPreloadNoWriteTest.class);
 
@@ -255,15 +255,12 @@
 
         suite.addTestSuite(IgniteCacheJdbcBlobStoreNodeRestartTest.class);
 
-        // TODO GG-11148 need decide if CopyOnRead flag makes sense.
-//        suite.addTestSuite(IgniteCacheAtomicLocalStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheAtomicStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheAtomicNearEnabledStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheAtomicPrimaryWriteOrderStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheAtomicPrimaryWriteOrderNearEnabledStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheTxLocalStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheTxStoreValueTest.class);
-//        suite.addTestSuite(IgniteCacheTxNearEnabledStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheAtomicLocalStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheAtomicStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheAtomicNearEnabledStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheTxLocalStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheTxStoreValueTest.class);
+        suite.addTestSuite(IgniteCacheTxNearEnabledStoreValueTest.class);
 
         suite.addTestSuite(IgniteCacheLockFailoverSelfTest.class);
         suite.addTestSuite(IgniteCacheMultiTxLockSelfTest.class);
@@ -289,7 +286,7 @@
         suite.addTestSuite(CacheOffheapMapEntrySelfTest.class);
 
         suite.addTestSuite(CacheJdbcStoreSessionListenerSelfTest.class);
-        //suite.addTestSuite(CacheStoreSessionListenerLifecycleSelfTest.class);
+        suite.addTestSuite(CacheStoreSessionListenerLifecycleSelfTest.class);
         suite.addTestSuite(CacheStoreListenerRWThroughDisabledAtomicCacheTest.class);
         suite.addTestSuite(CacheStoreListenerRWThroughDisabledTransactionalCacheTest.class);
         suite.addTestSuite(CacheStoreSessionListenerWriteBehindEnabledTest.class);
@@ -322,10 +319,6 @@
         suite.addTestSuite(CacheVersionedEntryReplicatedAtomicSelfTest.class);
         suite.addTestSuite(CacheVersionedEntryReplicatedTransactionalSelfTest.class);
 
-        // TODO GG-11148.
-        // suite.addTestSuite(CacheSwapUnswapGetTest.class);
-        // suite.addTestSuite(CacheSwapUnswapGetTestSmallQueueSize.class);
-
         suite.addTestSuite(GridCacheDhtTxPreloadSelfTest.class);
         suite.addTestSuite(GridCacheNearTxPreloadSelfTest.class);
         suite.addTestSuite(GridReplicatedTxPreloadTest.class);
@@ -341,19 +334,10 @@
         suite.addTestSuite(IgniteCacheSingleGetMessageTest.class);
         suite.addTestSuite(IgniteCacheReadFromBackupTest.class);
 
-        suite.addTestSuite(IgniteCacheGetCustomCollectionsSelfTest.class);
-        suite.addTestSuite(IgniteCacheLoadRebalanceEvictionSelfTest.class);
-        suite.addTestSuite(IgniteCachePrimarySyncTest.class);
-        suite.addTestSuite(IgniteTxCachePrimarySyncTest.class);
-        suite.addTestSuite(IgniteTxCacheWriteSynchronizationModesMultithreadedTest.class);
-        suite.addTestSuite(CachePutIfAbsentTest.class);
-
         suite.addTestSuite(MarshallerCacheJobRunNodeRestartTest.class);
 
         suite.addTestSuite(IgniteCacheNearOnlyTxTest.class);
 
-        suite.addTestSuite(CacheAtomicPrimarySyncBackPressureTest.class);
-
         suite.addTestSuite(IgniteCacheContainsKeyAtomicTest.class);
 
         return suite;
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java
index 6f3df0b..1269d0d 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite6.java
@@ -21,17 +21,16 @@
 import org.apache.ignite.internal.processors.cache.PartitionedAtomicCacheGetsDistributionTest;
 import org.apache.ignite.internal.processors.cache.PartitionedTransactionalOptimisticCacheGetsDistributionTest;
 import org.apache.ignite.internal.processors.cache.PartitionedTransactionalPessimisticCacheGetsDistributionTest;
+import org.apache.ignite.internal.processors.cache.PartitionsExchangeCoordinatorFailoverTest;
 import org.apache.ignite.internal.processors.cache.ReplicatedAtomicCacheGetsDistributionTest;
 import org.apache.ignite.internal.processors.cache.ReplicatedTransactionalOptimisticCacheGetsDistributionTest;
 import org.apache.ignite.internal.processors.cache.ReplicatedTransactionalPessimisticCacheGetsDistributionTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteExchangeLatchManagerCoordinatorFailTest;
-import org.apache.ignite.internal.processors.cache.distributed.CacheClientsConcurrentStartTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheExchangeMergeTest;
 import org.apache.ignite.internal.processors.cache.distributed.CachePartitionStateTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheTryLockMultithreadedTest;
 import org.apache.ignite.internal.processors.cache.distributed.GridCachePartitionEvictionDuringReadThroughSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCache150ClientsTest;
-import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheClientMultiNodeUpdateTopologyLockTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteCacheThreadLocalTxTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteOptimisticTxSuspendResumeMultiServerTest;
 import org.apache.ignite.internal.processors.cache.distributed.IgniteOptimisticTxSuspendResumeTest;
@@ -44,11 +43,11 @@
 import org.apache.ignite.internal.processors.cache.transactions.TxOptimisticPrepareOnUnstableTopologyTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackAsyncNearCacheTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackAsyncTest;
+import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnIncorrectParamsTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutNearCacheTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutNoDeadlockDetectionTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTimeoutTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnTopologyChangeTest;
-import org.apache.ignite.internal.processors.cache.transactions.TxRollbackOnIncorrectParamsTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxStateChangeEventTest;
 import org.apache.ignite.testframework.junits.GridAbstractTest;
 
@@ -109,8 +108,10 @@
 
         suite.addTestSuite(IgniteExchangeLatchManagerCoordinatorFailTest.class);
 
+        suite.addTestSuite(PartitionsExchangeCoordinatorFailoverTest.class);
+        suite.addTestSuite(CacheTryLockMultithreadedTest.class);
+
         //suite.addTestSuite(CacheClientsConcurrentStartTest.class);
-        //suite.addTestSuite(CacheTryLockMultithreadedTest.class);
         //suite.addTestSuite(GridCacheRebalancingOrderingTest.class);
         //suite.addTestSuite(IgniteCacheClientMultiNodeUpdateTopologyLockTest.class);
 
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite7.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite7.java
index 330796c..6c48ecc 100755
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite7.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite7.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.testsuites;
 
+import java.util.Set;
 import junit.framework.TestSuite;
 import org.apache.ignite.internal.processors.authentication.Authentication1kUsersNodeRestartTest;
 import org.apache.ignite.internal.processors.authentication.AuthenticationConfigurationClusterTest;
@@ -32,16 +33,19 @@
 import org.apache.ignite.internal.processors.cache.WalModeChangeCoordinatorNotAffinityNodeSelfTest;
 import org.apache.ignite.internal.processors.cache.WalModeChangeSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.Cache64kPartitionsTest;
+import org.apache.ignite.internal.processors.cache.distributed.CachePageWriteLockUnlockTest;
+import org.apache.ignite.internal.processors.cache.distributed.CacheRentingStateRepairTest;
+import org.apache.ignite.internal.processors.cache.distributed.CacheDataLossOnPartitionMoveTest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.IgniteCacheStartWithLoadTest;
 import org.apache.ignite.internal.processors.cache.distributed.rebalancing.GridCacheRebalancingPartitionCountersTest;
 import org.apache.ignite.internal.processors.cache.distributed.rebalancing.GridCacheRebalancingWithAsyncClearingTest;
 import org.apache.ignite.internal.processors.cache.eviction.paged.PageEvictionMultinodeMixedRegionsTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheAssignmentNodeRestartsTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.CheckpointBufferDeadlockTest;
+import org.apache.ignite.internal.processors.cache.transactions.TransactionIntegrityWithPrimaryIndexCorruptionTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxRollbackAsyncWithPersistenceTest;
 import org.apache.ignite.internal.processors.cache.transactions.TxWithSmallTimeoutAndContentionOneKeyTest;
 
-import java.util.Set;
-
 /**
  * Test suite.
  */
@@ -63,6 +67,7 @@
         TestSuite suite = new TestSuite("IgniteCache With Persistence Test Suite");
 
         suite.addTestSuite(CheckpointBufferDeadlockTest.class);
+        suite.addTestSuite(IgniteCacheStartWithLoadTest.class);
 
         suite.addTestSuite(AuthenticationConfigurationClusterTest.class);
         suite.addTestSuite(AuthenticationProcessorSelfTest.class);
@@ -92,6 +97,13 @@
 
         suite.addTestSuite(TxWithSmallTimeoutAndContentionOneKeyTest.class);
 
+        suite.addTestSuite(CacheRentingStateRepairTest.class);
+
+        suite.addTestSuite(TransactionIntegrityWithPrimaryIndexCorruptionTest.class);
+        suite.addTestSuite(CacheDataLossOnPartitionMoveTest.class);
+
+        suite.addTestSuite(CachePageWriteLockUnlockTest.class);
+
         return suite;
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite9.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite9.java
new file mode 100644
index 0000000..386b17b
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteCacheTestSuite9.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.internal.processors.cache.CachePutIfAbsentTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheGetCustomCollectionsSelfTest;
+import org.apache.ignite.internal.processors.cache.IgniteCacheLoadRebalanceEvictionSelfTest;
+import org.apache.ignite.internal.processors.cache.distributed.CacheAtomicPrimarySyncBackPressureTest;
+import org.apache.ignite.internal.processors.cache.distributed.IgniteCachePrimarySyncTest;
+import org.apache.ignite.internal.processors.cache.distributed.IgniteTxCachePrimarySyncTest;
+import org.apache.ignite.internal.processors.cache.distributed.IgniteTxCacheWriteSynchronizationModesMultithreadedTest;
+import org.apache.ignite.testframework.junits.GridAbstractTest;
+
+/**
+ * Test suite.
+ */
+public class IgniteCacheTestSuite9 extends TestSuite {
+    /**
+     * @return IgniteCache test suite.
+     * @throws Exception Thrown in case of the failure.
+     */
+    public static TestSuite suite() throws Exception {
+        System.setProperty(GridAbstractTest.PERSISTENCE_IN_TESTS_IS_ALLOWED_PROPERTY, "false");
+
+        TestSuite suite = new TestSuite("IgniteCache Test Suite part 9");
+
+        suite.addTestSuite(IgniteCacheGetCustomCollectionsSelfTest.class);
+        suite.addTestSuite(IgniteCacheLoadRebalanceEvictionSelfTest.class);
+        suite.addTestSuite(IgniteCachePrimarySyncTest.class);
+        suite.addTestSuite(IgniteTxCachePrimarySyncTest.class);
+        suite.addTestSuite(IgniteTxCacheWriteSynchronizationModesMultithreadedTest.class);
+        suite.addTestSuite(CachePutIfAbsentTest.class);
+
+        suite.addTestSuite(CacheAtomicPrimarySyncBackPressureTest.class);
+
+        return suite;
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
index 3a943f3..b8ea850 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteKernalSelfTestSuite.java
@@ -35,7 +35,6 @@
 import org.apache.ignite.internal.GridRuntimeExceptionSelfTest;
 import org.apache.ignite.internal.GridSameVmStartupSelfTest;
 import org.apache.ignite.internal.GridSpiExceptionSelfTest;
-import org.apache.ignite.internal.GridStartupTest;
 import org.apache.ignite.internal.GridVersionSelfTest;
 import org.apache.ignite.internal.IgniteConcurrentEntryProcessorAccessStopTest;
 import org.apache.ignite.internal.IgniteConnectionConcurrentReserveAndRemoveTest;
@@ -104,8 +103,7 @@
     public static TestSuite suite(Set<Class> ignoredTests) throws Exception {
         TestSuite suite = new TestSuite("Ignite Kernal Test Suite");
 
-        //suite.addTestSuite(GridStartupTest.class);
-        //suite.addTestSuite(GridGetOrStartSelfTest.class);
+        suite.addTestSuite(GridGetOrStartSelfTest.class);
         suite.addTestSuite(GridSameVmStartupSelfTest.class);
         suite.addTestSuite(GridSpiExceptionSelfTest.class);
         suite.addTestSuite(GridRuntimeExceptionSelfTest.class);
@@ -139,7 +137,7 @@
         suite.addTestSuite(ComputeJobCancelWithServiceSelfTest.class);
         suite.addTestSuite(IgniteConnectionConcurrentReserveAndRemoveTest.class);
         suite.addTestSuite(LongJVMPauseDetectorTest.class);
-        //suite.addTestSuite(ClusterMetricsSelfTest.class);
+        suite.addTestSuite(ClusterMetricsSelfTest.class);
 
         // Managed Services.
         suite.addTestSuite(GridServiceProcessorSingleNodeSelfTest.class);
@@ -162,14 +160,15 @@
         suite.addTestSuite(GridServiceProcessorBatchDeploySelfTest.class);
         suite.addTestSuite(GridServiceDeploymentCompoundFutureSelfTest.class);
         suite.addTestSuite(SystemCacheNotConfiguredTest.class);
+        // IGNITE-3392
         //suite.addTestSuite(GridServiceDeploymentExceptionPropagationTest.class);
 
         suite.addTestSuite(IgniteServiceDeploymentClassLoadingDefaultMarshallerTest.class);
         suite.addTestSuite(IgniteServiceDeploymentClassLoadingJdkMarshallerTest.class);
-        //suite.addTestSuite(IgniteServiceDeploymentClassLoadingOptimizedMarshallerTest.class);
+        suite.addTestSuite(IgniteServiceDeploymentClassLoadingOptimizedMarshallerTest.class);
         suite.addTestSuite(IgniteServiceDeployment2ClassLoadersDefaultMarshallerTest.class);
         suite.addTestSuite(IgniteServiceDeployment2ClassLoadersJdkMarshallerTest.class);
-        //suite.addTestSuite(IgniteServiceDeployment2ClassLoadersOptimizedMarshallerTest.class);
+        suite.addTestSuite(IgniteServiceDeployment2ClassLoadersOptimizedMarshallerTest.class);
 
         return suite;
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLangSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLangSelfTestSuite.java
index 187ca59..f40b03e 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLangSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLangSelfTestSuite.java
@@ -86,7 +86,7 @@
         suite.addTest(new TestSuite(IgniteFutureImplTest.class));
         suite.addTest(new TestSuite(IgniteCacheFutureImplTest.class));
 
-        //suite.addTest(new TestSuite(IgniteOffheapReadWriteLockSelfTest.class));
+        suite.addTest(new TestSuite(IgniteOffheapReadWriteLockSelfTest.class));
 
         // Consistent hash tests.
         suite.addTest(new TestSuite(GridConsistentHashSelfTest.class));
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLostAndFoundTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLostAndFoundTestSuite.java
index 7fc40df..37bb2d6 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLostAndFoundTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteLostAndFoundTestSuite.java
@@ -51,7 +51,6 @@
     public static TestSuite suite() {
         TestSuite suite = new TestSuite("Ignite List And Found Test Suite");
 
-        suite.addTestSuite(FileDownloaderTest.class);
         suite.addTestSuite(FileIOTest.class);
         suite.addTestSuite(FileLocksTest.class);
         suite.addTestSuite(GridComputeJobExecutionErrorToLogManualTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite.java
index 25315ea..9f50b32 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite.java
@@ -20,6 +20,7 @@
 import junit.framework.TestSuite;
 import org.apache.ignite.internal.pagemem.impl.PageMemoryNoLoadSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteClusterActivateDeactivateTestWithPersistence;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDestroyCacheTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsDestroyCacheWithoutCheckpointsTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCacheConfigurationFileConsistencyCheckTest;
@@ -95,6 +96,9 @@
         suite.addTestSuite(ExplicitWalDeltaConsistencyTest.class);
         suite.addTestSuite(SysPropWalDeltaConsistencyTest.class);
 
+        // Binary meta tests.
+        suite.addTestSuite(IgnitePdsCacheObjectBinaryProcessorOnDiscoveryTest.class);
+
         return suite;
     }
 
@@ -104,7 +108,7 @@
      *
      * @param suite suite to add tests into.
      */
-    public static void addRealPageStoreTestsLongRunning(TestSuite suite) {
+    private static void addRealPageStoreTestsLongRunning(TestSuite suite) {
         // Basic PageMemory tests.
         suite.addTestSuite(IgnitePdsPageReplacementTest.class);
     }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java
index e8a8576..a9f2601 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite2.java
@@ -19,37 +19,31 @@
 
 import junit.framework.TestSuite;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteDataStorageMetricsSelfTest;
-import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTest;
-import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithExpiryPolicy;
-import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsCorruptedStoreTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsExchangeDuringCheckpointTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsPageSizesTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsPartitionFilesDestroyTest;
-import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRecoveryAfterFileCorruptionTest;
-import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgnitePersistentStoreDataStructuresTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteRebalanceScheduleResendPartitionsTest;
 import org.apache.ignite.internal.processors.cache.persistence.LocalWacModeNoChangeDuringRebalanceOnNonNodeAssignTest;
 import org.apache.ignite.internal.processors.cache.persistence.LocalWalModeChangeDuringRebalancingSelfTest;
-import org.apache.ignite.internal.processors.cache.persistence.baseline.IgniteAbsentEvictionNodeOutOfBaselineTest;
 import org.apache.ignite.internal.processors.cache.persistence.baseline.ClientAffinityAssignmentWithBaselineTest;
+import org.apache.ignite.internal.processors.cache.persistence.baseline.IgniteAbsentEvictionNodeOutOfBaselineTest;
 import org.apache.ignite.internal.processors.cache.persistence.baseline.IgniteAllBaselineNodesOnlineFullApiSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.baseline.IgniteOfflineBaselineNodeFullApiSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.baseline.IgniteOnlineNodeOutOfBaselineFullApiSelfTest;
-import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPageEvictionDuringPartitionClearTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsRebalancingOnNotStableTopologyTest;
-import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsTransactionsHangTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsReserveWalSegmentsTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsWholeClusterRestartTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.SlowHistoricalRebalanceSmallHistoryTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.checkpoint.IgniteCheckpointDirtyPagesForLowLoadTest;
-import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsUnusedWalSegmentsTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.filename.IgniteUidAsConsistentIdMigrationTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteNodeStoppedDuringDisableWALTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWALTailIsReachedDuringIterationOverArchiveTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushBackgroundSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushBackgroundWithMmapBufferSelfTest;
-import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFsyncSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFailoverTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFsyncSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFsyncWithDedicatedWorkerSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushFsyncWithMmapBufferSelfTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalFlushLogOnlySelfTest;
@@ -58,12 +52,16 @@
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalHistoryReservationsTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalIteratorExceptionDuringReadTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalIteratorSwitchSegmentTest;
-import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalRecoverySeveralRestartsTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.IgniteWalSerializerVersionTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.WalCompactionTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.WalDeletionArchiveFsyncTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.WalDeletionArchiveLogOnlyTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.crc.IgniteDataIntegrityTests;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.crc.IgniteFsyncReplayWalIteratorInvalidCrcTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.crc.IgniteReplayWalIteratorInvalidCrcTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.wal.crc.IgniteStandaloneWalIteratorInvalidCrcTest;
 import org.apache.ignite.internal.processors.cache.persistence.db.wal.reader.IgniteWalReaderTest;
-import org.apache.ignite.internal.processors.cache.persistence.file.FileDownloaderTest;
+import org.apache.ignite.internal.processors.cache.persistence.wal.reader.StandaloneWalRecordsIteratorTest;
 
 /**
  *
@@ -77,6 +75,9 @@
 
         // Integrity test.
         suite.addTestSuite(IgniteDataIntegrityTests.class);
+        suite.addTestSuite(IgniteStandaloneWalIteratorInvalidCrcTest.class);
+        suite.addTestSuite(IgniteReplayWalIteratorInvalidCrcTest.class);
+        suite.addTestSuite(IgniteFsyncReplayWalIteratorInvalidCrcTest.class);
 
         addRealPageStoreTests(suite);
 
@@ -89,32 +90,16 @@
         suite.addTestSuite(ClientAffinityAssignmentWithBaselineTest.class);
         suite.addTestSuite(IgniteAbsentEvictionNodeOutOfBaselineTest.class);
 
-        suite.addTestSuite(FileDownloaderTest.class);
-
-        suite.addTestSuite(IgnitePdsTaskCancelingTest.class);
-
         return suite;
     }
 
     /**
-     * Fills {@code suite} with PDS test subset, which operates with real page store, but requires long time to execute.
+     * Fills {@code suite} with PDS test subset, which operates with real page store, but requires long time to
+     * execute.
      *
      * @param suite suite to add tests into.
      */
     private static void addRealPageStoreTestsNotForDirectIo(TestSuite suite) {
-        suite.addTestSuite(IgnitePdsTransactionsHangTest.class);
-
-        suite.addTestSuite(IgnitePdsPageEvictionDuringPartitionClearTest.class);
-
-        // Rebalancing test
-        suite.addTestSuite(IgnitePdsContinuousRestartTest.class);
-        suite.addTestSuite(IgnitePdsContinuousRestartTestWithExpiryPolicy.class);
-
-        suite.addTestSuite(IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes.class);
-
-        // Integrity test.
-        suite.addTestSuite(IgnitePdsRecoveryAfterFileCorruptionTest.class);
-
         suite.addTestSuite(IgnitePdsPartitionFilesDestroyTest.class);
 
         suite.addTestSuite(LocalWalModeChangeDuringRebalancingSelfTest.class);
@@ -170,7 +155,7 @@
 
         suite.addTestSuite(IgnitePdsExchangeDuringCheckpointTest.class);
 
-        suite.addTestSuite(IgnitePdsUnusedWalSegmentsTest.class);
+        suite.addTestSuite(IgnitePdsReserveWalSegmentsTest.class);
 
         // new style folders with generated consistent ID test
         suite.addTestSuite(IgniteUidAsConsistentIdMigrationTest.class);
@@ -179,6 +164,9 @@
 
         suite.addTestSuite(WalCompactionTest.class);
 
+        suite.addTestSuite(WalDeletionArchiveFsyncTest.class);
+        suite.addTestSuite(WalDeletionArchiveLogOnlyTest.class);
+
         suite.addTestSuite(IgniteCheckpointDirtyPagesForLowLoadTest.class);
 
         suite.addTestSuite(IgnitePdsCorruptedStoreTest.class);
@@ -189,8 +177,12 @@
 
         suite.addTestSuite(IgniteNodeStoppedDuringDisableWALTest.class);
 
+        suite.addTestSuite(StandaloneWalRecordsIteratorTest.class);
+
         //suite.addTestSuite(IgniteWalRecoverySeveralRestartsTest.class);
 
         suite.addTestSuite(IgniteRebalanceScheduleResendPartitionsTest.class);
+
+        suite.addTestSuite(IgniteWALTailIsReachedDuringIterationOverArchiveTest.class);
     }
 }
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite3.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite3.java
new file mode 100644
index 0000000..06ba9c0
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite3.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTest;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithExpiryPolicy;
+
+/**
+ *
+ */
+public class IgnitePdsTestSuite3 extends TestSuite {
+    /**
+     * @return Suite.
+     */
+    public static TestSuite suite() {
+        TestSuite suite = new TestSuite("Ignite Persistent Store Test Suite 3");
+
+        addRealPageStoreTestsNotForDirectIo(suite);
+
+        return suite;
+    }
+
+    /**
+     * Fills {@code suite} with PDS test subset, which operates with real page store, but requires long time to execute.
+     *
+     * @param suite suite to add tests into.
+     */
+    private static void addRealPageStoreTestsNotForDirectIo(TestSuite suite) {
+        // Rebalancing test
+        suite.addTestSuite(IgnitePdsContinuousRestartTest.class);
+        suite.addTestSuite(IgnitePdsContinuousRestartTestWithExpiryPolicy.class);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java
new file mode 100644
index 0000000..2e6a439
--- /dev/null
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgnitePdsTestSuite4.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsRecoveryAfterFileCorruptionTest;
+import org.apache.ignite.internal.processors.cache.persistence.IgnitePdsTaskCancelingTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsPageEvictionDuringPartitionClearTest;
+import org.apache.ignite.internal.processors.cache.persistence.db.IgnitePdsTransactionsHangTest;
+import org.apache.ignite.internal.processors.cache.persistence.file.FileDownloaderTest;
+
+/**
+ *
+ */
+public class IgnitePdsTestSuite4 extends TestSuite {
+    /**
+     * @return Suite.
+     */
+    public static TestSuite suite() {
+        TestSuite suite = new TestSuite("Ignite Persistent Store Test Suite 4");
+
+        addRealPageStoreTestsNotForDirectIo(suite);
+
+        suite.addTestSuite(FileDownloaderTest.class);
+
+        suite.addTestSuite(IgnitePdsTaskCancelingTest.class);
+
+        return suite;
+    }
+
+    /**
+     * Fills {@code suite} with PDS test subset, which operates with real page store, but requires long time to execute.
+     *
+     * @param suite suite to add tests into.
+     */
+    private static void addRealPageStoreTestsNotForDirectIo(TestSuite suite) {
+        suite.addTestSuite(IgnitePdsTransactionsHangTest.class);
+
+        suite.addTestSuite(IgnitePdsPageEvictionDuringPartitionClearTest.class);
+
+        // Rebalancing test
+        suite.addTestSuite(IgnitePdsContinuousRestartTestWithSharedGroupAndIndexes.class);
+
+        // Integrity test.
+        suite.addTestSuite(IgnitePdsRecoveryAfterFileCorruptionTest.class);
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java
index 9ba9d2d..39d5421 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteSpiDiscoverySelfTestSuite.java
@@ -30,7 +30,7 @@
 import org.apache.ignite.spi.discovery.tcp.TcpClientDiscoverySpiFailureTimeoutSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpClientDiscoverySpiMulticastTest;
 import org.apache.ignite.spi.discovery.tcp.TcpClientDiscoverySpiSelfTest;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryConcurrentStartTest;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryClientSuspensionSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryMarshallerCheckSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryMultiThreadedTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoveryNodeAttributesUpdateOnReconnectTest;
@@ -44,12 +44,10 @@
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiConfigSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiFailureTimeoutSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiMBeanTest;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiRandomStartStopTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiReconnectDelayTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiSelfTest;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiSslSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiStartStopSelfTest;
-import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpiWildcardSelfTest;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslParametersTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslSecuredUnsecuredTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslSelfTest;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySslTrustedSelfTest;
@@ -123,12 +121,14 @@
         // Client connect.
         suite.addTest(new TestSuite(IgniteClientConnectTest.class));
         suite.addTest(new TestSuite(IgniteClientReconnectMassiveShutdownTest.class));
+        suite.addTest(new TestSuite(TcpDiscoveryClientSuspensionSelfTest.class));
 
         // SSL.
         suite.addTest(new TestSuite(TcpDiscoverySslSelfTest.class));
         suite.addTest(new TestSuite(TcpDiscoverySslTrustedSelfTest.class));
         suite.addTest(new TestSuite(TcpDiscoverySslSecuredUnsecuredTest.class));
         suite.addTest(new TestSuite(TcpDiscoverySslTrustedUntrustedTest.class));
+        suite.addTest(new TestSuite(TcpDiscoverySslParametersTest.class));
 
         // Disco cache reuse.
         suite.addTest(new TestSuite(IgniteDiscoveryCacheReuseSelfTest.class));
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteStandByClusterSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteStandByClusterSuite.java
index fd124b7..f524420 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteStandByClusterSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteStandByClusterSuite.java
@@ -21,12 +21,9 @@
 import org.apache.ignite.internal.processors.cache.IgniteClusterActivateDeactivateTest;
 import org.apache.ignite.internal.processors.cache.distributed.CacheBaselineTopologyTest;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteBaselineAffinityTopologyActivationTest;
-import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateCacheTest;
 import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateDataStreamerTest;
-import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateDataStructureTest;
 import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateFailOverTest;
-import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateServiceTest;
-import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteChangeGlobalStateTest;
+import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteNoParrallelClusterIsAllowedTest;
 import org.apache.ignite.internal.processors.cache.persistence.standbycluster.IgniteStandByClusterTest;
 import org.apache.ignite.internal.processors.cache.persistence.standbycluster.join.JoinActiveNodeToActiveCluster;
 import org.apache.ignite.internal.processors.cache.persistence.standbycluster.join.JoinActiveNodeToInActiveCluster;
@@ -65,12 +62,15 @@
         suite.addTestSuite(JoinInActiveNodeToActiveClusterWithPersistence.class);
         suite.addTestSuite(JoinInActiveNodeToInActiveClusterWithPersistence.class);
 
-        suite.addTestSuite(IgniteChangeGlobalStateTest.class);
-        suite.addTestSuite(IgniteChangeGlobalStateCacheTest.class);
-        suite.addTestSuite(IgniteChangeGlobalStateDataStructureTest.class);
+//TODO https://issues.apache.org/jira/browse/IGNITE-9081 suite.addTestSuite(IgniteChangeGlobalStateTest.class);
+//TODO https://issues.apache.org/jira/browse/IGNITE-9081 suite.addTestSuite(IgniteChangeGlobalStateCacheTest.class);
+//TODO https://issues.apache.org/jira/browse/IGNITE-9081 suite.addTestSuite(IgniteChangeGlobalStateDataStructureTest.class);
+//TODO https://issues.apache.org/jira/browse/IGNITE-9081 suite.addTestSuite(IgniteChangeGlobalStateServiceTest.class);
+
         suite.addTestSuite(IgniteChangeGlobalStateDataStreamerTest.class);
         suite.addTestSuite(IgniteChangeGlobalStateFailOverTest.class);
-        suite.addTestSuite(IgniteChangeGlobalStateServiceTest.class);
+
+        suite.addTestSuite(IgniteNoParrallelClusterIsAllowedTest.class);
 
         suite.addTestSuite(CacheBaselineTopologyTest.class);
         suite.addTestSuite(IgniteBaselineAffinityTopologyActivationTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
index c0d7a8b..bf726d5 100644
--- a/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
+++ b/modules/core/src/test/java/org/apache/ignite/testsuites/IgniteUtilSelfTestSuite.java
@@ -45,7 +45,6 @@
 import org.apache.ignite.thread.GridThreadTest;
 import org.apache.ignite.thread.IgniteThreadPoolSizeTest;
 import org.apache.ignite.util.GridConcurrentLinkedDequeMultiThreadedTest;
-import org.apache.ignite.util.GridIndexFillTest;
 import org.apache.ignite.util.GridIntListSelfTest;
 import org.apache.ignite.util.GridLogThrottleTest;
 import org.apache.ignite.util.GridLongListSelfTest;
@@ -98,25 +97,24 @@
         suite.addTestSuite(GridMBeanDisableSelfTest.class);
         suite.addTestSuite(GridMBeanExoticNamesSelfTest.class);
         suite.addTestSuite(GridLongListSelfTest.class);
-        //suite.addTestSuite(GridThreadTest.class);
+        suite.addTestSuite(GridThreadTest.class);
         suite.addTestSuite(GridIntListSelfTest.class);
         suite.addTestSuite(GridArraysSelfTest.class);
         suite.addTestSuite(GridCacheUtilsSelfTest.class);
         suite.addTestSuite(IgniteExceptionRegistrySelfTest.class);
         suite.addTestSuite(GridMessageCollectionTest.class);
         suite.addTestSuite(WorkersControlMXBeanTest.class);
-        //suite.addTestSuite(GridConcurrentLinkedDequeMultiThreadedTest.class);
-        //suite.addTestSuite(GridIndexFillTest.class);
-        //suite.addTestSuite(GridLogThrottleTest.class);
-        //suite.addTestSuite(GridRandomSelfTest.class);
-        //suite.addTestSuite(GridSnapshotLockSelfTest.class);
-        //suite.addTestSuite(GridTopologyHeapSizeSelfTest.class);
-        //suite.addTestSuite(GridTransientTest.class);
-        //suite.addTestSuite(IgniteDevOnlyLogTest.class);
+        suite.addTestSuite(GridConcurrentLinkedDequeMultiThreadedTest.class);
+        suite.addTestSuite(GridLogThrottleTest.class);
+        suite.addTestSuite(GridRandomSelfTest.class);
+        suite.addTestSuite(GridSnapshotLockSelfTest.class);
+        suite.addTestSuite(GridTopologyHeapSizeSelfTest.class);
+        suite.addTestSuite(GridTransientTest.class);
+        suite.addTestSuite(IgniteDevOnlyLogTest.class);
 
         // Sensitive toString.
-        //suite.addTestSuite(IncludeSensitiveAtomicTest.class);
-        //suite.addTestSuite(IncludeSensitiveTransactionalTest.class);
+        suite.addTestSuite(IncludeSensitiveAtomicTest.class);
+        suite.addTestSuite(IncludeSensitiveTransactionalTest.class);
 
         // Metrics.
         suite.addTestSuite(ClusterMetricsSnapshotSerializeSelfTest.class);
diff --git a/modules/core/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java b/modules/core/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java
index f612a96..8af7a10 100644
--- a/modules/core/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/util/GridCommandHandlerTest.java
@@ -1415,29 +1415,6 @@
         }, 4, "tx-thread");
     }
 
-    /**
-     * Checks if all tx futures are finished.
-     */
-    private void checkFutures() {
-        for (Ignite ignite : G.allGrids()) {
-            IgniteEx ig = (IgniteEx)ignite;
-
-            final Collection<GridCacheFuture<?>> futs = ig.context().cache().context().mvcc().activeFutures();
-
-            for (GridCacheFuture<?> fut : futs)
-                log.info("Waiting for future: " + fut);
-
-            assertTrue("Expecting no active futures: node=" + ig.localNode().id(), futs.isEmpty());
-
-            Collection<IgniteInternalTx> txs = ig.context().cache().context().tm().activeTransactions();
-
-            for (IgniteInternalTx tx : txs)
-                log.info("Waiting for tx: " + tx);
-
-            assertTrue("Expecting no active transactions: node=" + ig.localNode().id(), txs.isEmpty());
-        }
-    }
-
     /** */
     private static class IncrementClosure implements EntryProcessor<Long, Long, Void> {
         /** {@inheritDoc} */
diff --git a/modules/core/src/test/java/org/apache/ignite/util/GridConcurrentLinkedDequeMultiThreadedTest.java b/modules/core/src/test/java/org/apache/ignite/util/GridConcurrentLinkedDequeMultiThreadedTest.java
index 57cea6e..99064f0 100644
--- a/modules/core/src/test/java/org/apache/ignite/util/GridConcurrentLinkedDequeMultiThreadedTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/util/GridConcurrentLinkedDequeMultiThreadedTest.java
@@ -89,12 +89,11 @@
             "queue-prod"
         );
 
-        Thread.sleep(2 * 60 * 1000);
-
+        Thread.sleep(20 * 1000);
 
         done.set(true);
 
         pollFut.get();
         prodFut.get();
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/util/GridIndexFillTest.java b/modules/core/src/test/java/org/apache/ignite/util/GridIndexFillTest.java
deleted file mode 100644
index 88011ff..0000000
--- a/modules/core/src/test/java/org/apache/ignite/util/GridIndexFillTest.java
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.util;
-
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.ignite.internal.IgniteInternalFuture;
-import org.apache.ignite.internal.util.GridConcurrentSkipListSet;
-import org.apache.ignite.internal.util.snaptree.SnapTreeMap;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-/**
- * TODO write doc
- */
-public class GridIndexFillTest extends GridCommonAbstractTest {
-    /** */
-    private CopyOnWriteArrayList<Idx> idxs;
-
-    /** */
-    private ConcurrentHashMap<Integer, CountDownLatch> keyLocks;
-
-    @Override protected void beforeTest() throws Exception {
-        super.beforeTest();
-
-        idxs = new CopyOnWriteArrayList<>();
-
-        idxs.add(new Idx(true));
-
-        keyLocks = new ConcurrentHashMap<>();
-    }
-
-    /**
-     * @param k Key.
-     */
-    private CountDownLatch lock(String op, Integer k) {
-//        U.debug(op + " lock: " + k);
-        CountDownLatch latch = new CountDownLatch(1);
-
-        for(;;) {
-            CountDownLatch l = keyLocks.putIfAbsent(k, latch);
-
-            if (l == null)
-                return latch;
-
-            try {
-                l.await();
-            }
-            catch (InterruptedException e) {
-                throw new RuntimeException(e);
-            }
-        }
-    }
-
-    /**
-     * @param k Key.
-     */
-    private void unlock(Integer k, CountDownLatch latch) {
-//        U.debug("unlock: " + k);
-        assertTrue(keyLocks.remove(k, latch));
-
-        latch.countDown();
-    }
-
-    private void put(Integer k, Long v) {
-        CountDownLatch l = lock("add", k);
-
-        for (Idx idx : idxs)
-            idx.add(k, v);
-
-        unlock(k, l);
-    }
-
-    private void remove(Integer k) {
-        CountDownLatch l = lock("rm", k);
-
-        try {
-            Long v = null;
-
-            for (Idx idx : idxs) {
-                Long v2 = idx.remove(k, v);
-
-                if (v2 == null) {
-                    assert v == null;
-
-                    return; // Nothing to remove.
-                }
-
-                if (v == null)
-                    v = v2;
-                else
-                    assert v.equals(v2);
-            }
-        }
-        finally {
-            unlock(k, l);
-        }
-    }
-
-    public void testSnaptreeParallelBuild() throws Exception {
-        final AtomicBoolean stop = new AtomicBoolean();
-
-        IgniteInternalFuture<?> fut = multithreadedAsync(new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                ThreadLocalRandom rnd = ThreadLocalRandom.current();
-
-                while (!stop.get()) {
-                    int k = rnd.nextInt(100);
-                    long v = rnd.nextLong(10);
-
-                    if (rnd.nextBoolean())
-                        put(k, v);
-                    else
-                        remove(k);
-                 }
-
-                return null;
-            }
-        }, 12, "put");
-
-        Thread.sleep(500);
-
-        Idx newIdx = new Idx(false);
-
-        idxs.add(newIdx);
-
-        SnapTreeMap<Integer, Long> snap = idxs.get(0).tree.clone();
-
-        for (Map.Entry<Integer, Long> entry : snap.entrySet())
-            newIdx.addX(entry.getKey(), entry.getValue());
-
-        newIdx.finish();
-
-        stop.set(true);
-
-        fut.get();
-
-        assertEquals(idxs.get(0).tree, idxs.get(1).tree);
-    }
-
-    private static class Idx {
-
-        static int z = 1;
-
-        private final SnapTreeMap<Integer, Long> tree = new SnapTreeMap<>(); //new ConcurrentSkipListMap<>();
-
-        private volatile Rm rm;
-
-        private final String name = "idx" + z++;
-
-        public Idx(boolean pk) {
-            if (!pk)
-                rm = new Rm();
-        }
-
-        public void add(Integer k, Long v) {
-//            U.debug(name + " add: k" + k + " " + v);
-
-            Long old = tree.put(k, v);
-
-            if (old != null) {
-                Rm rm = this.rm;
-
-                if (rm != null)
-                    rm.keys.add(k);
-            }
-        }
-
-        public void addX(Integer k, Long v) {
-//            U.debug(name + " addX: k" + k + " " + v);
-
-            assert v != null;
-            assert k != null;
-
-//            Lock l = rm.lock.writeLock();
-
-//            l.lock();
-
-            try {
-                if (!rm.keys.contains(k)) {
-//                    U.debug(name + " addX-put: k" + k + " " + v);
-
-                    tree.putIfAbsent(k, v);
-                }
-            }
-            finally {
-//                l.unlock();
-            }
-        }
-
-        public Long remove(Integer k, Long v) {
-            Rm rm = this.rm;
-
-            if (rm != null) {
-                assert v != null;
-
-//                Lock l = rm.lock.readLock();
-
-//                l.lock();
-
-                try {
-                    rm.keys.add(k);
-
-                    Long v2 = tree.remove(k);
-
-//                    U.debug(name + " rm1: k" + k + " " + v + " " + v2);
-
-                }
-                finally {
-//                    l.unlock();
-                }
-            }
-            else {
-                Long v2 = tree.remove(k);
-
-//                U.debug(name + " rm2: k" + k + " " + v + " " + v2);
-
-                if (v == null)
-                    v = v2;
-                else
-                    assertEquals(v, v2);
-            }
-
-            return v;
-        }
-
-        public void finish() {
-//            assertTrue(rm.tree.isEmpty());
-
-            rm = null;
-        }
-    }
-
-    private static class Rm {
-//        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
-
-        private final GridConcurrentSkipListSet<Integer> keys = new GridConcurrentSkipListSet<>();
-            //new SnapTreeMap<>(); //new ConcurrentSkipListMap<>();
-    }
-}
\ No newline at end of file
diff --git a/modules/core/src/test/java/org/apache/ignite/util/GridRandomSelfTest.java b/modules/core/src/test/java/org/apache/ignite/util/GridRandomSelfTest.java
index e965433..005da6c 100644
--- a/modules/core/src/test/java/org/apache/ignite/util/GridRandomSelfTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/util/GridRandomSelfTest.java
@@ -19,13 +19,13 @@
 
 import java.util.Random;
 import java.util.concurrent.ThreadLocalRandom;
-import junit.framework.TestCase;
 import org.apache.ignite.internal.util.GridRandom;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 
 /**
  * Test for {@link GridRandom}.
  */
-public class GridRandomSelfTest extends TestCase {
+public class GridRandomSelfTest extends GridCommonAbstractTest {
     /**
      */
     public void testRandom() {
@@ -50,4 +50,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/util/mbeans/WorkersControlMXBeanTest.java b/modules/core/src/test/java/org/apache/ignite/util/mbeans/WorkersControlMXBeanTest.java
index c1c2fda..cb30906 100644
--- a/modules/core/src/test/java/org/apache/ignite/util/mbeans/WorkersControlMXBeanTest.java
+++ b/modules/core/src/test/java/org/apache/ignite/util/mbeans/WorkersControlMXBeanTest.java
@@ -79,7 +79,7 @@
         final CountDownLatch latch = new CountDownLatch(1);

 

         Thread t = new Thread(TEST_THREAD_NAME) {

-            public void run() {

+            @Override public void run() {

                 latch.countDown();

 

                 for (;;)

diff --git a/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/IgniteWalConverter.java b/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/IgniteWalConverter.java
index eee193a..ca144ab 100644
--- a/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/IgniteWalConverter.java
+++ b/modules/dev-utils/src/main/java/org/apache/ignite/development/utils/IgniteWalConverter.java
@@ -29,6 +29,8 @@
 import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasLeafIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2InnerIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO;
+import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccInnerIO;
+import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccLeafIO;
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.logger.NullLogger;
 import org.jetbrains.annotations.Nullable;
@@ -39,6 +41,7 @@
 public class IgniteWalConverter {
     /**
      * @param args Args.
+     * @throws Exception If failed.
      */
     public static void main(String[] args) throws Exception {
         if (args.length < 2)
@@ -47,12 +50,12 @@
                     "\t2. Path to dir with wal files.\n" +
                     "\t3. (Optional) Path to dir with archive wal files.");
 
-        PageIO.registerH2(H2InnerIO.VERSIONS, H2LeafIO.VERSIONS);
+        PageIO.registerH2(H2InnerIO.VERSIONS, H2LeafIO.VERSIONS, H2MvccInnerIO.VERSIONS, H2MvccLeafIO.VERSIONS);
         H2ExtrasInnerIO.register();
         H2ExtrasLeafIO.register();
 
-        boolean printRecords = IgniteSystemProperties.getBoolean("PRINT_RECORDS", false);
-        boolean printStat = IgniteSystemProperties.getBoolean("PRINT_STAT", true);
+        boolean printRecords = IgniteSystemProperties.getBoolean("PRINT_RECORDS", false); //TODO read them from argumetns
+        boolean printStat = IgniteSystemProperties.getBoolean("PRINT_STAT", true); //TODO read them from argumetns
 
         final IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(new NullLogger());
 
@@ -65,7 +68,11 @@
 
         @Nullable final WalStat stat = printStat ? new WalStat() : null;
 
-        try (WALIterator stIt = factory.iterator(workFiles)) {
+        IgniteWalIteratorFactory.IteratorParametersBuilder iteratorParametersBuilder =
+                new IgniteWalIteratorFactory.IteratorParametersBuilder().filesOrDirs(workFiles)
+                    .pageSize(Integer.parseInt(args[0]));
+
+        try (WALIterator stIt = factory.iterator(iteratorParametersBuilder)) {
             while (stIt.hasNextX()) {
                 IgniteBiTuple<WALPointer, WALRecord> next = stIt.nextX();
 
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/bad_classes.xml b/modules/extdata/uri/META-INF/bad-classes-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/bad_classes.xml
rename to modules/extdata/uri/META-INF/bad-classes-ignite.xml
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/bad-signed-deployfile-ignite.xml b/modules/extdata/uri/META-INF/bad-signed-deployfile-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/bad-signed-deployfile-ignite.xml
rename to modules/extdata/uri/META-INF/bad-signed-deployfile-ignite.xml
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploydepend-ignite.xml b/modules/extdata/uri/META-INF/deploydepend-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploydepend-ignite.xml
rename to modules/extdata/uri/META-INF/deploydepend-ignite.xml
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploydir-ignite.xml b/modules/extdata/uri/META-INF/deploydir-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploydir-ignite.xml
rename to modules/extdata/uri/META-INF/deploydir-ignite.xml
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deployfile-ignite.xml b/modules/extdata/uri/META-INF/deployfile-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deployfile-ignite.xml
rename to modules/extdata/uri/META-INF/deployfile-ignite.xml
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/well-signed-deployfile-ignite.xml b/modules/extdata/uri/META-INF/well-signed-deployfile-ignite.xml
similarity index 100%
rename from modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/well-signed-deployfile-ignite.xml
rename to modules/extdata/uri/META-INF/well-signed-deployfile-ignite.xml
diff --git a/modules/extdata/uri/config/signeddeploy/keystore b/modules/extdata/uri/config/signeddeploy/keystore
new file mode 100644
index 0000000..dc51a0f
--- /dev/null
+++ b/modules/extdata/uri/config/signeddeploy/keystore
Binary files differ
diff --git a/modules/extdata/uri/pom.xml b/modules/extdata/uri/pom.xml
index bd25653..43c0516 100644
--- a/modules/extdata/uri/pom.xml
+++ b/modules/extdata/uri/pom.xml
@@ -171,6 +171,119 @@
                                 -->
                                 <gar destfile="${basedir}/target/resources/helloworld.gar" descrdir="${basedir}/META-INF" basedir="${basedir}/target/classes" />
                                 <gar destfile="${basedir}/target/resources/helloworld1.gar" descrdir="${basedir}/META-INF" basedir="${basedir}/target/classes" />
+
+                                <!--deploydir.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/deploydir-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask0.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask0.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deploydir0.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+                                <mkdir dir="${basedir}/target/file/deploydir.gar" />
+                                <unzip src="${basedir}/target/file/deploydir0.gar" dest="${basedir}/target/file/deploydir.gar" />
+                                <delete file="${basedir}/target/file/deploydir0.gar" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--deployfile.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/deployfile-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask3.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask3.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deployfile.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--deployfile-nodescr.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask4.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask4.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deployfile-nodescr.gar" basedir="${basedir}/target/file_tmp/classes" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--deployfile-bad.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/bad-classes-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentAbstractTestTask.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentInnerTestTask.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentInnerTestTask$GridInnerTestTask.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentInterfaceTestTask.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentNonePublicTestTask.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deployfile-bad.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--deployfile-depend.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/deploydepend-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask1.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency1.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/test1.properties" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deployfile-depend.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+                                <zip destfile="${basedir}/target/file/deployfile-depend.gar" basedir="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/" includes="spring1.xml" update="yes" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--deploydir-nodescr-depend.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask2.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency2.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/test2.properties" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/spring2.xml" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/deploydir-nodescr-depend0.gar" basedir="${basedir}/target/file_tmp/classes" />
+                                <mkdir dir="${basedir}/target/file/deploydir-nodescr-depend.gar" />
+                                <unzip src="${basedir}/target/file/deploydir-nodescr-depend0.gar" dest="${basedir}/target/file/deploydir-nodescr-depend.gar" />
+                                <delete file="${basedir}/target/file/deploydir-nodescr-depend0.gar" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--well-signed-deployfile.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/well-signed-deployfile-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask5.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask5.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+
+                                <gar destfile="${basedir}/target/file/well-signed-deployfile.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+
+                                <signjar jar="${basedir}/target/file/well-signed-deployfile.gar" keystore="${basedir}/config/signeddeploy/keystore" storepass="abc123" keypass="abc123" alias="business" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
+
+                                <!--bad-signed-deployfile.gar-->
+                                <mkdir dir="${basedir}/target/file_tmp/META-INF/" />
+                                <copy file="${basedir}/META-INF/bad-signed-deployfile-ignite.xml" tofile="${basedir}/target/file_tmp/META-INF/ignite.xml" />
+
+                                <mkdir dir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask6.class" todir="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/" />
+                                <copy file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask6.class" tofile="${basedir}/target/file_tmp/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask6.class" />
+
+                                <gar destfile="${basedir}/target/file/bad-signed-deployfile.gar" descrdir="${basedir}/target/file_tmp/META-INF" basedir="${basedir}/target/file_tmp/classes" />
+
+                                <signjar jar="${basedir}/target/file/bad-signed-deployfile.gar" keystore="${basedir}/config/signeddeploy/keystore" storepass="abc123" keypass="abc123" alias="business" />
+
+                                <sleep seconds="2" />
+
+                                <touch file="${basedir}/target/classes/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask6.class" />
+
+                                <zip destfile="${basedir}/target/file/bad-signed-deployfile.gar" basedir="${basedir}/target/classes/" includes="org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestWithNameTask6.class" update="yes" />
+
+                                <delete dir="${basedir}/target/file_tmp/" />
                             </target>
                         </configuration>
                     </execution>
diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency1.java b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency1.java
index e57bab5..2e205b1 100644
--- a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency1.java
+++ b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency1.java
@@ -25,11 +25,11 @@
 /**
  * This class used by {@link GridUriDeploymentTestTask2} which loaded from GAR file.
  * GridDependency loaded from {@code /lib/*.jar} in GAR file.
- * GridDependency load resource {@code test.properties} from the same jar in {@code /lib/*.jar}
+ * GridDependency load resource {@code test1.properties} from the same jar in {@code /lib/*.jar}
  */
 public class GridUriDeploymentDependency1 {
     /** */
-    public static final String RESOURCE = "org/apache/ignite/grid/spi/deployment/uri/tasks/test1.properties";
+    public static final String RESOURCE = "org/apache/ignite/spi/deployment/uri/tasks/test1.properties";
 
     /**
      * @return Value of the property {@code test1.txt} loaded from the {@code test1.properties} file.
@@ -55,4 +55,4 @@
 
         return null;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency2.java b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency2.java
index 4717fee..5a2c55b 100644
--- a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency2.java
+++ b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentDependency2.java
@@ -25,14 +25,14 @@
 /**
  * This class used by {@link GridUriDeploymentTestTask2} which loaded from GAR file.
  * GridDependency loaded from {@code /lib/*.jar} in GAR file.
- * GridDependency load resource {@code test.properties} from the same jar in {@code /lib/*.jar}
+ * GridDependency load resource {@code test2.properties} from the same jar in {@code /lib/*.jar}
  */
 public class GridUriDeploymentDependency2 {
     /** */
-    public static final String RESOURCE = "org/apache/ignite/grid/spi/deployment/uri/tasks/test2.properties";
+    public static final String RESOURCE = "org/apache/ignite/spi/deployment/uri/tasks/test2.properties";
 
     /**
-     * @return Value of the property {@code test1.txt} loaded from the {@code test2.properties} file.
+     * @return Value of the property {@code test2.txt} loaded from the {@code test2.properties} file.
      */
     public String getMessage() {
         InputStream in = null;
@@ -44,7 +44,7 @@
 
             props.load(in);
 
-            return props.getProperty("test1.txt");
+            return props.getProperty("test2.txt");
         }
         catch (IOException e) {
             e.printStackTrace();
@@ -55,4 +55,4 @@
 
         return null;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask2.java b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask2.java
index 3a6f5a4..e354b42 100644
--- a/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask2.java
+++ b/modules/extdata/uri/src/main/java/org/apache/ignite/spi/deployment/uri/tasks/GridUriDeploymentTestTask2.java
@@ -27,7 +27,7 @@
 import org.springframework.core.io.ClassPathResource;
 
 /**
- * URI deployment test task which loads Spring bean definitions from spring1.xml configuration file.
+ * URI deployment test task which loads Spring bean definitions from spring2.xml configuration file.
  */
 public class GridUriDeploymentTestTask2 extends ComputeTaskSplitAdapter<Object, Object> {
     /** */
@@ -37,7 +37,7 @@
     @SuppressWarnings("unchecked")
     public GridUriDeploymentTestTask2() {
         XmlBeanFactory factory = new XmlBeanFactory(
-            new ClassPathResource("org/apache/ignite/grid/spi/deployment/uri/tasks/spring2.xml",
+            new ClassPathResource("org/apache/ignite/spi/deployment/uri/tasks/spring2.xml",
                 getClass().getClassLoader()));
 
         factory.setBeanClassLoader(getClass().getClassLoader());
@@ -70,4 +70,4 @@
 
         return null;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java b/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java
index b4a8af4..831e674 100644
--- a/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java
+++ b/modules/geospatial/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SpatialIndex.java
@@ -17,8 +17,6 @@
 
 package org.apache.ignite.internal.processors.query.h2.opt;
 
-import com.vividsolutions.jts.geom.Envelope;
-import com.vividsolutions.jts.geom.Geometry;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -28,6 +26,8 @@
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
+import com.vividsolutions.jts.geom.Envelope;
+import com.vividsolutions.jts.geom.Geometry;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.query.h2.H2Cursor;
 import org.apache.ignite.internal.util.GridCursorIteratorWrapper;
@@ -340,7 +340,12 @@
 
         long time = System.currentTimeMillis();
 
-        IndexingQueryFilter qryFilter = threadLocalFilter();
+        IndexingQueryFilter qryFilter = null;
+        GridH2QueryContext qctx = GridH2QueryContext.get();
+
+        if (qctx != null) {
+            qryFilter = qctx.filter();
+        }
 
         IndexingQueryCacheFilter qryCacheFilter = qryFilter != null ? qryFilter.forCache(getTable().cacheName()) : null;
 
diff --git a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/impl/v2/HadoopV2JobResourceManager.java b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/impl/v2/HadoopV2JobResourceManager.java
index 52e394b..18dbf20 100644
--- a/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/impl/v2/HadoopV2JobResourceManager.java
+++ b/modules/hadoop/src/main/java/org/apache/ignite/internal/processors/hadoop/impl/v2/HadoopV2JobResourceManager.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.util.RunJar;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.internal.processors.hadoop.HadoopCommonUtils;
 import org.apache.ignite.internal.processors.hadoop.HadoopJobId;
 import org.apache.ignite.internal.processors.hadoop.impl.fs.HadoopFileSystemsUtils;
@@ -234,7 +235,7 @@
 
                 if (archiveNameLC.endsWith(".jar"))
                     RunJar.unJar(archiveFile, dstPath);
-                else if (archiveNameLC.endsWith(".zip"))
+                else if (archiveNameLC.endsWith(FilePageStoreManager.ZIP_SUFFIX))
                     FileUtil.unZip(archiveFile, dstPath);
                 else if (archiveNameLC.endsWith(".tar.gz") ||
                     archiveNameLC.endsWith(".tgz") ||
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgfsNearOnlyMultiNodeSelfTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgfsNearOnlyMultiNodeSelfTest.java
index d3df289..20699f1 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgfsNearOnlyMultiNodeSelfTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgfsNearOnlyMultiNodeSelfTest.java
@@ -69,10 +69,6 @@
     /** {@inheritDoc} */
     @Override protected void beforeTestsStarted() throws Exception {
         startGrids(nodeCount());
-
-        grid(0).createNearCache("data", new NearCacheConfiguration());
-
-        grid(0).createNearCache("meta", new NearCacheConfiguration());
     }
 
     /** {@inheritDoc} */
@@ -89,8 +85,10 @@
         FileSystemConfiguration igfsCfg = new FileSystemConfiguration();
 
         igfsCfg.setName("igfs");
-        igfsCfg.setDataCacheConfiguration(cacheConfiguration(igniteInstanceName, "data"));
-        igfsCfg.setMetaCacheConfiguration(cacheConfiguration(igniteInstanceName, "meta"));
+        igfsCfg.setDataCacheConfiguration(cacheConfiguration(igniteInstanceName, "data")
+            .setNearConfiguration(new NearCacheConfiguration()));
+        igfsCfg.setMetaCacheConfiguration(cacheConfiguration(igniteInstanceName, "meta")
+            .setNearConfiguration(new NearCacheConfiguration()));
 
         IgfsIpcEndpointConfiguration endpointCfg = new IgfsIpcEndpointConfiguration();
 
@@ -158,7 +156,7 @@
      */
     protected URI getFileSystemURI(int grid) {
         try {
-            return new URI("igfs://127.0.0.1:" + (IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + grid));
+            return new URI("igfs://igfs@127.0.0.1:" + (IpcSharedMemoryServerEndpoint.DFLT_IPC_PORT + grid));
         }
         catch (URISyntaxException e) {
             throw new RuntimeException(e);
@@ -221,4 +219,4 @@
             }
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgniteHadoopFileSystemClientBasedOpenTest.java b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgniteHadoopFileSystemClientBasedOpenTest.java
index 289e63d..932f4d8 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgniteHadoopFileSystemClientBasedOpenTest.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/internal/processors/hadoop/impl/igfs/IgniteHadoopFileSystemClientBasedOpenTest.java
@@ -176,7 +176,7 @@
      */
     private void checkFsOpenWithAllNodesTypes() throws Exception {
         for (int i = 0; i < nodesTypes.length; ++i) {
-            log.info("Begin test case for nodes: " + S.arrayToString(NodeType.class, nodesTypes[i]));
+            log.info("Begin test case for nodes: " + S.arrayToString(nodesTypes[i]));
 
             startNodes(nodesTypes[i]);
 
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
index 2be2fa9..199fa96 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteHadoopTestSuite.java
@@ -176,7 +176,7 @@
 
         suite.addTest(new TestSuite(ldr.loadClass(HadoopFileSystemsTest.class.getName())));
 
-        //suite.addTest(new TestSuite(ldr.loadClass(HadoopExecutorServiceTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(HadoopExecutorServiceTest.class.getName())));
 
         suite.addTest(new TestSuite(ldr.loadClass(HadoopValidationSelfTest.class.getName())));
 
@@ -207,6 +207,7 @@
 
         suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingTest.class.getName())));
 
+        // TODO https://issues.apache.org/jira/browse/IGNITE-3167
 //        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalTaskExecutionSelfTest.class.getName())));
 //        suite.addTest(new TestSuite(ldr.loadClass(HadoopExternalCommunicationSelfTest.class.getName())));
 //        suite.addTest(new TestSuite(ldr.loadClass(HadoopSortingExternalTest.class.getName())));
diff --git a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
index bebb656..7d1b55d 100644
--- a/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
+++ b/modules/hadoop/src/test/java/org/apache/ignite/testsuites/IgniteIgfsLinuxAndMacOSTestSuite.java
@@ -66,7 +66,7 @@
 
         suite.addTest(new TestSuite(ldr.loadClass(HadoopIgfs20FileSystemShmemPrimarySelfTest.class.getName())));
 
-        //suite.addTest(new TestSuite(ldr.loadClass(IgfsNearOnlyMultiNodeSelfTest.class.getName())));
+        suite.addTest(new TestSuite(ldr.loadClass(IgfsNearOnlyMultiNodeSelfTest.class.getName())));
 
         suite.addTest(IgfsEventsTestSuite.suite());
 
diff --git a/modules/hibernate-4.2/src/test/java/org/apache/ignite/testsuites/IgniteHibernateTestSuite.java b/modules/hibernate-4.2/src/test/java/org/apache/ignite/testsuites/IgniteHibernateTestSuite.java
index 2d9b25e..8d45dea 100644
--- a/modules/hibernate-4.2/src/test/java/org/apache/ignite/testsuites/IgniteHibernateTestSuite.java
+++ b/modules/hibernate-4.2/src/test/java/org/apache/ignite/testsuites/IgniteHibernateTestSuite.java
@@ -46,7 +46,7 @@
         suite.addTestSuite(HibernateL2CacheTransactionalUseSyncSelfTest.class);
         suite.addTestSuite(HibernateL2CacheConfigurationSelfTest.class);
         suite.addTestSuite(HibernateL2CacheStrategySelfTest.class);
-        //suite.addTestSuite(HibernateL2CacheMultiJvmTest.class);
+        suite.addTestSuite(HibernateL2CacheMultiJvmTest.class);
 
         suite.addTestSuite(CacheHibernateBlobStoreSelfTest.class);
 
diff --git a/modules/hibernate-5.1/src/test/java/org/apache/ignite/testsuites/IgniteHibernate5TestSuite.java b/modules/hibernate-5.1/src/test/java/org/apache/ignite/testsuites/IgniteHibernate5TestSuite.java
index 89e4c2a..b571599 100644
--- a/modules/hibernate-5.1/src/test/java/org/apache/ignite/testsuites/IgniteHibernate5TestSuite.java
+++ b/modules/hibernate-5.1/src/test/java/org/apache/ignite/testsuites/IgniteHibernate5TestSuite.java
@@ -46,7 +46,7 @@
         suite.addTestSuite(HibernateL2CacheTransactionalUseSyncSelfTest.class);
         suite.addTestSuite(HibernateL2CacheConfigurationSelfTest.class);
         suite.addTestSuite(HibernateL2CacheStrategySelfTest.class);
-        //suite.addTestSuite(HibernateL2CacheMultiJvmTest.class);
+        suite.addTestSuite(HibernateL2CacheMultiJvmTest.class);
 
         suite.addTestSuite(CacheHibernateBlobStoreSelfTest.class);
 
diff --git a/modules/hibernate-core/src/main/java/org/apache/ignite/cache/hibernate/HibernateCacheProxy.java b/modules/hibernate-core/src/main/java/org/apache/ignite/cache/hibernate/HibernateCacheProxy.java
index 70e55de..fdb87f0 100644
--- a/modules/hibernate-core/src/main/java/org/apache/ignite/cache/hibernate/HibernateCacheProxy.java
+++ b/modules/hibernate-core/src/main/java/org/apache/ignite/cache/hibernate/HibernateCacheProxy.java
@@ -602,16 +602,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public boolean isMongoDataCache() {
-        return delegate.isMongoDataCache();
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean isMongoMetaCache() {
-        return delegate.isMongoMetaCache();
-    }
-
-    /** {@inheritDoc} */
     @Nullable @Override public ExpiryPolicy expiry() {
         return delegate.expiry();
     }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheTwoStepQuery.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheTwoStepQuery.java
index 4a93aaf..a5f0ca2 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheTwoStepQuery.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/cache/query/GridCacheTwoStepQuery.java
@@ -65,6 +65,12 @@
     /** */
     private CacheQueryPartitionInfo[] derivedPartitions;
 
+    /** */
+    private boolean mvccEnabled;
+
+    /** {@code FOR UPDATE} flag. */
+    private boolean forUpdate;
+
     /**
      * @param originalSql Original query SQL.
      * @param tbls Tables in query.
@@ -241,6 +247,8 @@
         cp.distributedJoins = distributedJoins;
         cp.derivedPartitions = derivedPartitions;
         cp.local = local;
+        cp.mvccEnabled = mvccEnabled;
+        cp.forUpdate = forUpdate;
 
         for (int i = 0; i < mapQrys.size(); i++)
             cp.mapQrys.add(mapQrys.get(i).copy());
@@ -262,6 +270,34 @@
         return tbls;
     }
 
+    /**
+     * @return Mvcc flag.
+     */
+    public boolean mvccEnabled() {
+        return mvccEnabled;
+    }
+
+    /**
+     * @param mvccEnabled Mvcc flag.
+     */
+    public void mvccEnabled(boolean mvccEnabled) {
+        this.mvccEnabled = mvccEnabled;
+    }
+
+    /**
+     * @return {@code FOR UPDATE} flag.
+     */
+    public boolean forUpdate() {
+        return forUpdate;
+    }
+
+    /**
+     * @param forUpdate {@code FOR UPDATE} flag.
+     */
+    public void forUpdate(boolean forUpdate) {
+        this.forUpdate = forUpdate;
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(GridCacheTwoStepQuery.class, this);
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DhtResultSetEnlistFuture.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DhtResultSetEnlistFuture.java
new file mode 100644
index 0000000..1d382f7
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DhtResultSetEnlistFuture.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.ResultSet;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxAbstractEnlistFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public class DhtResultSetEnlistFuture extends GridDhtTxAbstractEnlistFuture implements ResultSetEnlistFuture  {
+    /** */
+    private ResultSet rs;
+
+    /**
+     * @param nearNodeId   Near node ID.
+     * @param nearLockVer  Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId     Thread ID.
+     * @param nearFutId    Near future id.
+     * @param nearMiniId   Near mini future id.
+     * @param parts        Partitions.
+     * @param tx           Transaction.
+     * @param timeout      Lock acquisition timeout.
+     * @param cctx         Cache context.
+     * @param rs           Result set to process.
+     */
+    public DhtResultSetEnlistFuture(UUID nearNodeId, GridCacheVersion nearLockVer,
+        MvccSnapshot mvccSnapshot, long threadId, IgniteUuid nearFutId, int nearMiniId, @Nullable int[] parts,
+        GridDhtTxLocalAdapter tx, long timeout, GridCacheContext<?, ?> cctx, ResultSet rs) {
+        super(nearNodeId, nearLockVer, mvccSnapshot, threadId, nearFutId, nearMiniId, parts, tx, timeout, cctx);
+
+        this.rs = rs;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected UpdateSourceIterator<?> createIterator() {
+        return ResultSetEnlistFuture.createIterator(rs);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
index 5270e7f..6ce43dd 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/DmlStatementsProcessor.java
@@ -43,6 +43,8 @@
 import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.bulkload.BulkLoadAckClientParameters;
 import org.apache.ignite.internal.processors.bulkload.BulkLoadCacheWriter;
 import org.apache.ignite.internal.processors.bulkload.BulkLoadParser;
@@ -51,14 +53,20 @@
 import org.apache.ignite.internal.processors.cache.CacheOperationContext;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.StaticMvccQueryTracker;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
 import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
 import org.apache.ignite.internal.processors.odbc.SqlStateCode;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
 import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator;
 import org.apache.ignite.internal.processors.query.GridQueryCancel;
 import org.apache.ignite.internal.processors.query.GridQueryFieldsResult;
 import org.apache.ignite.internal.processors.query.GridQueryFieldsResultAdapter;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.QueryUtils;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
 import org.apache.ignite.internal.processors.query.h2.dml.DmlBatchSender;
 import org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedPlanInfo;
 import org.apache.ignite.internal.processors.query.h2.dml.DmlUtils;
@@ -67,6 +75,7 @@
 import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlanBuilder;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
 import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.sql.command.SqlCommand;
 import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap;
@@ -86,6 +95,11 @@
 import org.h2.command.dml.Update;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.checkActive;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccTracker;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.requestSnapshot;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.tx;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.txStart;
 import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.DUPLICATE_KEY;
 import static org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode.createJdbcSqlException;
 import static org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing.UPDATE_RESULT_META;
@@ -95,7 +109,7 @@
  */
 public class DmlStatementsProcessor {
     /** Default number of attempts to re-run DELETE and UPDATE queries in case of concurrent modifications of values. */
-    private final static int DFLT_DML_RERUN_ATTEMPTS = 4;
+    private static final int DFLT_DML_RERUN_ATTEMPTS = 4;
 
     /** Indexing. */
     private IgniteH2Indexing idx;
@@ -168,7 +182,7 @@
             UpdateResult r;
 
             try {
-                r = executeUpdateStatement(schemaName, cctx, conn, prepared, fieldsQry, loc, filters, cancel);
+                r = executeUpdateStatement(schemaName, plan, fieldsQry, loc, filters, cancel);
             }
             finally {
                 cctx.operationContextPerCall(opCtx);
@@ -211,17 +225,16 @@
 
         UpdatePlan plan = getPlanForStatement(schemaName, conn, prepared, fieldsQry, loc, null);
 
-        if (plan.hasRows() && plan.mode() == UpdateMode.INSERT) {
-            GridCacheContext<?, ?> cctx = plan.cacheContext();
+        GridCacheContext<?, ?> cctx = plan.cacheContext();
 
+        // For MVCC case, let's enlist batch elements one by one.
+        if (plan.hasRows() && plan.mode() == UpdateMode.INSERT && !cctx.mvccEnabled()) {
             CacheOperationContext opCtx = setKeepBinaryContext(cctx);
 
             try {
                 List<List<List<?>>> cur = plan.createRows(argss);
 
-                List<UpdateResult> res = processDmlSelectResultBatched(plan, cur, fieldsQry.getPageSize());
-
-                return res;
+                return processDmlSelectResultBatched(plan, cur, fieldsQry.getPageSize());
             }
             finally {
                 cctx.operationContextPerCall(opCtx);
@@ -344,8 +357,6 @@
         else {
             UpdateResult res = updateSqlFields(schemaName, c, p, fieldsQry, false, null, cancel);
 
-            ArrayList<QueryCursorImpl<List<?>>> resCurs = new ArrayList<>(1);
-
             checkUpdateResult(res);
 
             QueryCursorImpl<List<?>> resCur = (QueryCursorImpl<List<?>>)new QueryCursorImpl(Collections.singletonList
@@ -353,9 +364,7 @@
 
             resCur.fieldsMeta(UPDATE_RESULT_META);
 
-            resCurs.add(resCur);
-
-            return resCurs;
+            return Collections.singletonList(resCur);
         }
     }
 
@@ -419,7 +428,7 @@
                     if (!F.isEmpty(plan.selectQuery())) {
                         GridQueryFieldsResult res = idx.queryLocalSqlFields(idx.schema(cctx.name()),
                             plan.selectQuery(), F.asList(U.firstNotNull(args, X.EMPTY_OBJECT_ARRAY)),
-                            null, false, 0, null);
+                            null, false, false, 0, null);
 
                         it = res.iterator();
                     }
@@ -467,9 +476,7 @@
      * Actually perform SQL DML operation locally.
      *
      * @param schemaName Schema name.
-     * @param cctx Cache context.
-     * @param c Connection.
-     * @param prepared Prepared statement for DML query.
+     * @param plan Cache context.
      * @param fieldsQry Fields query.
      * @param loc Local query flag.
      * @param filters Cache name and key filter.
@@ -478,12 +485,124 @@
      * @throws IgniteCheckedException if failed.
      */
     @SuppressWarnings({"ConstantConditions", "unchecked"})
-    private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx, Connection c,
-        Prepared prepared, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters,
+    private UpdateResult executeUpdateStatement(String schemaName, final UpdatePlan plan,
+        SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters,
         GridQueryCancel cancel) throws IgniteCheckedException {
-        Integer errKeysPos = null;
+        GridCacheContext cctx = plan.cacheContext();
 
-        UpdatePlan plan = getPlanForStatement(schemaName, c, prepared, fieldsQry, loc, errKeysPos);
+        if (cctx != null && cctx.mvccEnabled()) {
+            assert cctx.transactional();
+
+            DmlDistributedPlanInfo distributedPlan = plan.distributedPlan();
+
+            GridNearTxLocal tx = tx(cctx.kernalContext());
+
+            boolean implicit = (tx == null);
+
+            boolean commit = implicit && (!(fieldsQry instanceof SqlFieldsQueryEx) ||
+                ((SqlFieldsQueryEx)fieldsQry).isAutoCommit());
+
+            if (implicit)
+                tx = txStart(cctx, fieldsQry.getTimeout());
+
+            requestSnapshot(cctx, checkActive(tx));
+
+            try (GridNearTxLocal toCommit = commit ? tx : null) {
+                long timeout;
+
+                if (implicit)
+                    timeout = tx.remainingTime();
+                else {
+                    long tm1 = tx.remainingTime(), tm2 = fieldsQry.getTimeout();
+
+                    timeout = tm1 > 0 && tm2 > 0 ? Math.min(tm1, tm2) : Math.max(tm1, tm2);
+                }
+
+                if (cctx.isReplicated() || distributedPlan == null || ((plan.mode() == UpdateMode.INSERT
+                    || plan.mode() == UpdateMode.MERGE) && !plan.isLocalSubquery())) {
+
+                    boolean sequential = true;
+
+                    UpdateSourceIterator<?> it;
+
+                    if (plan.fastResult()) {
+                        IgniteBiTuple row = plan.getFastRow(fieldsQry.getArgs());
+
+                        EnlistOperation op = UpdatePlan.enlistOperation(plan.mode());
+
+                        it = new DmlUpdateSingleEntryIterator<>(op, op.isDeleteOrLock() ? row.getKey() : row);
+                    }
+                    else if (plan.hasRows())
+                        it = new DmlUpdateResultsIterator(UpdatePlan.enlistOperation(plan.mode()), plan, plan.createRows(fieldsQry.getArgs()));
+                    else {
+                        // TODO IGNITE-8865 if there is no ORDER BY statement it's no use to retain entries order on locking (sequential = false).
+                        SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), fieldsQry.isCollocated())
+                            .setArgs(fieldsQry.getArgs())
+                            .setDistributedJoins(fieldsQry.isDistributedJoins())
+                            .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder())
+                            .setLocal(fieldsQry.isLocal())
+                            .setPageSize(fieldsQry.getPageSize())
+                            .setTimeout((int)timeout, TimeUnit.MILLISECONDS);
+
+                        FieldsQueryCursor<List<?>> cur = idx.querySqlFields(schemaName, newFieldsQry, null,
+                            true, true, mvccTracker(cctx, tx), cancel).get(0);
+
+                        it = plan.iteratorForTransaction(idx, cur);
+                    }
+
+                    IgniteInternalFuture<Long> fut = tx.updateAsync(cctx, it,
+                        fieldsQry.getPageSize(), timeout, sequential);
+
+                    UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY);
+
+                    if (commit)
+                        toCommit.commit();
+
+                    return res;
+                }
+
+                int[] ids = U.toIntArray(distributedPlan.getCacheIds());
+
+                int flags = 0;
+
+                if (fieldsQry.isEnforceJoinOrder())
+                    flags |= GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;
+
+                if (distributedPlan.isReplicatedOnly())
+                    flags |= GridH2QueryRequest.FLAG_REPLICATED;
+
+                int[] parts = fieldsQry.getPartitions();
+
+                IgniteInternalFuture<Long> fut = tx.updateAsync(
+                    cctx,
+                    ids,
+                    parts,
+                    schemaName,
+                    fieldsQry.getSql(),
+                    fieldsQry.getArgs(),
+                    flags,
+                    fieldsQry.getPageSize(),
+                    timeout);
+
+                UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY);
+
+                if (commit)
+                    toCommit.commit();
+
+                return res;
+            }
+            catch (IgniteCheckedException e) {
+                checkSqlException(e);
+
+                U.error(log, "Error during update [localNodeId=" + cctx.localNodeId() + "]", e);
+
+                throw new IgniteSQLException("Failed to run update. " + e.getMessage(), e);
+            }
+            finally {
+                if (commit)
+                    cctx.tm().resetContext();
+            }
+        }
 
         UpdateResult fastUpdateRes = plan.processFast(fieldsQry.getArgs());
 
@@ -514,13 +633,14 @@
                 .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);
 
             cur = (QueryCursorImpl<List<?>>)idx.querySqlFields(schemaName, newFieldsQry, null, true, true,
-                cancel).get(0);
+                null, cancel).get(0);
         }
         else if (plan.hasRows())
             cur = plan.createRows(fieldsQry.getArgs());
         else {
             final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQuery(),
-                F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel);
+                F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), false, fieldsQry.getTimeout(),
+                cancel);
 
             cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
                 @Override public Iterator<List<?>> iterator() {
@@ -536,7 +656,17 @@
 
         int pageSize = loc ? 0 : fieldsQry.getPageSize();
 
-        return processDmlSelectResult(cctx, plan, cur, pageSize);
+        return processDmlSelectResult(plan, cur, pageSize);
+    }
+
+    /**
+     * @param e Exception.
+     */
+    private void checkSqlException(IgniteCheckedException e) {
+        IgniteSQLException sqlEx = X.cause(e, IgniteSQLException.class);
+
+        if(sqlEx != null)
+            throw sqlEx;
     }
 
     /**
@@ -564,14 +694,13 @@
     }
 
     /**
-     * @param cctx Cache context.
      * @param plan Update plan.
      * @param cursor Cursor over select results.
      * @param pageSize Page size.
      * @return Pair [number of successfully processed items; keys that have failed to be processed]
      * @throws IgniteCheckedException if failed.
      */
-    private UpdateResult processDmlSelectResult(GridCacheContext cctx, UpdatePlan plan, Iterable<List<?>> cursor,
+    private UpdateResult processDmlSelectResult(UpdatePlan plan, Iterable<List<?>> cursor,
         int pageSize) throws IgniteCheckedException {
         switch (plan.mode()) {
             case MERGE:
@@ -584,7 +713,7 @@
                 return doUpdate(plan, cursor, pageSize);
 
             case DELETE:
-                return doDelete(cctx, cursor, pageSize);
+                return doDelete(plan.cacheContext(), cursor, pageSize);
 
             default:
                 throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode() + ']',
@@ -606,6 +735,8 @@
     @SuppressWarnings({"unchecked", "ConstantConditions"})
     UpdatePlan getPlanForStatement(String schema, Connection conn, Prepared p, SqlFieldsQuery fieldsQry,
         boolean loc, @Nullable Integer errKeysPos) throws IgniteCheckedException {
+        isDmlOnSchemaSupported(schema);
+
         H2CachedStatementKey planKey = H2CachedStatementKey.forDmlStatement(schema, p.getSQL(), fieldsQry, loc);
 
         UpdatePlan res = (errKeysPos == null ? planCache.get(planKey) : null);
@@ -973,6 +1104,82 @@
     }
 
     /**
+     * @param schema Schema name.
+     * @param conn Connection.
+     * @param stmt Prepared statement.
+     * @param qry Sql fields query
+     * @param filter Backup filter.
+     * @param cancel Query cancel object.
+     * @param local {@code true} if should be executed locally.
+     * @param topVer Topology version.
+     * @param mvccSnapshot MVCC snapshot.
+     * @return Iterator upon updated values.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    public UpdateSourceIterator<?> prepareDistributedUpdate(String schema, Connection conn,
+        PreparedStatement stmt, SqlFieldsQuery qry,
+        IndexingQueryFilter filter, GridQueryCancel cancel, boolean local,
+        AffinityTopologyVersion topVer, MvccSnapshot mvccSnapshot) throws IgniteCheckedException {
+
+        Prepared prepared = GridSqlQueryParser.prepared(stmt);
+
+        UpdatePlan plan = getPlanForStatement(schema, conn, prepared, qry, local, null);
+
+        GridCacheContext cctx = plan.cacheContext();
+
+        CacheOperationContext opCtx = cctx.operationContextPerCall();
+
+        // Force keepBinary for operation context to avoid binary deserialization inside entry processor
+        if (cctx.binaryMarshaller()) {
+            CacheOperationContext newOpCtx = null;
+
+            if (opCtx == null)
+                newOpCtx = new CacheOperationContext().keepBinary();
+            else if (!opCtx.isKeepBinary())
+                newOpCtx = opCtx.keepBinary();
+
+            if (newOpCtx != null)
+                cctx.operationContextPerCall(newOpCtx);
+        }
+
+        QueryCursorImpl<List<?>> cur;
+
+        // Do a two-step query only if locality flag is not set AND if plan's SELECT corresponds to an actual
+        // sub-query and not some dummy stuff like "select 1, 2, 3;"
+        if (!local && !plan.isLocalSubquery()) {
+            SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQuery(), qry.isCollocated())
+                .setArgs(qry.getArgs())
+                .setDistributedJoins(qry.isDistributedJoins())
+                .setEnforceJoinOrder(qry.isEnforceJoinOrder())
+                .setLocal(qry.isLocal())
+                .setPageSize(qry.getPageSize())
+                .setTimeout(qry.getTimeout(), TimeUnit.MILLISECONDS);
+
+            cur = (QueryCursorImpl<List<?>>)idx.querySqlFields(schema, newFieldsQry, null, true, true,
+                new StaticMvccQueryTracker(cctx, mvccSnapshot), cancel).get(0);
+        }
+        else {
+            final GridQueryFieldsResult res = idx.queryLocalSqlFields(schema, plan.selectQuery(),
+                F.asList(qry.getArgs()), filter, qry.isEnforceJoinOrder(), false, qry.getTimeout(), cancel,
+                new StaticMvccQueryTracker(cctx, mvccSnapshot));
+
+            cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
+                @Override public Iterator<List<?>> iterator() {
+                    try {
+                        return res.iterator();
+                    }
+                    catch (IgniteCheckedException e) {
+                        throw new IgniteException(e);
+                    }
+                }
+            }, cancel);
+        }
+
+        return plan.iteratorForTransaction(idx, cur);
+    }
+
+    /**
      * Runs a DML statement for which we have internal command executor.
      *
      * @param sql The SQL command text to execute.
@@ -1041,7 +1248,7 @@
     }
 
     /** */
-    private final static class InsertEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
+    private static final class InsertEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
         /** Value to set. */
         private final Object val;
 
@@ -1064,7 +1271,7 @@
     /**
      * Entry processor invoked by UPDATE and DELETE operations.
      */
-    private final static class ModifyingEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
+    private static final class ModifyingEntryProcessor implements EntryProcessor<Object, Object, Boolean> {
         /** Value to expect. */
         private final Object val;
 
@@ -1139,6 +1346,17 @@
     }
 
     /**
+     * Check if schema supports DDL statement.
+     *
+     * @param schemaName Schema name.
+     */
+    private static void isDmlOnSchemaSupported(String schemaName) {
+        if (F.eq(QueryUtils.SCHEMA_SYS, schemaName))
+            throw new IgniteSQLException("DML statements are not supported on " + schemaName + " schema",
+                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+    }
+
+    /**
      * Check update result for erroneous keys and throws concurrent update exception if necessary.
      *
      * @param r Update result.
@@ -1181,4 +1399,82 @@
             return plan.processRow(record);
         }
     }
+
+    /** */
+    private static class DmlUpdateResultsIterator
+        implements UpdateSourceIterator<Object> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private EnlistOperation op;
+
+        /** */
+        private UpdatePlan plan;
+
+        /** */
+        private Iterator<List<?>> it;
+
+        /** */
+        DmlUpdateResultsIterator(EnlistOperation op, UpdatePlan plan, Iterable<List<?>> rows) {
+            this.op = op;
+            this.plan = plan;
+            this.it = rows.iterator();
+        }
+
+        /** {@inheritDoc} */
+        @Override public EnlistOperation operation() {
+            return op;
+        }
+
+        /** {@inheritDoc} */
+        public boolean hasNextX() {
+            return it.hasNext();
+        }
+
+        /** {@inheritDoc} */
+        public Object nextX() throws IgniteCheckedException {
+            return plan.processRowForTx(it.next());
+        }
+    }
+
+    /** */
+    private static class DmlUpdateSingleEntryIterator<T> implements UpdateSourceIterator<T> {
+        /** */
+        private static final long serialVersionUID = 0L;
+
+        /** */
+        private EnlistOperation op;
+
+        /** */
+        private boolean first = true;
+
+        /** */
+        private T entry;
+
+        /** */
+        DmlUpdateSingleEntryIterator(EnlistOperation op, T entry) {
+            this.op = op;
+            this.entry = entry;
+        }
+
+        /** {@inheritDoc} */
+        @Override public EnlistOperation operation() {
+            return op;
+        }
+
+        /** {@inheritDoc} */
+        public boolean hasNextX() {
+            return first;
+        }
+
+        /** {@inheritDoc} */
+        public T nextX() {
+            T res = first ? entry : null;
+
+            first = false;
+
+            return res;
+        }
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ConnectionWrapper.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ConnectionWrapper.java
index e180c9c..425015a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ConnectionWrapper.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ConnectionWrapper.java
@@ -17,26 +17,34 @@
 
 package org.apache.ignite.internal.processors.query.h2;
 
+import java.sql.Connection;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
 import org.jetbrains.annotations.Nullable;
 
-import java.sql.Connection;
-
 /**
- * Wrapper to store connection and flag is schema set or not.
+ * Wrapper to store connection with currently used schema and statement cache.
  */
-public class H2ConnectionWrapper {
+public class H2ConnectionWrapper implements AutoCloseable {
     /** */
-    private Connection conn;
+    private static final int STATEMENT_CACHE_SIZE = 256;
+
+    /** */
+    private final Connection conn;
 
     /** */
     private volatile String schema;
 
+    /** */
+    private volatile H2StatementCache statementCache;
+
     /**
      * @param conn Connection to use.
      */
     H2ConnectionWrapper(Connection conn) {
         this.conn = conn;
+
+        initStatementCache();
     }
 
     /**
@@ -60,8 +68,42 @@
         return conn;
     }
 
+    /**
+     * @return Statement cache corresponding to connection.
+     */
+    public H2StatementCache statementCache() {
+        return statementCache;
+    }
+
+    /**
+     * Clears statement cache.
+     */
+    public void clearStatementCache() {
+        initStatementCache();
+    }
+
+    /**
+     * @return Statement cache size.
+     */
+    public int statementCacheSize() {
+        return statementCache == null ? 0 : statementCache.size();
+    }
+
+    /**
+     * Initializes statement cache.
+     */
+    private void initStatementCache() {
+        statementCache = new H2StatementCache(STATEMENT_CACHE_SIZE);
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return S.toString(H2ConnectionWrapper.class, this);
     }
+
+    /** Closes wrapped connection */
+    @Override
+    public void close() {
+        U.closeQuiet(conn);
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2FieldsIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2FieldsIterator.java
index f300c3f..e9f293c 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2FieldsIterator.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2FieldsIterator.java
@@ -17,12 +17,12 @@
 
 package org.apache.ignite.internal.processors.query.h2;
 
-import org.apache.ignite.IgniteCheckedException;
-
 import java.sql.ResultSet;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
 
 /**
  * Special field set iterator based on database result set.
@@ -31,20 +31,39 @@
     /** */
     private static final long serialVersionUID = 0L;
 
+    /** */
+    private transient MvccQueryTracker mvccTracker;
+
     /**
      * @param data Data.
+     * @param mvccTracker Mvcc tracker.
+     * @param forUpdate {@code SELECT FOR UPDATE} flag.
      * @throws IgniteCheckedException If failed.
      */
-    public H2FieldsIterator(ResultSet data) throws IgniteCheckedException {
-        super(data, false, true);
+    public H2FieldsIterator(ResultSet data, MvccQueryTracker mvccTracker, boolean forUpdate)
+        throws IgniteCheckedException {
+        super(data, forUpdate);
+
+        this.mvccTracker = mvccTracker;
     }
 
     /** {@inheritDoc} */
     @Override protected List<?> createRow() {
-        ArrayList<Object> res = new ArrayList<>(row.length);
+        List<Object> res = new ArrayList<>(row.length);
 
         Collections.addAll(res, row);
 
         return res;
     }
+
+    /** {@inheritDoc} */
+    @Override public void onClose() throws IgniteCheckedException {
+        try {
+            super.onClose();
+        }
+        finally {
+            if (mvccTracker != null)
+                mvccTracker.onDone();
+        }
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2KeyValueIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2KeyValueIterator.java
index 2088e44..770d9d5 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2KeyValueIterator.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2KeyValueIterator.java
@@ -34,7 +34,7 @@
      * @throws IgniteCheckedException If failed.
      */
     protected H2KeyValueIterator(ResultSet data) throws IgniteCheckedException {
-        super(data, false, true);
+        super(data, false);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java
index 1b9aea3..814e83d 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIterator.java
@@ -26,7 +26,6 @@
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2ValueCacheObject;
 import org.apache.ignite.internal.util.GridCloseableIteratorAdapter;
 import org.apache.ignite.internal.util.typedef.internal.S;
-import org.apache.ignite.internal.util.typedef.internal.U;
 import org.h2.jdbc.JdbcResultSet;
 import org.h2.result.ResultInterface;
 import org.h2.value.Value;
@@ -56,32 +55,27 @@
     private static final long serialVersionUID = 0L;
 
     /** */
-    private final ResultInterface res;
+    private ResultInterface res;
 
     /** */
-    private final ResultSet data;
+    private ResultSet data;
 
     /** */
     protected final Object[] row;
 
     /** */
-    private final boolean closeStmt;
-
-    /** */
     private boolean hasRow;
 
     /**
      * @param data Data array.
-     * @param closeStmt If {@code true} closes result set statement when iterator is closed.
-     * @param needCpy {@code True} if need copy cache object's value.
+     * @param forUpdate Whether is result is one of {@code SELECT FOR UPDATE} query.
      * @throws IgniteCheckedException If failed.
      */
-    protected H2ResultSetIterator(ResultSet data, boolean closeStmt, boolean needCpy) throws IgniteCheckedException {
+    protected H2ResultSetIterator(ResultSet data, boolean forUpdate) throws IgniteCheckedException {
         this.data = data;
-        this.closeStmt = closeStmt;
 
         try {
-            res = needCpy ? (ResultInterface)RESULT_FIELD.get(data) : null;
+            res = (ResultInterface)RESULT_FIELD.get(data);
         }
         catch (IllegalAccessException e) {
             throw new IllegalStateException(e); // Must not happen.
@@ -89,7 +83,9 @@
 
         if (data != null) {
             try {
-                row = new Object[data.getMetaData().getColumnCount()];
+                int colsCnt = data.getMetaData().getColumnCount();
+
+                row = new Object[forUpdate ? colsCnt - 1 : colsCnt];
             }
             catch (SQLException e) {
                 throw new IgniteCheckedException(e);
@@ -102,13 +98,16 @@
     /**
      * @return {@code true} If next row was fetched successfully.
      */
-    private boolean fetchNext() {
+    private boolean fetchNext() throws IgniteCheckedException {
         if (data == null)
             return false;
 
         try {
-            if (!data.next())
+            if (!data.next()) {
+                close();
+
                 return false;
+            }
 
             if (res != null) {
                 Value[] values = res.currentRow();
@@ -138,7 +137,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public boolean onHasNext() {
+    @Override public boolean onHasNext() throws IgniteCheckedException {
         return hasRow || (hasRow = fetchNext());
     }
 
@@ -169,16 +168,16 @@
             // Nothing to close.
             return;
 
-        if (closeStmt) {
-            try {
-                U.closeQuiet(data.getStatement());
-            }
-            catch (SQLException e) {
-                throw new IgniteCheckedException(e);
-            }
+        try {
+            data.close();
         }
-
-        U.closeQuiet(data);
+        catch (SQLException e) {
+            throw new IgniteSQLException(e);
+        }
+        finally {
+            res = null;
+            data = null;
+        }
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java
index 2fdf32d..ab7cb4b 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Schema.java
@@ -34,6 +34,9 @@
     /** */
     private final ConcurrentMap<H2TypeKey, H2TableDescriptor> typeToTbl = new ConcurrentHashMap<>();
 
+    /** Whether schema is predefined and cannot be dorpped. */
+    private final boolean predefined;
+
     /** Usage count. */
     private int usageCnt;
 
@@ -41,9 +44,11 @@
      * Constructor.
      *
      * @param schemaName Schema name.
+     * @param predefined Predefined flag.
      */
-    public H2Schema(String schemaName) {
+    public H2Schema(String schemaName, boolean predefined) {
         this.schemaName = schemaName;
+        this.predefined = predefined;
     }
 
     /**
@@ -55,20 +60,19 @@
 
     /**
      * Increments counter for number of caches having this schema.
-     *
-     * @return New value of caches counter.
      */
-    public int incrementUsageCount() {
-        return ++usageCnt;
+    public void incrementUsageCount() {
+        if (!predefined)
+            ++usageCnt;
     }
 
     /**
      * Increments counter for number of caches having this schema.
      *
-     * @return New value of caches counter.
+     * @return If schema is no longer used.
      */
-    public int decrementUsageCount() {
-        return --usageCnt;
+    public boolean decrementUsageCount() {
+        return !predefined && --usageCnt == 0;
     }
 
     /**
@@ -128,14 +132,9 @@
     }
 
     /**
-     * Called after the schema was dropped.
+     * @return {@code True} if schema is predefined.
      */
-    public void dropAll() {
-        for (H2TableDescriptor tbl : tbls.values())
-            tbl.onDrop();
-
-        tbls.clear();
-
-        typeToTbl.clear();
+    public boolean predefined() {
+        return predefined;
     }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2SqlFieldMetadata.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2SqlFieldMetadata.java
index 46aa1fc..de4c6c6 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2SqlFieldMetadata.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2SqlFieldMetadata.java
@@ -46,6 +46,12 @@
     /** Type. */
     private String type;
 
+    /** Precision. */
+    private int precision;
+
+    /** Scale. */
+    private int scale;
+
     /**
      * Required by {@link Externalizable}.
      */
@@ -58,14 +64,19 @@
      * @param typeName Type name.
      * @param name Name.
      * @param type Type.
+     * @param precision Precision.
+     * @param scale Scale.
      */
-    H2SqlFieldMetadata(@Nullable String schemaName, @Nullable String typeName, String name, String type) {
+    H2SqlFieldMetadata(@Nullable String schemaName, @Nullable String typeName, String name, String type,
+        int precision, int scale) {
         assert name != null && type != null : schemaName + " | " + typeName + " | " + name + " | " + type;
 
         this.schemaName = schemaName;
         this.typeName = typeName;
         this.name = name;
         this.type = type;
+        this.precision = precision;
+        this.scale = scale;
     }
 
     /** {@inheritDoc} */
@@ -89,11 +100,24 @@
     }
 
     /** {@inheritDoc} */
+    @Override public int precision() {
+        return precision;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int scale() {
+        return scale;
+    }
+
+    /** {@inheritDoc} */
     @Override public void writeExternal(ObjectOutput out) throws IOException {
         U.writeString(out, schemaName);
         U.writeString(out, typeName);
         U.writeString(out, name);
         U.writeString(out, type);
+        out.write(precision);
+        out.write(scale);
+
     }
 
     /** {@inheritDoc} */
@@ -102,6 +126,8 @@
         typeName = U.readString(in);
         name = U.readString(in);
         type = U.readString(in);
+        precision = in.read();
+        scale = in.read();
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java
index 673625f..6426994 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2StatementCache.java
@@ -17,52 +17,59 @@
 
 package org.apache.ignite.internal.processors.query.h2;
 
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
 import java.sql.PreparedStatement;
 import java.util.LinkedHashMap;
 import java.util.Map;
-import org.apache.ignite.internal.util.typedef.internal.U;
-import org.jetbrains.annotations.Nullable;
 
 /**
- * Statement cache.
+ * Statement cache. LRU eviction policy is used. Not thread-safe.
  */
-class H2StatementCache extends LinkedHashMap<H2CachedStatementKey, PreparedStatement> {
-    /** */
-    private int size;
-
+final class H2StatementCache {
     /** Last usage. */
     private volatile long lastUsage;
 
+    /** */
+    private final LinkedHashMap<H2CachedStatementKey, PreparedStatement> lruStmtCache;
+
     /**
-     * @param size Size.
+     * @param size Maximum number of statements this cache can store.
      */
     H2StatementCache(int size) {
-        super(size, (float)0.75, true);
+        lruStmtCache = new LinkedHashMap<H2CachedStatementKey, PreparedStatement>(size, .75f, true) {
+            @Override
+            protected boolean removeEldestEntry(Map.Entry<H2CachedStatementKey, PreparedStatement> eldest) {
+                if (size() <= size)
+                    return false;
 
-        this.size = size;
-    }
+                U.closeQuiet(eldest.getValue());
 
-    /** {@inheritDoc} */
-    @Override protected boolean removeEldestEntry(Map.Entry<H2CachedStatementKey, PreparedStatement> eldest) {
-        boolean rmv = size() > size;
-
-        if (rmv) {
-            PreparedStatement stmt = eldest.getValue();
-
-            U.closeQuiet(stmt);
-        }
-
-        return rmv;
+                return true;
+            }
+        };
     }
 
     /**
-     * Get statement for given schema and SQL.
-     * @param schemaName Schema name.
-     * @param sql SQL statement.
-     * @return Cached {@link PreparedStatement}, or {@code null} if none found.
+     * Caches a statement.
+     *
+     * @param key Key associated with statement.
+     * @param stmt Statement which will be cached.
      */
-    @Nullable public PreparedStatement get(String schemaName, String sql) {
-        return get(new H2CachedStatementKey(schemaName, sql));
+    void put(H2CachedStatementKey key, @NotNull PreparedStatement stmt) {
+        lruStmtCache.put(key, stmt);
+    }
+
+    /**
+     * Retrieves cached statement.
+     *
+     * @param key Key for a statement.
+     * @return Statement associated with a key.
+     */
+    @Nullable PreparedStatement get(H2CachedStatementKey key) {
+        return lruStmtCache.get(key);
     }
 
     /**
@@ -70,24 +77,31 @@
      *
      * @return last usage timestamp
      */
-    public long lastUsage() {
+    long lastUsage() {
         return lastUsage;
     }
 
     /**
      * Updates the {@link #lastUsage} timestamp by current time.
      */
-    public void updateLastUsage() {
+    void updateLastUsage() {
         lastUsage = U.currentTimeMillis();
     }
 
     /**
      * Remove statement for given schema and SQL.
+     *
      * @param schemaName Schema name.
      * @param sql SQL statement.
-     * @return Cached {@link PreparedStatement}, or {@code null} if none found.
      */
-    @Nullable public PreparedStatement remove(String schemaName, String sql) {
-        return remove(new H2CachedStatementKey(schemaName, sql));
+    void remove(String schemaName, String sql) {
+        lruStmtCache.remove(new H2CachedStatementKey(schemaName, sql));
+    }
+
+    /**
+     * @return Cache size.
+     */
+    int size() {
+        return lruStmtCache.size();
     }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java
index e9d9f90..b9d9d8e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/H2Utils.java
@@ -211,11 +211,13 @@
             String typeName = rsMeta.getTableName(i);
             String name = rsMeta.getColumnLabel(i);
             String type = rsMeta.getColumnClassName(i);
+            int precision = rsMeta.getPrecision(i);
+            int scale = rsMeta.getScale(i);
 
             if (type == null) // Expression always returns NULL.
                 type = Void.class.getName();
 
-            meta.add(new H2SqlFieldMetadata(schemaName, typeName, name, type));
+            meta.add(new H2SqlFieldMetadata(schemaName, typeName, name, type, precision, scale));
         }
 
         return meta;
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
index 4f51ca0..79c431f 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/IgniteH2Indexing.java
@@ -69,6 +69,12 @@
 import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
 import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxSelectForUpdateFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.TxTopologyVersionFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryPartitionInfo;
@@ -85,20 +91,25 @@
 import org.apache.ignite.internal.processors.query.GridQueryFieldsResult;
 import org.apache.ignite.internal.processors.query.GridQueryFieldsResultAdapter;
 import org.apache.ignite.internal.processors.query.GridQueryIndexing;
+import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
 import org.apache.ignite.internal.processors.query.GridRunningQueryInfo;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.NestedTxMode;
 import org.apache.ignite.internal.processors.query.QueryField;
 import org.apache.ignite.internal.processors.query.QueryIndexDescriptorImpl;
 import org.apache.ignite.internal.processors.query.QueryUtils;
 import org.apache.ignite.internal.processors.query.SqlClientContext;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
 import org.apache.ignite.internal.processors.query.h2.database.H2RowFactory;
 import org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasInnerIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasLeafIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2InnerIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2LeafIO;
+import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccInnerIO;
+import org.apache.ignite.internal.processors.query.h2.database.io.H2MvccLeafIO;
 import org.apache.ignite.internal.processors.query.h2.ddl.DdlStatementsProcessor;
 import org.apache.ignite.internal.processors.query.h2.dml.DmlUtils;
 import org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan;
@@ -109,16 +120,23 @@
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAst;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlTable;
 import org.apache.ignite.internal.processors.query.h2.sys.SqlSystemTableEngine;
+import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemView;
+import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemViewBaselineNodes;
+import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemViewNodeAttributes;
+import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemViewNodeMetrics;
 import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemViewNodes;
 import org.apache.ignite.internal.processors.query.h2.twostep.GridMapQueryExecutor;
 import org.apache.ignite.internal.processors.query.h2.twostep.GridReduceQueryExecutor;
 import org.apache.ignite.internal.processors.query.h2.twostep.MapQueryLazyWorker;
-import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemView;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitor;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure;
 import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorImpl;
@@ -127,14 +145,17 @@
 import org.apache.ignite.internal.sql.SqlParser;
 import org.apache.ignite.internal.sql.SqlStrictParseException;
 import org.apache.ignite.internal.sql.command.SqlAlterTableCommand;
-import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.sql.command.SqlAlterUserCommand;
+import org.apache.ignite.internal.sql.command.SqlBeginTransactionCommand;
+import org.apache.ignite.internal.sql.command.SqlBulkLoadCommand;
 import org.apache.ignite.internal.sql.command.SqlCommand;
+import org.apache.ignite.internal.sql.command.SqlCommitTransactionCommand;
 import org.apache.ignite.internal.sql.command.SqlCreateIndexCommand;
 import org.apache.ignite.internal.sql.command.SqlCreateUserCommand;
 import org.apache.ignite.internal.sql.command.SqlDropIndexCommand;
-import org.apache.ignite.internal.sql.command.SqlSetStreamingCommand;
 import org.apache.ignite.internal.sql.command.SqlDropUserCommand;
+import org.apache.ignite.internal.sql.command.SqlRollbackTransactionCommand;
+import org.apache.ignite.internal.sql.command.SqlSetStreamingCommand;
 import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap;
 import org.apache.ignite.internal.util.GridEmptyCloseableIterator;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
@@ -150,6 +171,7 @@
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.lang.IgniteFuture;
 import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.marshaller.Marshaller;
 import org.apache.ignite.marshaller.jdk.JdkMarshaller;
 import org.apache.ignite.plugin.extensions.communication.Message;
@@ -177,12 +199,18 @@
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT;
 import static org.apache.ignite.IgniteSystemProperties.getInteger;
 import static org.apache.ignite.IgniteSystemProperties.getString;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.checkActive;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccEnabled;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.tx;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.txStart;
 import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL;
 import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL_FIELDS;
 import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.TEXT;
 import static org.apache.ignite.internal.processors.query.QueryUtils.KEY_FIELD_NAME;
 import static org.apache.ignite.internal.processors.query.QueryUtils.VAL_FIELD_NAME;
 import static org.apache.ignite.internal.processors.query.QueryUtils.VER_FIELD_NAME;
+import static org.apache.ignite.internal.processors.query.h2.PreparedStatementEx.MVCC_CACHE_ID;
+import static org.apache.ignite.internal.processors.query.h2.PreparedStatementEx.MVCC_STATE;
 import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.OFF;
 import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.distributedJoinMode;
 import static org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryType.LOCAL;
@@ -200,13 +228,14 @@
 public class IgniteH2Indexing implements GridQueryIndexing {
     /** A pattern for commands having internal implementation in Ignite. */
     public static final Pattern INTERNAL_CMD_RE = Pattern.compile(
-        "^(create|drop)\\s+index|^alter\\s+table|^copy|^set|^(create|alter|drop)\\s+user", Pattern.CASE_INSENSITIVE);
+        "^(create|drop)\\s+index|^alter\\s+table|^copy|^set|^begin|^commit|^rollback|^(create|alter|drop)\\s+user",
+        Pattern.CASE_INSENSITIVE);
 
     /*
      * Register IO for indexes.
      */
     static {
-        PageIO.registerH2(H2InnerIO.VERSIONS, H2LeafIO.VERSIONS);
+        PageIO.registerH2(H2InnerIO.VERSIONS, H2LeafIO.VERSIONS, H2MvccInnerIO.VERSIONS, H2MvccLeafIO.VERSIONS);
         H2ExtrasInnerIO.register();
         H2ExtrasLeafIO.register();
 
@@ -229,22 +258,19 @@
 
     /** Dummy metadata for update result. */
     public static final List<GridQueryFieldMetadata> UPDATE_RESULT_META = Collections.<GridQueryFieldMetadata>
-        singletonList(new H2SqlFieldMetadata(null, null, "UPDATED", Long.class.getName()));
-
-    /** */
-    private static final int PREPARED_STMT_CACHE_SIZE = 256;
+        singletonList(new H2SqlFieldMetadata(null, null, "UPDATED", Long.class.getName(), -1, -1));
 
     /** */
     private static final int TWO_STEP_QRY_CACHE_SIZE = 1024;
 
-    /** The period of clean up the {@link #stmtCache}. */
+    /** The period of clean up the statement cache. */
     private final Long CLEANUP_STMT_CACHE_PERIOD = Long.getLong(IGNITE_H2_INDEXING_CACHE_CLEANUP_PERIOD, 10_000);
 
     /** The period of clean up the {@link #conns}. */
     @SuppressWarnings("FieldCanBeLocal")
     private final Long CLEANUP_CONNECTIONS_PERIOD = 2000L;
 
-    /** The timeout to remove entry from the {@link #stmtCache} if the thread doesn't perform any queries. */
+    /** The timeout to remove entry from the statement cache if the thread doesn't perform any queries. */
     private final Long STATEMENT_CACHE_THREAD_USAGE_TIMEOUT =
         Long.getLong(IGNITE_H2_INDEXING_CACHE_THREAD_USAGE_TIMEOUT, 600 * 1000);
 
@@ -271,7 +297,8 @@
     private String dbUrl = "jdbc:h2:mem:";
 
     /** */
-    private final ConcurrentMap<Thread, Connection> conns = new ConcurrentHashMap<>();
+    // TODO https://issues.apache.org/jira/browse/IGNITE-9062
+    private final ConcurrentMap<Thread, H2ConnectionWrapper> conns = new ConcurrentHashMap<>();
 
     /** */
     private GridMapQueryExecutor mapQryExec;
@@ -298,44 +325,39 @@
     private final H2RowCacheRegistry rowCache = new H2RowCacheRegistry();
 
     /** */
-    private final ThreadLocal<H2ConnectionWrapper> connCache = new ThreadLocal<H2ConnectionWrapper>() {
-        @Override public H2ConnectionWrapper get() {
-            H2ConnectionWrapper c = super.get();
+    // TODO https://issues.apache.org/jira/browse/IGNITE-9062
+    private final ThreadLocalObjectPool<H2ConnectionWrapper> connectionPool = new ThreadLocalObjectPool<>(IgniteH2Indexing.this::newConnectionWrapper, 5);
+
+    /** */
+    // TODO https://issues.apache.org/jira/browse/IGNITE-9062
+    private final ThreadLocal<ThreadLocalObjectPool.Reusable<H2ConnectionWrapper>> connCache = new ThreadLocal<ThreadLocalObjectPool.Reusable<H2ConnectionWrapper>>() {
+        @Override public ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> get() {
+            ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> reusable = super.get();
 
             boolean reconnect = true;
 
             try {
-                reconnect = c == null || c.connection().isClosed();
+                reconnect = reusable == null || reusable.object().connection().isClosed();
             }
             catch (SQLException e) {
                 U.warn(log, "Failed to check connection status.", e);
             }
 
             if (reconnect) {
-                c = initialValue();
+                reusable = initialValue();
 
-                set(c);
-
-                // Reset statement cache when new connection is created.
-                stmtCache.remove(Thread.currentThread());
+                set(reusable);
             }
 
-            return c;
+            return reusable;
         }
 
-        @Override protected H2ConnectionWrapper initialValue() {
-            Connection c;
+        @Override protected ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> initialValue() {
+            ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> reusableConnection = connectionPool.borrow();
 
-            try {
-                c = DriverManager.getConnection(dbUrl);
-            }
-            catch (SQLException e) {
-                throw new IgniteSQLException("Failed to initialize DB connection: " + dbUrl, e);
-            }
+            conns.put(Thread.currentThread(), reusableConnection.object());
 
-            conns.put(Thread.currentThread(), c);
-
-            return new H2ConnectionWrapper(c);
+            return reusableConnection;
         }
     };
 
@@ -354,11 +376,8 @@
     /** */
     private final ConcurrentMap<QueryTable, GridH2Table> dataTables = new ConcurrentHashMap<>();
 
-    /** Statement cache. */
-    private final ConcurrentHashMap<Thread, H2StatementCache> stmtCache = new ConcurrentHashMap<>();
-
     /** */
-    private final GridBoundedConcurrentLinkedHashMap<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery> twoStepCache =
+    private volatile GridBoundedConcurrentLinkedHashMap<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery> twoStepCache =
         new GridBoundedConcurrentLinkedHashMap<>(TWO_STEP_QRY_CACHE_SIZE);
 
     /** */
@@ -416,6 +435,15 @@
         return sysConn;
     }
 
+    /** */
+    private H2ConnectionWrapper newConnectionWrapper() {
+        try {
+            return new H2ConnectionWrapper(DriverManager.getConnection(dbUrl));
+        } catch (SQLException e) {
+            throw new IgniteSQLException("Failed to initialize DB connection: " + dbUrl, e);
+        }
+    }
+
     /**
      * @param c Connection.
      * @param sql SQL.
@@ -440,7 +468,7 @@
      * @throws SQLException If failed.
      */
     @SuppressWarnings("ConstantConditions")
-    @NotNull private PreparedStatement prepareStatement(Connection c, String sql, boolean useStmtCache)
+    @NotNull public PreparedStatement prepareStatement(Connection c, String sql, boolean useStmtCache)
         throws SQLException {
         return prepareStatement(c, sql, useStmtCache, false);
     }
@@ -465,7 +493,7 @@
 
             PreparedStatement stmt = cache.get(key);
 
-            if (stmt != null && !stmt.isClosed() && !((JdbcStatement)stmt).isCancelled() &&
+            if (stmt != null && !stmt.isClosed() && !stmt.unwrap(JdbcStatement.class).isCancelled() &&
                 !GridSqlQueryParser.prepared(stmt).needRecompile()) {
                 assert stmt.getConnection() == c;
 
@@ -475,9 +503,7 @@
             if (cachedOnly)
                 return null;
 
-            stmt = prepare0(c, sql);
-
-            cache.put(key, stmt);
+            cache.put(key, stmt = PreparedStatementExImpl.wrap(prepare0(c, sql)));
 
             return stmt;
         }
@@ -500,36 +526,25 @@
             GridH2Table.insertHack(true);
 
             try {
-                return c.prepareStatement(sql);
+                return c.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
             }
             finally {
                 GridH2Table.insertHack(false);
             }
         }
         else
-            return c.prepareStatement(sql);
+            return c.prepareStatement(sql, ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY);
     }
 
     /**
      * @return {@link H2StatementCache} associated with current thread.
      */
     @NotNull private H2StatementCache getStatementsCacheForCurrentThread() {
-        Thread curThread = Thread.currentThread();
+        H2StatementCache statementCache = connCache.get().object().statementCache();
 
-        H2StatementCache cache = stmtCache.get(curThread);
+        statementCache.updateLastUsage();
 
-        if (cache == null) {
-            H2StatementCache cache0 = new H2StatementCache(PREPARED_STMT_CACHE_SIZE);
-
-            cache = stmtCache.putIfAbsent(curThread, cache0);
-
-            if (cache == null)
-                cache = cache0;
-        }
-
-        cache.updateLastUsage();
-
-        return cache;
+        return statementCache;
     }
 
     /** {@inheritDoc} */
@@ -547,7 +562,7 @@
      * @throws IgniteCheckedException In case of error.
      */
     private Connection connectionForThread(@Nullable String schema) throws IgniteCheckedException {
-        H2ConnectionWrapper c = connCache.get();
+        H2ConnectionWrapper c = connCache.get().object();
 
         if (c == null)
             throw new IgniteCheckedException("Failed to get DB connection for thread (check log for details).");
@@ -578,12 +593,53 @@
     }
 
     /**
+     * Create and register schema if needed.
+     *
+     * @param schemaName Schema name.
+     * @param predefined Whether this is predefined schema.
+     */
+    private void createSchemaIfNeeded(String schemaName, boolean predefined) {
+        assert Thread.holdsLock(schemaMux);
+
+        if (!predefined)
+            predefined = isSchemaPredefined(schemaName);
+
+        H2Schema schema = new H2Schema(schemaName, predefined);
+
+        H2Schema oldSchema = schemas.putIfAbsent(schemaName, schema);
+
+        if (oldSchema == null)
+            createSchema0(schemaName);
+        else
+            schema = oldSchema;
+
+        schema.incrementUsageCount();
+    }
+
+    /**
+     * Check if schema is predefined.
+     *
+     * @param schemaName Schema name.
+     * @return {@code True} if predefined.
+     */
+    private boolean isSchemaPredefined(String schemaName) {
+        if (F.eq(QueryUtils.DFLT_SCHEMA, schemaName))
+            return true;
+
+        for (H2Schema schema : schemas.values()) {
+            if (F.eq(schema.schemaName(), schemaName) && schema.predefined())
+                return true;
+        }
+
+        return false;
+    }
+
+    /**
      * Creates DB schema if it has not been created yet.
      *
      * @param schema Schema name.
-     * @throws IgniteCheckedException If failed to create db schema.
      */
-    private void createSchema(String schema) throws IgniteCheckedException {
+    private void createSchema0(String schema) {
         executeSystemStatement("CREATE SCHEMA IF NOT EXISTS " + H2Utils.withQuotes(schema));
 
         if (log.isDebugEnabled())
@@ -594,9 +650,8 @@
      * Creates DB schema if it has not been created yet.
      *
      * @param schema Schema name.
-     * @throws IgniteCheckedException If failed to create db schema.
      */
-    private void dropSchema(String schema) throws IgniteCheckedException {
+    private void dropSchema(String schema) {
         executeSystemStatement("DROP SCHEMA IF EXISTS " + H2Utils.withQuotes(schema));
 
         if (log.isDebugEnabled())
@@ -681,7 +736,7 @@
      * Handles SQL exception.
      */
     private void onSqlException() {
-        Connection conn = connCache.get().connection();
+        Connection conn = connCache.get().object().connection();
 
         connCache.set(null);
 
@@ -694,8 +749,12 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void store(GridCacheContext cctx, GridQueryTypeDescriptor type, CacheDataRow row,
-        @Nullable CacheDataRow prevRow, boolean prevRowAvailable) throws IgniteCheckedException {
+    @Override public void store(GridCacheContext cctx,
+        GridQueryTypeDescriptor type,
+        CacheDataRow row,
+        @Nullable CacheDataRow prevRow,
+        boolean prevRowAvailable) throws IgniteCheckedException
+    {
         String cacheName = cctx.name();
 
         H2TableDescriptor tbl = tableDescriptor(schema(cacheName), cacheName, type.name());
@@ -703,7 +762,7 @@
         if (tbl == null)
             return; // Type was rejected.
 
-        tbl.table().update(row, prevRow, prevRowAvailable);
+        tbl.table().update(row, prevRow,  prevRowAvailable);
 
         if (tbl.luceneIndex() != null) {
             long expireTime = row.expireTime();
@@ -981,76 +1040,232 @@
      * @param params Query parameters.
      * @param filter Cache name and key filter.
      * @param enforceJoinOrder Enforce join order of tables in the query.
+     * @param startTx Start transaction flag.
      * @param timeout Query timeout in milliseconds.
      * @param cancel Query cancel.
      * @return Query result.
      * @throws IgniteCheckedException If failed.
      */
     @SuppressWarnings("unchecked")
-    GridQueryFieldsResult queryLocalSqlFields(final String schemaName, final String qry,
+    public GridQueryFieldsResult queryLocalSqlFields(String schemaName, String qry, @Nullable Collection<Object> params,
+        IndexingQueryFilter filter, boolean enforceJoinOrder, boolean startTx, int timeout,
+        GridQueryCancel cancel) throws IgniteCheckedException {
+        return queryLocalSqlFields(schemaName, qry, params, filter, enforceJoinOrder, startTx, timeout, cancel, null);
+    }
+
+    /**
+     * Queries individual fields (generally used by JDBC drivers).
+     *
+     * @param schemaName Schema name.
+     * @param qry Query.
+     * @param params Query parameters.
+     * @param filter Cache name and key filter.
+     * @param enforceJoinOrder Enforce join order of tables in the query.
+     * @param startTx Start transaction flag.
+     * @param timeout Query timeout in milliseconds.
+     * @param cancel Query cancel.
+     * @param mvccTracker Query tracker.
+     * @return Query result.
+     * @throws IgniteCheckedException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    GridQueryFieldsResult queryLocalSqlFields(final String schemaName, String qry,
         @Nullable final Collection<Object> params, final IndexingQueryFilter filter, boolean enforceJoinOrder,
-        final int timeout, final GridQueryCancel cancel) throws IgniteCheckedException {
-        final Connection conn = connectionForSchema(schemaName);
+        boolean startTx, int timeout, final GridQueryCancel cancel,
+        MvccQueryTracker mvccTracker) throws IgniteCheckedException {
 
-        H2Utils.setupConnection(conn, false, enforceJoinOrder);
+        GridNearTxLocal tx = null;
 
-        final PreparedStatement stmt = preparedStatementWithParams(conn, qry, params, true);
+        boolean mvccEnabled = mvccEnabled(kernalContext());
 
-        if (GridSqlQueryParser.checkMultipleStatements(stmt))
-            throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
-
-        Prepared p = GridSqlQueryParser.prepared(stmt);
-
-        if (DmlStatementsProcessor.isDmlStatement(p)) {
-            SqlFieldsQuery fldsQry = new SqlFieldsQuery(qry);
-
-            if (params != null)
-                fldsQry.setArgs(params.toArray());
-
-            fldsQry.setEnforceJoinOrder(enforceJoinOrder);
-            fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS);
-
-            return dmlProc.updateSqlFieldsLocal(schemaName, conn, p, fldsQry, filter, cancel);
-        }
-        else if (DdlStatementsProcessor.isDdlStatement(p))
-            throw new IgniteSQLException("DDL statements are supported for the whole cluster only",
-                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
-
-        List<GridQueryFieldMetadata> meta;
+        assert mvccEnabled || mvccTracker == null;
 
         try {
-            meta = H2Utils.meta(stmt.getMetaData());
-        }
-        catch (SQLException e) {
-            throw new IgniteCheckedException("Cannot prepare query metadata", e);
-        }
+            final Connection conn = connectionForSchema(schemaName);
 
-        final GridH2QueryContext ctx = new GridH2QueryContext(nodeId, nodeId, 0, LOCAL)
-            .filter(filter).distributedJoinMode(OFF);
+            H2Utils.setupConnection(conn, false, enforceJoinOrder);
 
-        return new GridQueryFieldsResultAdapter(meta, null) {
-            @Override public GridCloseableIterator<List<?>> iterator() throws IgniteCheckedException {
-                assert GridH2QueryContext.get() == null;
+            PreparedStatement stmt = preparedStatementWithParams(conn, qry, params, true);
 
-                GridH2QueryContext.set(ctx);
+            if (GridSqlQueryParser.checkMultipleStatements(stmt))
+                throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
 
-                GridRunningQueryInfo run = new GridRunningQueryInfo(qryIdGen.incrementAndGet(), qry, SQL_FIELDS,
-                    schemaName, U.currentTimeMillis(), cancel, true);
+            Prepared p = GridSqlQueryParser.prepared(stmt);
 
-                runs.putIfAbsent(run.id(), run);
+            if (DmlStatementsProcessor.isDmlStatement(p)) {
+                SqlFieldsQuery fldsQry = new SqlFieldsQuery(qry);
 
-                try {
-                    ResultSet rs = executeSqlQueryWithTimer(stmt, conn, qry, params, timeout, cancel);
+                if (params != null)
+                    fldsQry.setArgs(params.toArray());
 
-                    return new H2FieldsIterator(rs);
+                fldsQry.setEnforceJoinOrder(enforceJoinOrder);
+                fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS);
+
+                return dmlProc.updateSqlFieldsLocal(schemaName, conn, p, fldsQry, filter, cancel);
+            }
+            else if (DdlStatementsProcessor.isDdlStatement(p)) {
+                throw new IgniteSQLException("DDL statements are supported for the whole cluster only.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+            }
+
+            final GridH2QueryContext ctx = new GridH2QueryContext(nodeId, nodeId, 0, LOCAL)
+                .filter(filter).distributedJoinMode(OFF);
+
+            boolean forUpdate = GridSqlQueryParser.isForUpdateQuery(p);
+
+            if (forUpdate && !mvccEnabled)
+                throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional " +
+                    "cache with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+            GridNearTxSelectForUpdateFuture sfuFut = null;
+
+            if (mvccEnabled) {
+                if (mvccTracker == null)
+                    mvccTracker = mvccTracker(stmt, startTx);
+
+                if (mvccTracker != null) {
+                    ctx.mvccSnapshot(mvccTracker.snapshot());
+
+                    if ((tx = checkActive(tx(this.ctx))) != null) {
+                        int tm1 = (int)tx.remainingTime(), tm2 = timeout;
+
+                        timeout = tm1 > 0 && tm2 > 0 ? Math.min(tm1, tm2) : Math.max(tm1, tm2);
+                    }
                 }
-                finally {
-                    GridH2QueryContext.clearThreadLocal();
 
-                    runs.remove(run.id());
+                if (forUpdate) {
+                    if (mvccTracker == null)
+                        throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional " +
+                            "cache with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+                    GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(p);
+
+                    qry = GridSqlQueryParser.rewriteQueryForUpdateIfNeeded(stmt0, forUpdate = tx != null);
+
+                    stmt = preparedStatementWithParams(conn, qry, params, true);
+
+                    if (forUpdate) {
+                        GridCacheContext cctx = mvccTracker.context();
+
+                        try {
+                            if (tx.topologyVersionSnapshot() == null)
+                                new TxTopologyVersionFuture(tx, cctx).get();
+                        }
+                        catch (Exception e) {
+                            throw new IgniteSQLException("Failed to lock topology for SELECT FOR UPDATE query.", e);
+                        }
+
+                        sfuFut = new GridNearTxSelectForUpdateFuture(cctx, tx, timeout);
+
+                        sfuFut.initLocal();
+                    }
                 }
             }
-        };
+
+            List<GridQueryFieldMetadata> meta;
+
+            try {
+                meta = H2Utils.meta(stmt.getMetaData());
+
+                if (forUpdate) {
+                    assert meta.size() >= 1;
+
+                    meta = meta.subList(0, meta.size() - 1);
+                }
+            }
+            catch (SQLException e) {
+                throw new IgniteCheckedException("Cannot prepare query metadata", e);
+            }
+
+            GridNearTxLocal tx0 = tx;
+            MvccQueryTracker mvccTracker0 = mvccTracker;
+            GridNearTxSelectForUpdateFuture sfuFut0 = sfuFut;
+            PreparedStatement stmt0 = stmt;
+            String qry0 = qry;
+            int timeout0 = timeout;
+
+            return new GridQueryFieldsResultAdapter(meta, null) {
+                @Override public GridCloseableIterator<List<?>> iterator() throws IgniteCheckedException {
+                    assert GridH2QueryContext.get() == null;
+
+                    GridH2QueryContext.set(ctx);
+
+                    GridRunningQueryInfo run = new GridRunningQueryInfo(qryIdGen.incrementAndGet(), qry0,
+                        SQL_FIELDS, schemaName, U.currentTimeMillis(), cancel, true);
+
+                    runs.putIfAbsent(run.id(), run);
+
+                    try {
+                        ResultSet rs = executeSqlQueryWithTimer(stmt0, conn, qry0, params, timeout0, cancel);
+
+                        if (sfuFut0 != null) {
+                            assert tx0.mvccSnapshot() != null;
+
+                            ResultSetEnlistFuture enlistFut = ResultSetEnlistFuture.future(
+                                IgniteH2Indexing.this.ctx.localNodeId(),
+                                tx0.nearXidVersion(),
+                                tx0.mvccSnapshot(),
+                                tx0.threadId(),
+                                IgniteUuid.randomUuid(),
+                                -1,
+                                null,
+                                tx0,
+                                timeout0,
+                                sfuFut0.cache(),
+                                rs
+                            );
+
+                            enlistFut.listen(new IgniteInClosure<IgniteInternalFuture<Long>>() {
+                                @Override public void apply(IgniteInternalFuture<Long> fut) {
+                                    if (fut.error() != null)
+                                        sfuFut0.onResult(IgniteH2Indexing.this.ctx.localNodeId(), 0L, false, fut.error());
+                                    else
+                                        sfuFut0.onResult(IgniteH2Indexing.this.ctx.localNodeId(), fut.result(), false, null);
+                                }
+                            });
+
+                            enlistFut.init();
+
+                            try {
+                                sfuFut0.get();
+
+                                rs.beforeFirst();
+                            }
+                            catch (Exception e) {
+                                U.closeQuiet(rs);
+
+                                throw new IgniteSQLException("Failed to obtain locks on result of SELECT FOR UPDATE.",
+                                    e);
+                            }
+                        }
+
+                        return new H2FieldsIterator(rs, mvccTracker0, sfuFut0 != null);
+                    }
+                    catch (IgniteCheckedException | RuntimeException | Error e) {
+                        try {
+                            if (mvccTracker0 != null)
+                                mvccTracker0.onDone();
+                        }
+                        catch (Exception e0) {
+                            e.addSuppressed(e0);
+                        }
+
+                        throw e;
+                    }
+                    finally {
+                        GridH2QueryContext.clearThreadLocal();
+
+                        runs.remove(run.id());
+                    }
+                }
+            };
+        }
+        catch (IgniteCheckedException | RuntimeException | Error e) {
+            if (mvccEnabled && (tx != null || (tx = tx(ctx)) != null))
+                tx.setRollbackOnly();
+
+            throw e;
+        }
     }
 
     /** {@inheritDoc} */
@@ -1128,7 +1343,7 @@
      * @return Prepared statement with set parameters.
      * @throws IgniteCheckedException If failed.
      */
-    private PreparedStatement preparedStatementWithParams(Connection conn, String sql, Collection<Object> params,
+    public PreparedStatement preparedStatementWithParams(Connection conn, String sql, Collection<Object> params,
         boolean useStmtCache) throws IgniteCheckedException {
         final PreparedStatement stmt;
 
@@ -1245,7 +1460,7 @@
      * @return Result.
      * @throws IgniteCheckedException If failed.
      */
-    private ResultSet executeSqlQueryWithTimer(PreparedStatement stmt, Connection conn, String sql,
+    public ResultSet executeSqlQueryWithTimer(PreparedStatement stmt, Connection conn, String sql,
         @Nullable Collection<Object> params, int timeoutMillis, @Nullable GridQueryCancel cancel)
         throws IgniteCheckedException {
         long start = U.currentTimeMillis();
@@ -1258,19 +1473,17 @@
             long longQryExecTimeout = ctx.config().getLongQueryWarningTimeout();
 
             if (time > longQryExecTimeout) {
-                String msg = "Query execution is too long (" + time + " ms): " + sql;
-
                 ResultSet plan = executeSqlQuery(conn, preparedStatementWithParams(conn, "EXPLAIN " + sql,
                     params, false), 0, null);
 
                 plan.next();
 
                 // Add SQL explain result message into log.
-                String longMsg = "Query execution is too long [time=" + time + " ms, sql='" + sql + '\'' +
+                String msg = "Query execution is too long [time=" + time + " ms, sql='" + sql + '\'' +
                     ", plan=" + U.nl() + plan.getString(1) + U.nl() + ", parameters=" +
                     (params == null ? "[]" : Arrays.deepToString(params.toArray())) + "]";
 
-                LT.warn(log, longMsg, msg);
+                LT.warn(log, msg);
             }
 
             return rs;
@@ -1303,10 +1516,12 @@
     @Override public FieldsQueryCursor<List<?>> queryLocalSqlFields(String schemaName, SqlFieldsQuery qry,
         final boolean keepBinary, IndexingQueryFilter filter, GridQueryCancel cancel) throws IgniteCheckedException {
         String sql = qry.getSql();
-        Object[] args = qry.getArgs();
+        List<Object> params = F.asList(qry.getArgs());
+        boolean enforceJoinOrder = qry.isEnforceJoinOrder(), startTx = autoStartTx(qry);
+        int timeout = qry.getTimeout();
 
-        final GridQueryFieldsResult res = queryLocalSqlFields(schemaName, sql, F.asList(args), filter,
-            qry.isEnforceJoinOrder(), qry.getTimeout(), cancel);
+        final GridQueryFieldsResult res = queryLocalSqlFields(schemaName, sql, params, filter,
+            enforceJoinOrder, startTx, timeout, cancel);
 
         QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(new Iterable<List<?>>() {
             @SuppressWarnings("NullableProblems")
@@ -1398,8 +1613,17 @@
 
         H2Utils.setupConnection(conn, false, false);
 
-        GridH2QueryContext.set(new GridH2QueryContext(nodeId, nodeId, 0, LOCAL).filter(filter)
-            .distributedJoinMode(OFF));
+        GridH2QueryContext qctx = new GridH2QueryContext(nodeId, nodeId, 0, LOCAL).filter(filter)
+            .distributedJoinMode(OFF);
+
+        PreparedStatement stmt = preparedStatementWithParams(conn, sql, params, true);
+
+        MvccQueryTracker mvccTracker = mvccTracker(stmt, false);
+
+        if (mvccTracker != null)
+            qctx.mvccSnapshot(mvccTracker.snapshot());
+
+        GridH2QueryContext.set(qctx);
 
         GridRunningQueryInfo run = new GridRunningQueryInfo(qryIdGen.incrementAndGet(), qry, SQL, schemaName,
             U.currentTimeMillis(), null, true);
@@ -1407,27 +1631,125 @@
         runs.put(run.id(), run);
 
         try {
-            ResultSet rs = executeSqlQueryWithTimer(conn, sql, params, true, 0, cancel);
+            ResultSet rs = executeSqlQueryWithTimer(stmt, conn, sql, params, 0, cancel);
 
             return new H2KeyValueIterator(rs);
         }
         finally {
             GridH2QueryContext.clearThreadLocal();
 
+            if (mvccTracker != null)
+                mvccTracker.onDone();
+
             runs.remove(run.id());
         }
     }
 
     /**
+     * Initialises MVCC filter and returns MVCC query tracker if needed.
+     * @param stmt Prepared statement.
+     * @param startTx Start transaction flag.
+     * @return MVCC query tracker or {@code null} if MVCC is disabled for involved caches.
+     */
+    private MvccQueryTracker mvccTracker(PreparedStatement stmt, boolean startTx) throws IgniteCheckedException {
+        boolean mvccEnabled;
+
+        GridCacheContext mvccCacheCtx = null;
+
+        try {
+            if (stmt.isWrapperFor(PreparedStatementEx.class)) {
+                PreparedStatementEx stmtEx = stmt.unwrap(PreparedStatementEx.class);
+
+                Boolean mvccState = stmtEx.meta(MVCC_STATE);
+
+                mvccEnabled = mvccState != null ? mvccState : checkMvcc(stmt);
+
+                if (mvccEnabled) {
+                    Integer cacheId = stmtEx.meta(MVCC_CACHE_ID);
+
+                    assert cacheId != null;
+
+                    mvccCacheCtx = ctx.cache().context().cacheContext(cacheId);
+
+                    assert mvccCacheCtx != null;
+                }
+            }
+            else
+                mvccEnabled = checkMvcc(stmt);
+        }
+        catch (SQLException e) {
+            throw new IgniteSQLException(e);
+        }
+
+        assert !mvccEnabled || mvccCacheCtx != null;
+
+        return mvccEnabled ? MvccUtils.mvccTracker(mvccCacheCtx, startTx) : null;
+    }
+
+    /**
+     * Checks if statement uses MVCC caches. If it does, additional metadata is added to statement.
+     *
+     * @param stmt Statement to check.
+     * @return {@code True} if there MVCC cache involved in statement.
+     * @throws SQLException If parser failed.
+     */
+    private static Boolean checkMvcc(PreparedStatement stmt) throws SQLException {
+        GridSqlQueryParser parser = new GridSqlQueryParser(false);
+
+        parser.parse(GridSqlQueryParser.prepared(stmt));
+
+        Boolean mvccEnabled = null;
+        Integer mvccCacheId = null;
+        GridCacheContext ctx0 = null;
+
+        for (Object o : parser.objectsMap().values()) {
+            if (o instanceof GridSqlAlias)
+                o = GridSqlAlias.unwrap((GridSqlAst) o);
+            if (o instanceof GridSqlTable && ((GridSqlTable) o).dataTable() != null) {
+                GridCacheContext cctx = ((GridSqlTable) o).dataTable().cache();
+
+                if (mvccEnabled == null) {
+                    mvccEnabled = cctx.mvccEnabled();
+                    mvccCacheId = cctx.cacheId();
+                    ctx0 = cctx;
+                }
+                else if (mvccEnabled != cctx.mvccEnabled())
+                    MvccUtils.throwAtomicityModesMismatchException(ctx0, cctx);
+            }
+        }
+
+        if (mvccEnabled == null)
+            return false;
+
+        // Remember mvccEnabled flag to avoid further additional parsing if statement obtained from the statement cache.
+        if (stmt.isWrapperFor(PreparedStatementEx.class)) {
+            PreparedStatementEx stmtEx = stmt.unwrap(PreparedStatementEx.class);
+
+            if (mvccEnabled) {
+                assert mvccCacheId != null;
+
+                stmtEx.putMeta(MVCC_CACHE_ID, mvccCacheId);
+                stmtEx.putMeta(MVCC_STATE, Boolean.TRUE);
+            }
+            else
+                stmtEx.putMeta(MVCC_STATE, Boolean.FALSE);
+        }
+
+        return mvccEnabled;
+    }
+
+    /**
      * @param schemaName Schema name.
      * @param qry Query.
      * @param keepCacheObj Flag to keep cache object.
      * @param enforceJoinOrder Enforce join order of tables.
+     * @param startTx Start transaction flag.
      * @param timeoutMillis Query timeout.
      * @param cancel Cancel object.
      * @param params Query parameters.
      * @param parts Partitions.
      * @param lazy Lazy query execution flag.
+     * @param mvccTracker Query tracker.
      * @return Iterable result.
      */
     private Iterable<List<?>> runQueryTwoStep(
@@ -1435,19 +1757,33 @@
         final GridCacheTwoStepQuery qry,
         final boolean keepCacheObj,
         final boolean enforceJoinOrder,
+        boolean startTx,
         final int timeoutMillis,
         final GridQueryCancel cancel,
         final Object[] params,
         final int[] parts,
-        final boolean lazy
-    ) {
-        return new Iterable<List<?>>() {
-            @SuppressWarnings("NullableProblems")
-            @Override public Iterator<List<?>> iterator() {
-                return rdcQryExec.query(schemaName, qry, keepCacheObj, enforceJoinOrder, timeoutMillis, cancel, params,
-                    parts, lazy);
-            }
-        };
+        final boolean lazy,
+        MvccQueryTracker mvccTracker) {
+        assert !qry.mvccEnabled() || !F.isEmpty(qry.cacheIds());
+
+        try {
+            final MvccQueryTracker tracker = mvccTracker == null && qry.mvccEnabled() ?
+                MvccUtils.mvccTracker(ctx.cache().context().cacheContext(qry.cacheIds().get(0)), startTx) : mvccTracker;
+
+            if (qry.forUpdate())
+                qry.forUpdate(checkActive(tx(ctx)) != null);
+
+            return new Iterable<List<?>>() {
+                @SuppressWarnings("NullableProblems")
+                @Override public Iterator<List<?>> iterator() {
+                    return rdcQryExec.query(schemaName, qry, keepCacheObj, enforceJoinOrder, timeoutMillis,
+                        cancel, params, parts, lazy, tracker);
+                }
+            };
+        }
+        catch (IgniteCheckedException e) {
+            throw new CacheException(e);
+        }
     }
 
     /**
@@ -1504,7 +1840,7 @@
             fqry.setTimeout(qry.getTimeout(), TimeUnit.MILLISECONDS);
 
         final QueryCursor<List<?>> res =
-            querySqlFields(schemaName, fqry, null, keepBinary, true, null).get(0);
+            querySqlFields(schemaName, fqry, null, keepBinary, true, null, null).get(0);
 
         final Iterable<Cache.Entry<K, V>> converted = new Iterable<Cache.Entry<K, V>>() {
             @Override public Iterator<Cache.Entry<K, V>> iterator() {
@@ -1540,21 +1876,22 @@
      * Try executing query using native facilities.
      *
      * @param schemaName Schema name.
-     * @param sql Query.
+     * @param qry Query.
      * @param cliCtx Client context, or {@code null} if not applicable.
      * @return Result or {@code null} if cannot parse/process this query.
      */
-    private List<FieldsQueryCursor<List<?>>> tryQueryDistributedSqlFieldsNative(String schemaName, String sql,
+    @SuppressWarnings({"ConstantConditions", "StatementWithEmptyBody"})
+    private List<FieldsQueryCursor<List<?>>> tryQueryDistributedSqlFieldsNative(String schemaName, SqlFieldsQuery qry,
         @Nullable SqlClientContext cliCtx) {
         // Heuristic check for fast return.
-        if (!INTERNAL_CMD_RE.matcher(sql.trim()).find())
+        if (!INTERNAL_CMD_RE.matcher(qry.getSql().trim()).find())
             return null;
 
         // Parse.
         SqlCommand cmd;
 
         try {
-            SqlParser parser = new SqlParser(schemaName, sql);
+            SqlParser parser = new SqlParser(schemaName, qry.getSql());
 
             cmd = parser.nextCommand();
 
@@ -1562,16 +1899,16 @@
             if (parser.nextCommand() != null)
                 return null;
 
-            // Currently supported commands are:
-            // CREATE/DROP INDEX
-            // COPY
-            // ALTER TABLE
-            // SET STREAMING
-            // CREATE/ALTER/DROP USER
-            if (!(cmd instanceof SqlCreateIndexCommand || cmd instanceof SqlDropIndexCommand
-                || cmd instanceof SqlAlterTableCommand || cmd instanceof SqlBulkLoadCommand
+            if (!(cmd instanceof SqlCreateIndexCommand
+                || cmd instanceof SqlDropIndexCommand
+                || cmd instanceof SqlBeginTransactionCommand
+                || cmd instanceof SqlCommitTransactionCommand
+                || cmd instanceof SqlRollbackTransactionCommand
+                || cmd instanceof SqlBulkLoadCommand
+                || cmd instanceof SqlAlterTableCommand
                 || cmd instanceof SqlSetStreamingCommand
-                || cmd instanceof SqlCreateUserCommand || cmd instanceof SqlAlterUserCommand
+                || cmd instanceof SqlCreateUserCommand
+                || cmd instanceof SqlAlterUserCommand
                 || cmd instanceof SqlDropUserCommand))
                 return null;
         }
@@ -1581,7 +1918,7 @@
         catch (Exception e) {
             // Cannot parse, return.
             if (log.isDebugEnabled())
-                log.debug("Failed to parse SQL with native parser [qry=" + sql + ", err=" + e + ']');
+                log.debug("Failed to parse SQL with native parser [qry=" + qry.getSql() + ", err=" + e + ']');
 
             if (!IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK))
                 return null;
@@ -1591,42 +1928,40 @@
             if (e instanceof SqlParseException)
                 code = ((SqlParseException)e).code();
 
-            throw new IgniteSQLException("Failed to parse DDL statement: " + sql + ": " + e.getMessage(),
+            throw new IgniteSQLException("Failed to parse DDL statement: " + qry.getSql() + ": " + e.getMessage(),
                 code, e);
         }
 
         // Execute.
-        if (cmd instanceof SqlBulkLoadCommand) {
-            FieldsQueryCursor<List<?>> cursor = dmlProc.runNativeDmlStatement(sql, cmd);
+        try {
+            if (cmd instanceof SqlCreateIndexCommand
+                || cmd instanceof SqlDropIndexCommand
+                || cmd instanceof SqlAlterTableCommand
+                || cmd instanceof SqlCreateUserCommand
+                || cmd instanceof SqlAlterUserCommand
+                || cmd instanceof SqlDropUserCommand)
+                return Collections.singletonList(ddlProc.runDdlStatement(qry.getSql(), cmd));
+            else if (cmd instanceof SqlBulkLoadCommand)
+                return Collections.singletonList(dmlProc.runNativeDmlStatement(qry.getSql(), cmd));
+            else if (cmd instanceof SqlSetStreamingCommand) {
+                if (cliCtx == null)
+                    throw new IgniteSQLException("SET STREAMING command can only be executed from JDBC or ODBC driver.");
 
-            return Collections.singletonList(cursor);
-        }
-        else if (cmd instanceof SqlSetStreamingCommand) {
-            if (cliCtx == null)
-                throw new IgniteSQLException("SET STREAMING command can only be executed from JDBC or ODBC driver.");
+                SqlSetStreamingCommand setCmd = (SqlSetStreamingCommand)cmd;
 
-            SqlSetStreamingCommand setCmd = (SqlSetStreamingCommand)cmd;
-
-            boolean on = setCmd.isTurnOn();
-
-            if (on)
-                cliCtx.enableStreaming(setCmd.allowOverwrite(), setCmd.flushFrequency(),
-                    setCmd.perNodeBufferSize(), setCmd.perNodeParallelOperations(), setCmd.isOrdered());
+                if (setCmd.isTurnOn())
+                    cliCtx.enableStreaming(setCmd.allowOverwrite(), setCmd.flushFrequency(),
+                        setCmd.perNodeBufferSize(), setCmd.perNodeParallelOperations(), setCmd.isOrdered());
+                else
+                    cliCtx.disableStreaming();
+            }
             else
-                cliCtx.disableStreaming();
+                processTxCommand(cmd, qry);
 
             return Collections.singletonList(H2Utils.zeroCursor());
         }
-        else {
-            try {
-                FieldsQueryCursor<List<?>> cursor = ddlProc.runDdlStatement(sql, cmd);
-
-                return Collections.singletonList(cursor);
-            }
-            catch (IgniteCheckedException e) {
-                throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sql + "]: "
-                    + e.getMessage(), e);
-            }
+        catch (IgniteCheckedException e) {
+            throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + qry.getSql() + ']', e);
         }
     }
 
@@ -1644,88 +1979,207 @@
                 IgniteQueryErrorCode.STMT_TYPE_MISMATCH);
     }
 
+    /**
+     * Process transactional command.
+     * @param cmd Command.
+     * @param qry Query.
+     * @throws IgniteCheckedException if failed.
+     */
+    private void processTxCommand(SqlCommand cmd, SqlFieldsQuery qry) throws IgniteCheckedException {
+        if (!mvccEnabled(ctx))
+            throw new IgniteSQLException("MVCC must be enabled in order to invoke transactional operation: " +
+                qry.getSql(), IgniteQueryErrorCode.MVCC_DISABLED);
+
+        NestedTxMode nestedTxMode = qry instanceof SqlFieldsQueryEx ? ((SqlFieldsQueryEx)qry).getNestedTxMode() :
+            NestedTxMode.DEFAULT;
+
+        GridNearTxLocal tx = tx(ctx);
+
+        if (cmd instanceof SqlBeginTransactionCommand) {
+            if (tx != null) {
+                if (nestedTxMode == null)
+                    nestedTxMode = NestedTxMode.DEFAULT;
+
+                switch (nestedTxMode) {
+                    case COMMIT:
+                        doCommit(tx);
+
+                        txStart(ctx, qry.getTimeout());
+
+                        break;
+
+                    case IGNORE:
+                        log.warning("Transaction has already been started, ignoring BEGIN command.");
+
+                        break;
+
+                    case ERROR:
+                        throw new IgniteSQLException("Transaction has already been started.",
+                            IgniteQueryErrorCode.TRANSACTION_EXISTS);
+
+                    default:
+                        throw new IgniteSQLException("Unexpected nested transaction handling mode: " +
+                            nestedTxMode.name());
+                }
+            }
+            else
+                txStart(ctx, qry.getTimeout());
+        }
+        else if (cmd instanceof SqlCommitTransactionCommand) {
+            // Do nothing if there's no transaction.
+            if (tx != null)
+                doCommit(tx);
+        }
+        else {
+            assert cmd instanceof SqlRollbackTransactionCommand;
+
+            // Do nothing if there's no transaction.
+            if (tx != null)
+                doRollback(tx);
+        }
+    }
+
+    /**
+     * Commit and properly close transaction.
+     * @param tx Transaction.
+     * @throws IgniteCheckedException if failed.
+     */
+    @SuppressWarnings("ThrowFromFinallyBlock")
+    private void doCommit(@NotNull GridNearTxLocal tx) throws IgniteCheckedException {
+        try {
+            if (!tx.isRollbackOnly())
+                tx.commit();
+        }
+        finally {
+            closeTx(tx);
+        }
+    }
+
+    /**
+     * Rollback and properly close transaction.
+     * @param tx Transaction.
+     * @throws IgniteCheckedException if failed.
+     */
+    @SuppressWarnings("ThrowFromFinallyBlock")
+    private void doRollback(@NotNull GridNearTxLocal tx) throws IgniteCheckedException {
+        try {
+            tx.rollback();
+        }
+        finally {
+            closeTx(tx);
+        }
+    }
+
+    /**
+     * Properly close transaction.
+     * @param tx Transaction.
+     * @throws IgniteCheckedException if failed.
+     */
+    private void closeTx(@NotNull GridNearTxLocal tx) throws IgniteCheckedException {
+        try {
+            tx.close();
+        }
+        finally {
+            ctx.cache().context().tm().resetContext();
+        }
+    }
 
     /** {@inheritDoc} */
     @SuppressWarnings({"StringEquality", "unchecked"})
     @Override public List<FieldsQueryCursor<List<?>>> querySqlFields(String schemaName, SqlFieldsQuery qry,
-        @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, GridQueryCancel cancel) {
-        List<FieldsQueryCursor<List<?>>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry.getSql(), cliCtx);
+        @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, MvccQueryTracker tracker,
+        GridQueryCancel cancel) {
+        boolean mvccEnabled = mvccEnabled(ctx), startTx = autoStartTx(qry);
 
-        if (res != null)
-            return res;
+        try {
+            List<FieldsQueryCursor<List<?>>> res = tryQueryDistributedSqlFieldsNative(schemaName, qry, cliCtx);
 
-        {
-            // First, let's check if we already have a two-step query for this statement...
-            H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, qry.getSql(),
-                qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), qry.isLocal());
+            if (res != null)
+                return res;
 
-            H2TwoStepCachedQuery cachedQry;
+            {
+                // First, let's check if we already have a two-step query for this statement...
+                H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, qry.getSql(),
+                    qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), qry.isLocal());
 
-            if ((cachedQry = twoStepCache.get(cachedQryKey)) != null) {
-                checkQueryType(qry, true);
+                H2TwoStepCachedQuery cachedQry;
 
-                GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy();
+                if ((cachedQry = twoStepCache.get(cachedQryKey)) != null) {
+                    checkQueryType(qry, true);
 
-                List<GridQueryFieldMetadata> meta = cachedQry.meta();
+                    GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy();
 
-                res = Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary,
+                    List<GridQueryFieldMetadata> meta = cachedQry.meta();
+
+                    res = Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary,
+                        startTx, tracker, cancel));
+
+                    if (!twoStepQry.explain())
+                        twoStepCache.putIfAbsent(cachedQryKey, new H2TwoStepCachedQuery(meta, twoStepQry.copy()));
+
+                    return res;
+                }
+            }
+
+            {
+                // Second, let's check if we already have a parsed statement...
+                PreparedStatement cachedStmt;
+
+                if ((cachedStmt = cachedStatement(connectionForSchema(schemaName), qry.getSql())) != null) {
+                    Prepared prepared = GridSqlQueryParser.prepared(cachedStmt);
+
+                    // We may use this cached statement only for local queries and non queries.
+                    if (qry.isLocal() || !prepared.isQuery())
+                        return (List<FieldsQueryCursor<List<?>>>)doRunPrepared(schemaName, prepared, qry, null, null,
+                            keepBinary, startTx, tracker, cancel);
+                }
+            }
+
+            res = new ArrayList<>(1);
+
+            int firstArg = 0;
+
+            String remainingSql = qry.getSql();
+
+            while (remainingSql != null) {
+                ParsingResult parseRes = parseAndSplit(schemaName,
+                    remainingSql != qry.getSql() ? cloneFieldsQuery(qry).setSql(remainingSql) : qry, firstArg);
+
+                // Let's avoid second reflection getter call by returning Prepared object too
+                Prepared prepared = parseRes.prepared();
+
+                GridCacheTwoStepQuery twoStepQry = parseRes.twoStepQuery();
+
+                List<GridQueryFieldMetadata> meta = parseRes.meta();
+
+                SqlFieldsQuery newQry = parseRes.newQuery();
+
+                remainingSql = parseRes.remainingSql();
+
+                if (remainingSql != null && failOnMultipleStmts)
+                    throw new IgniteSQLException("Multiple statements queries are not supported");
+
+                firstArg += prepared.getParameters().size();
+
+                res.addAll(doRunPrepared(schemaName, prepared, newQry, twoStepQry, meta, keepBinary, startTx, tracker,
                     cancel));
 
-                if (!twoStepQry.explain())
-                    twoStepCache.putIfAbsent(cachedQryKey, new H2TwoStepCachedQuery(meta, twoStepQry.copy()));
-
-                return res;
-            }
-        }
-
-        {
-            // Second, let's check if we already have a parsed statement...
-            PreparedStatement cachedStmt;
-
-            if ((cachedStmt = cachedStatement(connectionForSchema(schemaName), qry.getSql())) != null) {
-                Prepared prepared = GridSqlQueryParser.prepared(cachedStmt);
-
-                // We may use this cached statement only for local queries and non queries.
-                if (qry.isLocal() || !prepared.isQuery())
-                    return (List<FieldsQueryCursor<List<?>>>)doRunPrepared(schemaName, prepared, qry, null,
-                        null, keepBinary, cancel);
-            }
-        }
-
-        res = new ArrayList<>(1);
-
-        int firstArg = 0;
-
-        String remainingSql = qry.getSql();
-
-        while (remainingSql != null) {
-            ParsingResult parseRes = parseAndSplit(schemaName,
-                remainingSql != qry.getSql() ? cloneFieldsQuery(qry).setSql(remainingSql) : qry, firstArg);
-
-            // Let's avoid second reflection getter call by returning Prepared object too
-            Prepared prepared = parseRes.prepared();
-
-            GridCacheTwoStepQuery twoStepQry = parseRes.twoStepQuery();
-
-            List<GridQueryFieldMetadata> meta = parseRes.meta();
-
-            SqlFieldsQuery newQry = parseRes.newQuery();
-
-            remainingSql = parseRes.remainingSql();
-
-            if (remainingSql != null && failOnMultipleStmts)
-                throw new IgniteSQLException("Multiple statements queries are not supported");
-
-            firstArg += prepared.getParameters().size();
-
-            res.addAll(doRunPrepared(schemaName, prepared, newQry, twoStepQry, meta, keepBinary, cancel));
-
-            if (parseRes.twoStepQuery() != null && parseRes.twoStepQueryKey() != null &&
+                if (parseRes.twoStepQuery() != null && parseRes.twoStepQueryKey() != null &&
                     !parseRes.twoStepQuery().explain())
-                twoStepCache.putIfAbsent(parseRes.twoStepQueryKey(), new H2TwoStepCachedQuery(meta, twoStepQry.copy()));
-        }
+                    twoStepCache.putIfAbsent(parseRes.twoStepQueryKey(), new H2TwoStepCachedQuery(meta,
+                        twoStepQry.copy()));
+            }
 
-        return res;
+            return res;
+        }
+        catch (RuntimeException | Error e) {
+            GridNearTxLocal tx;
+
+            if (mvccEnabled && (tx = tx(ctx)) != null)
+                tx.setRollbackOnly();
+
+            throw e;
+        }
     }
 
     /**
@@ -1736,12 +2190,15 @@
      * @param twoStepQry Two-step query if this query must be executed in a distributed way.
      * @param meta Metadata for {@code twoStepQry}.
      * @param keepBinary Whether binary objects must not be deserialized automatically.
-     * @param cancel Query cancel state holder.    @return Query result.
+     * @param startTx Start transactionq flag.
+     * @param tracker MVCC tracker.
+     * @param cancel Query cancel state holder.
+     * @return Query result.
      */
     @SuppressWarnings("unchecked")
     private List<? extends FieldsQueryCursor<List<?>>> doRunPrepared(String schemaName, Prepared prepared,
-        SqlFieldsQuery qry, GridCacheTwoStepQuery twoStepQry,
-        List<GridQueryFieldMetadata> meta, boolean keepBinary, GridQueryCancel cancel) {
+        SqlFieldsQuery qry, GridCacheTwoStepQuery twoStepQry, List<GridQueryFieldMetadata> meta, boolean keepBinary,
+        boolean startTx, MvccQueryTracker tracker, GridQueryCancel cancel) {
         String sqlQry = qry.getSql();
 
         boolean loc = qry.isLocal();
@@ -1784,12 +2241,7 @@
                     throw new IgniteSQLException("DDL statements are not supported for LOCAL caches",
                         IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
 
-                try {
-                    return Collections.singletonList(ddlProc.runDdlStatement(sqlQry, prepared));
-                }
-                catch (IgniteCheckedException e) {
-                    throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
-                }
+                return Collections.singletonList(ddlProc.runDdlStatement(sqlQry, prepared));
             }
 
             if (prepared instanceof NoOperation) {
@@ -1812,7 +2264,7 @@
             checkQueryType(qry, true);
 
             return Collections.singletonList(doRunDistributedQuery(schemaName, qry, twoStepQry, meta, keepBinary,
-                cancel));
+                startTx, tracker, cancel));
         }
 
         // We've encountered a local query, let's just run it.
@@ -1849,7 +2301,8 @@
         PreparedStatement stmt = prepareStatementAndCaches(c, qry.getSql());
 
         if (loc && GridSqlQueryParser.checkMultipleStatements(stmt))
-            throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
+            throw new IgniteSQLException("Multiple statements queries are not supported for local queries.",
+                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
 
         GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt);
 
@@ -1950,7 +2403,9 @@
                 .distributedJoinMode(distributedJoinMode(qry.isLocal(), qry.isDistributedJoins())));
 
             try {
-                return new ParsingResult(prepared, newQry, remainingSql, split(prepared, newQry),
+                GridCacheTwoStepQuery twoStepQry = split(prepared, newQry);
+
+                return new ParsingResult(prepared, newQry, remainingSql, twoStepQry,
                     cachedQryKey, H2Utils.meta(stmt.getMetaData()));
             }
             catch (IgniteCheckedException e) {
@@ -2011,18 +2466,77 @@
     }
 
     /**
+     * @param qry Sql fields query.autoStartTx(qry)
+     * @return {@code True} if need to start transaction.
+     */
+    public boolean autoStartTx(SqlFieldsQuery qry) {
+        if (!mvccEnabled(ctx))
+            return false;
+
+        return qry instanceof SqlFieldsQueryEx && !((SqlFieldsQueryEx)qry).isAutoCommit() && tx(ctx) == null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public UpdateSourceIterator<?> prepareDistributedUpdate(GridCacheContext<?, ?> cctx, int[] ids,
+        int[] parts, String schema, String qry, Object[] params, int flags,
+        int pageSize, int timeout, AffinityTopologyVersion topVer,
+        MvccSnapshot mvccSnapshot, GridQueryCancel cancel) throws IgniteCheckedException {
+
+        SqlFieldsQuery fldsQry = new SqlFieldsQuery(qry);
+
+        if (params != null)
+            fldsQry.setArgs(params);
+
+        fldsQry.setEnforceJoinOrder(isFlagSet(flags, GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER));
+        fldsQry.setTimeout(timeout, TimeUnit.MILLISECONDS);
+        fldsQry.setPageSize(pageSize);
+        fldsQry.setLocal(true);
+
+        boolean loc = true;
+
+        final boolean replicated = isFlagSet(flags, GridH2QueryRequest.FLAG_REPLICATED);
+
+        GridCacheContext<?, ?> cctx0;
+
+        if (!replicated
+            && !F.isEmpty(ids)
+            && (cctx0 = CU.firstPartitioned(cctx.shared(), ids)) != null
+            && cctx0.config().getQueryParallelism() > 1) {
+            fldsQry.setDistributedJoins(true);
+
+            loc = false;
+        }
+
+        Connection conn = connectionForSchema(schema);
+
+        H2Utils.setupConnection(conn, false, fldsQry.isEnforceJoinOrder());
+
+        PreparedStatement stmt = preparedStatementWithParams(conn, fldsQry.getSql(),
+            F.asList(fldsQry.getArgs()), true);
+
+        return dmlProc.prepareDistributedUpdate(schema, conn, stmt, fldsQry, backupFilter(topVer, parts), cancel, loc,
+            topVer, mvccSnapshot);
+    }
+
+    private boolean isFlagSet(int flags, int flag) {
+        return (flags & flag) == flag;
+    }
+
+    /**
      * Run distributed query on detected set of partitions.
      * @param schemaName Schema name.
      * @param qry Original query.
      * @param twoStepQry Two-step query.
      * @param meta Metadata to set to cursor.
      * @param keepBinary Keep binary flag.
+     * @param startTx Start transaction flag.
+     * @param mvccTracker Query tracker.
      * @param cancel Cancel handler.
      * @return Cursor representing distributed query result.
      */
     private FieldsQueryCursor<List<?>> doRunDistributedQuery(String schemaName, SqlFieldsQuery qry,
         GridCacheTwoStepQuery twoStepQry, List<GridQueryFieldMetadata> meta, boolean keepBinary,
-        GridQueryCancel cancel) {
+        boolean startTx, MvccQueryTracker mvccTracker, GridQueryCancel cancel) {
         if (log.isDebugEnabled())
             log.debug("Parsed query: `" + qry.getSql() + "` into two step query: " + twoStepQry);
 
@@ -2044,8 +2558,8 @@
         }
 
         QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(
-            runQueryTwoStep(schemaName, twoStepQry, keepBinary, qry.isEnforceJoinOrder(), qry.getTimeout(), cancel,
-                qry.getArgs(), partitions, qry.isLazy()), cancel);
+            runQueryTwoStep(schemaName, twoStepQry, keepBinary, qry.isEnforceJoinOrder(), startTx, qry.getTimeout(),
+                cancel, qry.getArgs(), partitions, qry.isLazy(), mvccTracker), cancel);
 
         cursor.fieldsMeta(meta);
 
@@ -2111,22 +2625,35 @@
     }
 
     /**
-     * @param cacheIds Caches identifiers.
+     * @param cacheIds Cache IDs.
+     * @param twoStepQry Query.
      * @throws IllegalStateException if segmented indices used with non-segmented indices.
      */
-    private void checkCacheIndexSegmentation(Collection<Integer> cacheIds) {
+    private void processCaches(List<Integer> cacheIds, GridCacheTwoStepQuery twoStepQry) {
         if (cacheIds.isEmpty())
             return; // Nothing to check
 
         GridCacheSharedContext sharedCtx = ctx.cache().context();
 
         int expectedParallelism = 0;
+        GridCacheContext cctx0 = null;
 
-        for (Integer cacheId : cacheIds) {
+        boolean mvccEnabled = false;
+
+        for (int i = 0; i < cacheIds.size(); i++) {
+            Integer cacheId = cacheIds.get(i);
+
             GridCacheContext cctx = sharedCtx.cacheContext(cacheId);
 
             assert cctx != null;
 
+            if (i == 0) {
+                mvccEnabled = cctx.mvccEnabled();
+                cctx0 = cctx;
+            }
+            else if (cctx.mvccEnabled() != mvccEnabled)
+                MvccUtils.throwAtomicityModesMismatchException(cctx0, cctx);
+
             if (!cctx.isPartitioned())
                 continue;
 
@@ -2137,6 +2664,18 @@
                     "forbidden.");
             }
         }
+
+        twoStepQry.mvccEnabled(mvccEnabled);
+
+        if (twoStepQry.forUpdate()) {
+            if (cacheIds.size() != 1)
+                throw new IgniteSQLException("SELECT FOR UPDATE is supported only for queries " +
+                    "that involve single transactional cache.");
+
+            if (!mvccEnabled)
+                throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional cache " +
+                    "with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+        }
     }
 
     /**
@@ -2237,6 +2776,7 @@
      * @param type Type descriptor.
      * @throws IgniteCheckedException If validation failed.
      */
+    @SuppressWarnings("CollectionAddAllCanBeReplacedWithConstructor")
     private void validateTypeDescriptor(GridQueryTypeDescriptor type)
         throws IgniteCheckedException {
         assert type != null;
@@ -2274,8 +2814,16 @@
         assert schema != null;
         assert tbl != null;
 
-        String keyType = dbTypeFromClass(tbl.type().keyClass());
-        String valTypeStr = dbTypeFromClass(tbl.type().valueClass());
+        GridQueryProperty keyProp = tbl.type().property(KEY_FIELD_NAME);
+        GridQueryProperty valProp = tbl.type().property(VAL_FIELD_NAME);
+
+        String keyType = dbTypeFromClass(tbl.type().keyClass(),
+            keyProp == null ? -1 : keyProp.precision(),
+            keyProp == null ? -1 : keyProp.scale());
+
+        String valTypeStr = dbTypeFromClass(tbl.type().valueClass(),
+            valProp == null ? -1 : valProp.precision(),
+            valProp == null ? -1 : valProp.scale());
 
         SB sql = new SB();
 
@@ -2287,9 +2835,15 @@
         sql.a(',').a(VAL_FIELD_NAME).a(' ').a(valTypeStr).a(keyValVisibility);
         sql.a(',').a(VER_FIELD_NAME).a(" OTHER INVISIBLE");
 
-        for (Map.Entry<String, Class<?>> e : tbl.type().fields().entrySet())
-            sql.a(',').a(H2Utils.withQuotes(e.getKey())).a(' ').a(dbTypeFromClass(e.getValue()))
-            .a(tbl.type().property(e.getKey()).notNull()? " NOT NULL" : "");
+        for (Map.Entry<String, Class<?>> e : tbl.type().fields().entrySet()) {
+            GridQueryProperty prop = tbl.type().property(e.getKey());
+
+            sql.a(',')
+                .a(H2Utils.withQuotes(e.getKey()))
+                .a(' ')
+                .a(dbTypeFromClass(e.getValue(), prop.precision(), prop.scale()))
+                .a(prop.notNull() ? " NOT NULL" : "");
+        }
 
         sql.a(')');
 
@@ -2361,10 +2915,17 @@
      * Gets corresponding DB type from java class.
      *
      * @param cls Java class.
+     * @param precision Field precision.
+     * @param scale Field scale.
      * @return DB type name.
      */
-    private String dbTypeFromClass(Class<?> cls) {
-        return H2DatabaseType.fromClass(cls).dBTypeAsString();
+    private String dbTypeFromClass(Class<?> cls, int precision, int scale) {
+        String dbType = H2DatabaseType.fromClass(cls).dBTypeAsString();
+
+        if (precision != -1 && dbType.equalsIgnoreCase(H2DatabaseType.VARCHAR.dBTypeAsString()))
+            return dbType + "(" + precision + ")";
+
+        return dbType;
     }
 
     /**
@@ -2397,10 +2958,10 @@
     /**
      * Gets collection of table for given schema name.
      *
-     * @param cacheName Schema name.
+     * @param cacheName Cache name.
      * @return Collection of table descriptors.
      */
-    private Collection<H2TableDescriptor> tables(String cacheName) {
+    Collection<H2TableDescriptor> tables(String cacheName) {
         H2Schema s = schemas.get(schema(cacheName));
 
         if (s == null)
@@ -2429,28 +2990,32 @@
     }
 
     /**
-     * Called periodically by {@link GridTimeoutProcessor} to clean up the {@link #stmtCache}.
+     * Called periodically by {@link GridTimeoutProcessor} to clean up the statement cache.
      */
     private void cleanupStatementCache() {
-        long cur = U.currentTimeMillis();
+        long now = U.currentTimeMillis();
 
-        for (Iterator<Map.Entry<Thread, H2StatementCache>> it = stmtCache.entrySet().iterator(); it.hasNext(); ) {
-            Map.Entry<Thread, H2StatementCache> entry = it.next();
+        for (Iterator<Map.Entry<Thread, H2ConnectionWrapper>> it = conns.entrySet().iterator(); it.hasNext(); ) {
+            Map.Entry<Thread, H2ConnectionWrapper> entry = it.next();
 
             Thread t = entry.getKey();
 
-            if (t.getState() == Thread.State.TERMINATED
-                || cur - entry.getValue().lastUsage() > STATEMENT_CACHE_THREAD_USAGE_TIMEOUT)
+            if (t.getState() == Thread.State.TERMINATED) {
+                U.close(entry.getValue(), log);
+
                 it.remove();
+            }
+            else if (now - entry.getValue().statementCache().lastUsage() > STATEMENT_CACHE_THREAD_USAGE_TIMEOUT)
+                entry.getValue().clearStatementCache();
         }
     }
 
     /**
-     * Called periodically by {@link GridTimeoutProcessor} to clean up the {@link #stmtCache}.
+     * Called periodically by {@link GridTimeoutProcessor} to clean up the {@link #conns}.
      */
     private void cleanupConnections() {
-        for (Iterator<Map.Entry<Thread, Connection>> it = conns.entrySet().iterator(); it.hasNext(); ) {
-            Map.Entry<Thread, Connection> entry = it.next();
+        for (Iterator<Map.Entry<Thread, H2ConnectionWrapper>> it = conns.entrySet().iterator(); it.hasNext(); ) {
+            Map.Entry<Thread, H2ConnectionWrapper> entry = it.next();
 
             Thread t = entry.getKey();
 
@@ -2463,6 +3028,24 @@
     }
 
     /**
+     * Removes from cache and returns associated with current thread connection.
+     * @return Connection associated with current thread.
+     */
+    public ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> detach() {
+        Thread key = Thread.currentThread();
+
+        ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> reusableConnection = connCache.get();
+
+        H2ConnectionWrapper connection = conns.remove(key);
+
+        connCache.remove();
+
+        assert reusableConnection.object().connection() == connection.connection();
+
+        return reusableConnection;
+    }
+
+    /**
      * Rebuild indexes from hash index.
      *
      * @param cacheName Cache name.
@@ -2477,7 +3060,7 @@
 
         SchemaIndexCacheVisitor visitor = new SchemaIndexCacheVisitorImpl(cctx);
 
-        visitor.visit(new RebuldIndexFromHashClosure(qryMgr));
+        visitor.visit(new RebuildIndexFromHashClosure(qryMgr, cctx.mvccEnabled()));
 
         for (H2TableDescriptor tblDesc : tables(cacheName))
             tblDesc.table().markRebuildFromHashInProgress(false);
@@ -2568,7 +3151,24 @@
         else {
             this.ctx = ctx;
 
-            schemas.put(QueryUtils.DFLT_SCHEMA, new H2Schema(QueryUtils.DFLT_SCHEMA));
+            // Register PUBLIC schema which is always present.
+            schemas.put(QueryUtils.DFLT_SCHEMA, new H2Schema(QueryUtils.DFLT_SCHEMA, true));
+
+            // Register additional schemas.
+            String[] additionalSchemas = ctx.config().getSqlSchemas();
+
+            if (!F.isEmpty(additionalSchemas)) {
+                synchronized (schemaMux) {
+                    for (String schema : additionalSchemas) {
+                        if (F.isEmpty(schema))
+                            continue;
+
+                        schema = QueryUtils.normalizeSchemaName(null, schema);
+
+                        createSchemaIfNeeded(schema, true);
+                    }
+                }
+            }
 
             valCtx = new CacheQueryObjectValueContext(ctx);
 
@@ -2599,7 +3199,7 @@
             if (sysViewsEnabled) {
                 try {
                     synchronized (schemaMux) {
-                        createSchema(QueryUtils.SCHEMA_SYS);
+                        createSchema0(QueryUtils.SCHEMA_SYS);
                     }
 
                     Connection c = connectionForSchema(QueryUtils.SCHEMA_SYS);
@@ -2643,6 +3243,9 @@
         Collection<SqlSystemView> views = new ArrayList<>();
 
         views.add(new SqlSystemViewNodes(ctx));
+        views.add(new SqlSystemViewNodeAttributes(ctx));
+        views.add(new SqlSystemViewBaselineNodes(ctx));
+        views.add(new SqlSystemViewNodeMetrics(ctx));
 
         return views;
     }
@@ -2802,13 +3405,13 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void stop() throws IgniteCheckedException {
+    @Override public void stop() {
         if (log.isDebugEnabled())
             log.debug("Stopping cache query index...");
 
         mapQryExec.cancelLazyWorkers();
 
-        for (Connection c : conns.values())
+        for (H2ConnectionWrapper c : conns.values())
             U.close(c, log);
 
         conns.clear();
@@ -2843,14 +3446,15 @@
         }
     }
 
-    /**
-     * Whether this is default schema.
-     *
-     * @param schemaName Schema name.
-     * @return {@code True} if default.
-     */
-    private boolean isDefaultSchema(String schemaName) {
-        return F.eq(schemaName, QueryUtils.DFLT_SCHEMA);
+    /** {@inheritDoc} */
+    @Override public void onClientDisconnect() throws IgniteCheckedException {
+        if (!mvccEnabled(ctx))
+            return;
+
+        GridNearTxLocal tx = tx(ctx);
+
+        if (tx != null)
+            doRollback(tx);
     }
 
     /** {@inheritDoc} */
@@ -2858,19 +3462,8 @@
         throws IgniteCheckedException {
         rowCache.onCacheRegistered(cctx);
 
-        if (!isDefaultSchema(schemaName)) {
-            synchronized (schemaMux) {
-                H2Schema schema = new H2Schema(schemaName);
-
-                H2Schema oldSchema = schemas.putIfAbsent(schemaName, schema);
-
-                if (oldSchema == null)
-                    createSchema(schemaName);
-                else
-                    schema = oldSchema;
-
-                schema.incrementUsageCount();
-            }
+        synchronized (schemaMux) {
+            createSchemaIfNeeded(schemaName, false);
         }
 
         cacheName2schema.put(cacheName, schemaName);
@@ -2915,22 +3508,20 @@
                 }
             }
 
-            if (!isDefaultSchema(schemaName)) {
-                synchronized (schemaMux) {
-                    if (schema.decrementUsageCount() == 0) {
-                        schemas.remove(schemaName);
+            synchronized (schemaMux) {
+                if (schema.decrementUsageCount()) {
+                    schemas.remove(schemaName);
 
-                        try {
-                            dropSchema(schemaName);
-                        }
-                        catch (IgniteCheckedException e) {
-                            U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e);
-                        }
+                    try {
+                        dropSchema(schemaName);
+                    }
+                    catch (IgniteException e) {
+                        U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e);
                     }
                 }
             }
 
-            stmtCache.clear();
+            conns.values().forEach(H2ConnectionWrapper::clearStatementCache);
 
             for (H2TableDescriptor tbl : rmvTbls) {
                 for (Index idx : tbl.table().getIndexes())
@@ -2955,7 +3546,7 @@
      * Remove all cached queries from cached two-steps queries.
      */
     private void clearCachedQueries() {
-        twoStepCache.clear();
+        twoStepCache = new GridBoundedConcurrentLinkedHashMap<>(TWO_STEP_QRY_CACHE_SIZE);
     }
 
     /** {@inheritDoc} */
@@ -2984,7 +3575,7 @@
 
         AffinityTopologyVersion initVer = fut.initialVersion();
 
-        return initVer.compareTo(readyVer) > 0 && !CU.clientNode(fut.firstEvent().node());
+        return initVer.compareTo(readyVer) > 0 && !fut.firstEvent().node().isClient();
     }
 
     /**
@@ -3090,14 +3681,14 @@
     @Override public void cancelAllQueries() {
         mapQryExec.cancelLazyWorkers();
 
-        for (Connection c : conns.values())
+        for (H2ConnectionWrapper c : conns.values())
             U.close(c, log);
     }
 
     /**
      * @return Per-thread connections.
      */
-    public Map<Thread, Connection> perThreadConnections() {
+    public Map<Thread, ?> perThreadConnections() {
         return conns;
     }
 
@@ -3134,7 +3725,7 @@
             //Prohibit usage indices with different numbers of segments in same query.
             List<Integer> cacheIds = new ArrayList<>(caches0);
 
-            checkCacheIndexSegmentation(cacheIds);
+            processCaches(cacheIds, twoStepQry);
 
             return cacheIds;
         }
@@ -3160,22 +3751,4 @@
     private interface ClIter<X> extends AutoCloseable, Iterator<X> {
         // No-op.
     }
-
-    /** */
-    private static class RebuldIndexFromHashClosure implements SchemaIndexCacheVisitorClosure {
-        /** */
-        private final GridCacheQueryManager qryMgr;
-
-        /**
-         * @param qryMgr Query manager.
-         */
-        RebuldIndexFromHashClosure(GridCacheQueryManager qryMgr) {
-            this.qryMgr = qryMgr;
-        }
-
-        /** {@inheritDoc} */
-        @Override public void apply(CacheDataRow row) throws IgniteCheckedException {
-            qryMgr.store(row, null, false);
-        }
-    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/NearResultSetEnlistFuture.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/NearResultSetEnlistFuture.java
new file mode 100644
index 0000000..1856430
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/NearResultSetEnlistFuture.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.ResultSet;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxQueryResultsEnlistFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public class NearResultSetEnlistFuture extends GridNearTxQueryResultsEnlistFuture implements ResultSetEnlistFuture  {
+    /** */
+    private static final long serialVersionUID = 877907044489718378L;
+
+    /**
+     * @param nearNodeId   Near node ID.
+     * @param nearLockVer  Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId     Thread ID.
+     * @param nearFutId    Near future id.
+     * @param nearMiniId   Near mini future id.
+     * @param parts        Partitions.
+     * @param tx           Transaction.
+     * @param timeout      Lock acquisition timeout.
+     * @param cctx         Cache context.
+     * @param rs           Result set to process.
+     */
+    public NearResultSetEnlistFuture(UUID nearNodeId, GridCacheVersion nearLockVer, MvccSnapshot mvccSnapshot,
+        long threadId, IgniteUuid nearFutId, int nearMiniId, @Nullable int[] parts, GridDhtTxLocalAdapter tx,
+        long timeout, GridCacheContext<?, ?> cctx, ResultSet rs) {
+        super(cctx, (GridNearTxLocal)tx, timeout, ResultSetEnlistFuture.createIterator(rs), 0, true);
+    }
+
+
+    /** {@inheritDoc} */
+    @Override public void onError(Throwable error) {
+        onDone(error);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementEx.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementEx.java
new file mode 100644
index 0000000..50dd892
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementEx.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.PreparedStatement;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ *
+ */
+public interface PreparedStatementEx extends PreparedStatement {
+    /** */
+    static final AtomicInteger metaIdGenerator = new AtomicInteger();
+
+    /** Flag if at least one MVCC cache is used in this statement. */
+    static final int MVCC_STATE = metaIdGenerator.getAndIncrement();
+
+    /** First mvcc cache id of the involved caches. */
+    static final int MVCC_CACHE_ID = metaIdGenerator.getAndIncrement();
+
+    /**
+     * @param id Metadata key.
+     * @return Attached metadata.
+     */
+    @Nullable <T> T meta(int id);
+
+    /**
+     * @param id Metadata key.
+     * @param metaObj  Metadata object.
+     */
+    void putMeta(int id, Object metaObj);
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExImpl.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExImpl.java
new file mode 100644
index 0000000..922c1ab
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExImpl.java
@@ -0,0 +1,648 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.io.InputStream;
+import java.io.Reader;
+import java.math.BigDecimal;
+import java.net.URL;
+import java.sql.Array;
+import java.sql.Blob;
+import java.sql.Clob;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.NClob;
+import java.sql.ParameterMetaData;
+import java.sql.PreparedStatement;
+import java.sql.Ref;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.RowId;
+import java.sql.SQLException;
+import java.sql.SQLType;
+import java.sql.SQLWarning;
+import java.sql.SQLXML;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.Calendar;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * PreparedStatement with extended capability to store additional meta information.
+ */
+@SuppressWarnings("unchecked")
+final class PreparedStatementExImpl implements PreparedStatementEx {
+    /** */
+    private final PreparedStatement delegate;
+
+    /** */
+    private Object[] meta = null;
+
+    /**
+     * @param delegate Wrapped statement.
+     */
+    public PreparedStatementExImpl(PreparedStatement delegate) {
+        this.delegate = delegate;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ResultSet executeQuery() throws SQLException {
+        return delegate.executeQuery();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int executeUpdate() throws SQLException {
+        return delegate.executeUpdate();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNull(int parameterIndex, int sqlType) throws SQLException {
+        delegate.setNull(parameterIndex, sqlType);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBoolean(int parameterIndex, boolean x) throws SQLException {
+        delegate.setBoolean(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setByte(int parameterIndex, byte x) throws SQLException {
+        delegate.setByte(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setShort(int parameterIndex, short x) throws SQLException {
+        delegate.setShort(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setInt(int parameterIndex, int x) throws SQLException {
+        delegate.setInt(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setLong(int parameterIndex, long x) throws SQLException {
+        delegate.setLong(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setFloat(int parameterIndex, float x) throws SQLException {
+        delegate.setFloat(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDouble(int parameterIndex, double x) throws SQLException {
+        delegate.setDouble(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException {
+        delegate.setBigDecimal(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setString(int parameterIndex, String x) throws SQLException {
+        delegate.setString(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBytes(int parameterIndex, byte[] x) throws SQLException {
+        delegate.setBytes(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDate(int parameterIndex, Date x) throws SQLException {
+        delegate.setDate(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTime(int parameterIndex, Time x) throws SQLException {
+        delegate.setTime(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException {
+        delegate.setTimestamp(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException {
+        delegate.setAsciiStream(parameterIndex, x, length);
+    }
+
+    /** {@inheritDoc} */
+    @Deprecated
+    @Override public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException {
+        delegate.setUnicodeStream(parameterIndex, x, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException {
+        delegate.setBinaryStream(parameterIndex, x, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void clearParameters() throws SQLException {
+        delegate.clearParameters();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException {
+        delegate.setObject(parameterIndex, x, targetSqlType);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setObject(int parameterIndex, Object x) throws SQLException {
+        delegate.setObject(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean execute() throws SQLException {
+        return delegate.execute();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addBatch() throws SQLException {
+        delegate.addBatch();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException {
+        delegate.setCharacterStream(parameterIndex, reader, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setRef(int parameterIndex, Ref x) throws SQLException {
+        delegate.setRef(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBlob(int parameterIndex, Blob x) throws SQLException {
+        delegate.setBlob(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setClob(int parameterIndex, Clob x) throws SQLException {
+        delegate.setClob(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setArray(int parameterIndex, Array x) throws SQLException {
+        delegate.setArray(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public ResultSetMetaData getMetaData() throws SQLException {
+        return delegate.getMetaData();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException {
+        delegate.setDate(parameterIndex, x, cal);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException {
+        delegate.setTime(parameterIndex, x, cal);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException {
+        delegate.setTimestamp(parameterIndex, x, cal);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException {
+        delegate.setNull(parameterIndex, sqlType, typeName);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setURL(int parameterIndex, URL x) throws SQLException {
+        delegate.setURL(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public ParameterMetaData getParameterMetaData() throws SQLException {
+        return delegate.getParameterMetaData();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setRowId(int parameterIndex, RowId x) throws SQLException {
+        delegate.setRowId(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNString(int parameterIndex, String value) throws SQLException {
+        delegate.setNString(parameterIndex, value);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException {
+        delegate.setNCharacterStream(parameterIndex, value, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNClob(int parameterIndex, NClob value) throws SQLException {
+        delegate.setNClob(parameterIndex, value);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setClob(int parameterIndex, Reader reader, long length) throws SQLException {
+        delegate.setClob(parameterIndex, reader, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException {
+        delegate.setBlob(parameterIndex, inputStream, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException {
+        delegate.setNClob(parameterIndex, reader, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setSQLXML(int parameterIndex, SQLXML xmlObject) throws SQLException {
+        delegate.setSQLXML(parameterIndex, xmlObject);
+    }
+
+    @Override
+    public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException {
+        delegate.setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException {
+        delegate.setAsciiStream(parameterIndex, x, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException {
+        delegate.setBinaryStream(parameterIndex, x, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException {
+        delegate.setCharacterStream(parameterIndex, reader, length);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException {
+        delegate.setAsciiStream(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException {
+        delegate.setBinaryStream(parameterIndex, x);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException {
+        delegate.setCharacterStream(parameterIndex, reader);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException {
+        delegate.setNCharacterStream(parameterIndex, value);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setClob(int parameterIndex, Reader reader) throws SQLException {
+        delegate.setClob(parameterIndex, reader);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException {
+        delegate.setBlob(parameterIndex, inputStream);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setNClob(int parameterIndex, Reader reader) throws SQLException {
+        delegate.setNClob(parameterIndex, reader);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException {
+        delegate.setObject(parameterIndex, x, targetSqlType, scaleOrLength);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setObject(int parameterIndex, Object x, SQLType targetSqlType) throws SQLException {
+        delegate.setObject(parameterIndex, x, targetSqlType);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long executeLargeUpdate() throws SQLException {
+        return delegate.executeLargeUpdate();
+    }
+
+    /** {@inheritDoc} */
+    @Override public ResultSet executeQuery(String sql) throws SQLException {
+        return delegate.executeQuery(sql);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int executeUpdate(String sql) throws SQLException {
+        return delegate.executeUpdate(sql);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws SQLException {
+        delegate.close();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMaxFieldSize() throws SQLException {
+        return delegate.getMaxFieldSize();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMaxFieldSize(int max) throws SQLException {
+        delegate.setMaxFieldSize(max);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMaxRows() throws SQLException {
+        return delegate.getMaxRows();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setMaxRows(int max) throws SQLException {
+        delegate.setMaxRows(max);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setEscapeProcessing(boolean enable) throws SQLException {
+        delegate.setEscapeProcessing(enable);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getQueryTimeout() throws SQLException {
+        return delegate.getQueryTimeout();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setQueryTimeout(int seconds) throws SQLException {
+        delegate.setQueryTimeout(seconds);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void cancel() throws SQLException {
+        delegate.cancel();
+    }
+
+    /** {@inheritDoc} */
+    @Override public SQLWarning getWarnings() throws SQLException {
+        return delegate.getWarnings();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void clearWarnings() throws SQLException {
+        delegate.clearWarnings();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setCursorName(String name) throws SQLException {
+        delegate.setCursorName(name);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean execute(String sql) throws SQLException {
+        return delegate.execute(sql);
+    }
+
+    /** {@inheritDoc} */
+    @Override public ResultSet getResultSet() throws SQLException {
+        return delegate.getResultSet();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getUpdateCount() throws SQLException {
+        return delegate.getUpdateCount();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean getMoreResults() throws SQLException {
+        return delegate.getMoreResults();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getFetchDirection() throws SQLException {
+        return delegate.getFetchDirection();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setFetchDirection(int direction) throws SQLException {
+        delegate.setFetchDirection(direction);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getFetchSize() throws SQLException {
+        return delegate.getFetchSize();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setFetchSize(int rows) throws SQLException {
+        delegate.setFetchSize(rows);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getResultSetConcurrency() throws SQLException {
+        return delegate.getResultSetConcurrency();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getResultSetType() throws SQLException {
+        return delegate.getResultSetType();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addBatch(String sql) throws SQLException {
+        delegate.addBatch(sql);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void clearBatch() throws SQLException {
+        delegate.clearBatch();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int[] executeBatch() throws SQLException {
+        return delegate.executeBatch();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Connection getConnection() throws SQLException {
+        return delegate.getConnection();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean getMoreResults(int current) throws SQLException {
+        return delegate.getMoreResults(current);
+    }
+
+    /** {@inheritDoc} */
+    @Override public ResultSet getGeneratedKeys() throws SQLException {
+        return delegate.getGeneratedKeys();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+        return delegate.executeUpdate(sql, autoGeneratedKeys);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
+        return delegate.executeUpdate(sql, columnIndexes);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int executeUpdate(String sql, String[] columnNames) throws SQLException {
+        return delegate.executeUpdate(sql, columnNames);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
+        return delegate.execute(sql, autoGeneratedKeys);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean execute(String sql, int[] columnIndexes) throws SQLException {
+        return delegate.execute(sql, columnIndexes);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean execute(String sql, String[] columnNames) throws SQLException {
+        return delegate.execute(sql, columnNames);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getResultSetHoldability() throws SQLException {
+        return delegate.getResultSetHoldability();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isClosed() throws SQLException {
+        return delegate.isClosed();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isPoolable() throws SQLException {
+        return delegate.isPoolable();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setPoolable(boolean poolable) throws SQLException {
+        delegate.setPoolable(poolable);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void closeOnCompletion() throws SQLException {
+        delegate.closeOnCompletion();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isCloseOnCompletion() throws SQLException {
+        return delegate.isCloseOnCompletion();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getLargeUpdateCount() throws SQLException {
+        return delegate.getLargeUpdateCount();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getLargeMaxRows() throws SQLException {
+        return delegate.getLargeMaxRows();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void setLargeMaxRows(long max) throws SQLException {
+        delegate.setLargeMaxRows(max);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long[] executeLargeBatch() throws SQLException {
+        return delegate.executeLargeBatch();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long executeLargeUpdate(String sql) throws SQLException {
+        return delegate.executeLargeUpdate(sql);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
+        return delegate.executeLargeUpdate(sql, autoGeneratedKeys);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException {
+        return delegate.executeLargeUpdate(sql, columnIndexes);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException {
+        return delegate.executeLargeUpdate(sql, columnNames);
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public <T> T unwrap(Class<T> iface) throws SQLException {
+        if (iface == PreparedStatementExImpl.class || iface == PreparedStatementEx.class)
+            return (T)this;
+
+        return delegate.unwrap(iface);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isWrapperFor(Class<?> iface) throws SQLException {
+        return iface == PreparedStatementExImpl.class
+            || iface == PreparedStatementEx.class
+            || delegate.isWrapperFor(iface);
+    }
+
+    /** {@inheritDoc} */
+    @Override public @Nullable <T> T meta(int id) {
+        return meta != null && id < meta.length ? (T)meta[id] : null;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void putMeta(int id, Object metaObj) {
+        if (meta == null)
+            meta = new Object[id + 1];
+        else if (meta.length <= id)
+            meta = Arrays.copyOf(meta, id + 1);
+
+        meta[id] = metaObj;
+    }
+
+    /**
+     *
+     * @param stmt Prepared statement to wrap.
+     * @return Wrapped statement.
+     */
+    public static PreparedStatement wrap(@NotNull PreparedStatement stmt) {
+        if (stmt.getClass() == PreparedStatementExImpl.class)
+            return stmt;
+
+        return new PreparedStatementExImpl(stmt);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/RebuildIndexFromHashClosure.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/RebuildIndexFromHashClosure.java
new file mode 100644
index 0000000..b635eac
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/RebuildIndexFromHashClosure.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.query.GridCacheQueryManager;
+import org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure;
+
+/** */
+class RebuildIndexFromHashClosure implements SchemaIndexCacheVisitorClosure {
+    /** */
+    private final GridCacheQueryManager qryMgr;
+
+    /** MVCC status flag. */
+    private final boolean mvccEnabled;
+
+    /**
+     * @param qryMgr Query manager.
+     * @param mvccEnabled MVCC status flag.
+     */
+    RebuildIndexFromHashClosure(GridCacheQueryManager qryMgr, boolean mvccEnabled) {
+        this.qryMgr = qryMgr;
+        this.mvccEnabled = mvccEnabled;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void apply(CacheDataRow row) throws IgniteCheckedException {
+        // prevRowAvailable is always true with MVCC on, and always false *on index rebuild* with MVCC off.
+        qryMgr.store(row, null, mvccEnabled);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ResultSetEnlistFuture.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ResultSetEnlistFuture.java
new file mode 100644
index 0000000..ee1c0fa
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ResultSetEnlistFuture.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.NoSuchElementException;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.DhtLockFuture;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.lang.IgniteUuid;
+import org.jetbrains.annotations.Nullable;
+
+/**
+ * Future to process whole local result set of SELECT FOR UPDATE query.
+ */
+public interface ResultSetEnlistFuture extends DhtLockFuture<Long> {
+    /**
+     * @param rs Result set.
+     * @return Update source.
+     */
+    static UpdateSourceIterator<?> createIterator(ResultSet rs) {
+        return new ResultSetUpdateSourceIteratorWrapper(rs);
+    }
+
+    /** */
+    void init();
+
+    /**
+     *
+     * @param nearNodeId   Near node ID.
+     * @param nearLockVer  Near lock version.
+     * @param mvccSnapshot Mvcc snapshot.
+     * @param threadId     Thread ID.
+     * @param nearFutId    Near future id.
+     * @param nearMiniId   Near mini future id.
+     * @param parts        Partitions.
+     * @param tx           Transaction.
+     * @param timeout      Lock acquisition timeout.
+     * @param cctx         Cache context.
+     * @param rs           Result set to process.
+     * @return Result set enlist future.
+     */
+    static ResultSetEnlistFuture future(UUID nearNodeId, GridCacheVersion nearLockVer,
+        MvccSnapshot mvccSnapshot, long threadId, IgniteUuid nearFutId, int nearMiniId, @Nullable int[] parts,
+        GridDhtTxLocalAdapter tx, long timeout, GridCacheContext<?, ?> cctx, ResultSet rs) {
+
+        if (tx.near())
+            return new NearResultSetEnlistFuture(nearNodeId, nearLockVer, mvccSnapshot, threadId, nearFutId, nearMiniId, parts, tx, timeout, cctx, rs);
+        else
+            return new DhtResultSetEnlistFuture(nearNodeId, nearLockVer, mvccSnapshot, threadId, nearFutId, nearMiniId, parts, tx, timeout, cctx, rs);
+    }
+
+    /**
+     *
+     */
+    public static class ResultSetUpdateSourceIteratorWrapper implements UpdateSourceIterator<Object> {
+        /** */
+        private static final long serialVersionUID = -8745196216234843471L;
+
+        /** */
+        private final ResultSet rs;
+
+        /** */
+        private Boolean hasNext;
+
+        /** */
+        private int keyColIdx;
+
+        /**
+         * @param rs Result set.
+         */
+        public ResultSetUpdateSourceIteratorWrapper(ResultSet rs) {
+            this.rs = rs;
+            keyColIdx = -1;
+        }
+
+        /** {@inheritDoc} */
+        @Override public EnlistOperation operation() {
+            return EnlistOperation.LOCK;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean hasNextX() {
+            try {
+                if (hasNext == null)
+                    hasNext = rs.next();
+
+                return hasNext;
+            }
+            catch (SQLException e) {
+                throw new IgniteSQLException(e);
+            }
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object nextX() {
+            if (!hasNextX())
+                throw new NoSuchElementException();
+
+            try {
+                if (keyColIdx == -1)
+                    keyColIdx = rs.getMetaData().getColumnCount();
+
+                return rs.getObject(keyColIdx);
+            }
+            catch (SQLException e) {
+                throw new IgniteSQLException(e);
+            }
+            finally {
+                hasNext = null;
+            }
+        }
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPool.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPool.java
new file mode 100644
index 0000000..25daa23
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPool.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.util.LinkedList;
+import java.util.Queue;
+import java.util.function.Supplier;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Special pool for managing limited number objects for further reuse.
+ * This pool maintains separate object bag for each thread by means of {@link ThreadLocal}.
+ * <p>
+ * If object is borrowed on one thread and recycled on different then it will be returned to
+ * recycling thread bag. For thread-safe use either pooled objects should be thread-safe or
+ * <i>happens-before</i> should be established between borrowing object and subsequent recycling.
+ *
+ * @param <E> pooled objects type
+ */
+public final class ThreadLocalObjectPool<E extends AutoCloseable> {
+    /**
+     * Wrapper for a pooled object with capability to return the object to a pool.
+     *
+     * @param <T> enclosed object type
+     */
+    public static class Reusable<T extends AutoCloseable> {
+        /** */
+        private final ThreadLocalObjectPool<T> pool;
+        /** */
+        private final T object;
+
+        /** */
+        private Reusable(ThreadLocalObjectPool<T> pool, T object) {
+            this.pool = pool;
+            this.object = object;
+        }
+
+        /**
+         * @return enclosed object
+         */
+        public T object() {
+            return object;
+        }
+
+        /**
+         * Returns an object to a pool or closes it if the pool is already full.
+         */
+        public void recycle() {
+            Queue<Reusable<T>> bag = pool.bag.get();
+            if (bag.size() < pool.poolSize)
+                bag.add(this);
+            else
+                U.closeQuiet(object);
+        }
+    }
+
+    /** */
+    private final Supplier<E> objectFactory;
+    /** */
+    private final ThreadLocal<Queue<Reusable<E>>> bag = ThreadLocal.withInitial(LinkedList::new);
+    /** */
+    private final int poolSize;
+
+    /**
+     * @param objectFactory factory used for new objects creation
+     * @param poolSize number of objects which pool can contain
+     */
+    public ThreadLocalObjectPool(Supplier<E> objectFactory, int poolSize) {
+        this.objectFactory = objectFactory;
+        this.poolSize = poolSize;
+    }
+
+    /**
+     * Picks an object from the pool if one is present or creates new one otherwise.
+     * Returns an object wrapper which could be returned to the pool.
+     *
+     * @return reusable object wrapper
+     */
+    public Reusable<E> borrow() {
+        Reusable<E> pooled = bag.get().poll();
+        return pooled != null ? pooled : new Reusable<>(this, objectFactory.get());
+    }
+
+    /** Visible for test */
+    int bagSize() {
+        return bag.get().size();
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java
index 546f5bb..5d877cd 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2PkHashIndex.java
@@ -19,6 +19,7 @@
 package org.apache.ignite.internal.processors.query.h2.database;
 
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -26,9 +27,12 @@
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
 import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter;
@@ -84,31 +88,30 @@
 
     /** {@inheritDoc} */
     @Override public Cursor find(Session ses, final SearchRow lower, final SearchRow upper) {
-        IndexingQueryFilter f = threadLocalFilter();
-        IndexingQueryCacheFilter p = null;
+        IndexingQueryCacheFilter filter = null;
+        MvccSnapshot mvccSnapshot = null;
 
-        if (f != null) {
-            String cacheName = getTable().cacheName();
+        GridH2QueryContext qctx = GridH2QueryContext.get();
 
-            p = f.forCache(cacheName);
+        if (qctx != null) {
+            IndexingQueryFilter f = qctx.filter();
+            filter = f != null ? f.forCache(getTable().cacheName()) : null;
+            mvccSnapshot = qctx.mvccSnapshot();
         }
 
-        KeyCacheObject lowerObj = null;
-        KeyCacheObject upperObj = null;
+        assert !cctx.mvccEnabled() || mvccSnapshot != null;
 
-        if (lower != null)
-            lowerObj = cctx.toCacheKeyObject(lower.getValue(0).getObject());
-
-        if (upper != null)
-            upperObj = cctx.toCacheKeyObject(upper.getValue(0).getObject());
+        KeyCacheObject lowerObj = lower != null ? cctx.toCacheKeyObject(lower.getValue(0).getObject()) : null;
+        KeyCacheObject upperObj = upper != null ? cctx.toCacheKeyObject(upper.getValue(0).getObject()) : null;
 
         try {
-            List<GridCursor<? extends CacheDataRow>> cursors = new ArrayList<>();
+            Collection<GridCursor<? extends CacheDataRow>> cursors = new ArrayList<>();
 
             for (IgniteCacheOffheapManager.CacheDataStore store : cctx.offheap().cacheDataStores())
-                cursors.add(store.cursor(cctx.cacheId(), lowerObj, upperObj));
+                if (filter == null || filter.applyPartition(store.partId()))
+                    cursors.add(store.cursor(cctx.cacheId(), lowerObj, upperObj, null, mvccSnapshot));
 
-            return new H2Cursor(new CompositeGridCursor<>(cursors.iterator()), p);
+            return new H2Cursor(cursors.iterator());
         }
         catch (IgniteCheckedException e) {
             throw DbException.convert(e);
@@ -124,7 +127,6 @@
     @SuppressWarnings("StatementWithEmptyBody")
     @Override public GridH2Row put(GridH2Row row) {
         // Should not be called directly. Rows are inserted into underlying cache data stores.
-
         assert false;
 
         throw DbException.getUnsupportedException("put");
@@ -192,28 +194,29 @@
      */
     private class H2Cursor implements Cursor {
         /** */
-        final GridCursor<? extends CacheDataRow> cursor;
+        private final GridH2RowDescriptor desc;
 
         /** */
-        final IndexingQueryCacheFilter filter;
+        private final Iterator<GridCursor<? extends CacheDataRow>> iter;
+
+        /** */
+        private GridCursor<? extends CacheDataRow> curr;
 
         /**
-         * @param cursor Cursor.
-         * @param filter Filter.
+         * @param iter Cursors iterator.
          */
-        private H2Cursor(GridCursor<? extends CacheDataRow> cursor, IndexingQueryCacheFilter filter) {
-            assert cursor != null;
+        private H2Cursor(Iterator<GridCursor<? extends CacheDataRow>> iter) {
+            assert iter != null;
 
-            this.cursor = cursor;
-            this.filter = filter;
+            this.iter = iter;
+
+            desc = tbl.rowDescriptor();
         }
 
         /** {@inheritDoc} */
         @Override public Row get() {
             try {
-                CacheDataRow dataRow = cursor.get();
-
-                return tbl.rowDescriptor().createRow(dataRow);
+                return desc.createRow(curr.get());
             }
             catch (IgniteCheckedException e) {
                 throw DbException.convert(e);
@@ -228,13 +231,13 @@
         /** {@inheritDoc} */
         @Override public boolean next() {
             try {
-                while (cursor.next()) {
-                    if (filter == null)
-                        return true;
+                if (curr != null && curr.next())
+                    return true;
 
-                    CacheDataRow dataRow = cursor.get();
+                while (iter.hasNext()) {
+                    curr = iter.next();
 
-                    if (filter.applyPartition(dataRow.partition()))
+                    if (curr.next())
                         return true;
                 }
 
@@ -250,45 +253,4 @@
             throw DbException.getUnsupportedException("previous");
         }
     }
-
-    /**
-     *
-     */
-    private static class CompositeGridCursor<T> implements GridCursor<T> {
-        /** */
-        private final Iterator<GridCursor<? extends T>> iter;
-
-        /** */
-        private GridCursor<? extends T> curr;
-
-        /**
-         *
-         */
-        public CompositeGridCursor(Iterator<GridCursor<? extends T>> iter) {
-            this.iter = iter;
-
-            if (iter.hasNext())
-                curr = iter.next();
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean next() throws IgniteCheckedException {
-            if (curr.next())
-                return true;
-
-            while (iter.hasNext()) {
-                curr = iter.next();
-
-                if (curr.next())
-                    return true;
-            }
-
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public T get() throws IgniteCheckedException {
-            return curr.get();
-        }
-    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java
index 40b9b0a..724de7e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2RowFactory.java
@@ -18,9 +18,10 @@
 package org.apache.ignite.internal.processors.query.h2.database;
 
 import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.IgniteException;
+import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 
@@ -60,17 +61,23 @@
 
         rowBuilder.initFromLink(cctx.group(), CacheDataRowAdapter.RowData.FULL);
 
-        GridH2Row row;
-
-        try {
-            row = rowDesc.createRow(rowBuilder);
-        }
-        catch (IgniteCheckedException e) {
-            throw new IgniteException(e);
-        }
+        GridH2Row row = rowDesc.createRow(rowBuilder);
 
         assert row.version() != null;
 
         return row;
     }
+
+    /**
+     * @param link Link.
+     * @param mvccCrdVer Mvcc coordinator version.
+     * @param mvccCntr Mvcc counter.
+     * @param mvccOpCntr Mvcc operation counter.
+     * @return Row.
+     * @throws IgniteCheckedException If failed.
+     */
+    public GridH2Row getMvccRow(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        return rowDesc.createRow(new MvccDataRow(cctx.group(),0, link,
+            PageIdUtils.partId(PageIdUtils.pageId(link)),null, mvccCrdVer, mvccCntr, mvccOpCntr));
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java
index 8da3b05..ce40df0 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2Tree.java
@@ -21,21 +21,23 @@
 import java.util.List;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.pagemem.PageMemory;
 import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList;
+import org.apache.ignite.internal.processors.failure.FailureProcessor;
 import org.apache.ignite.internal.processors.query.h2.H2RowCache;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasInnerIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2ExtrasLeafIO;
 import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+import org.apache.ignite.internal.util.typedef.internal.S;
 import org.apache.ignite.internal.util.typedef.internal.U;
-import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter;
 import org.h2.result.SearchRow;
 import org.h2.table.IndexColumn;
 import org.h2.value.Value;
@@ -43,7 +45,7 @@
 
 /**
  */
-public abstract class H2Tree extends BPlusTree<SearchRow, GridH2Row> {
+public abstract class H2Tree extends BPlusTree<GridH2SearchRow, GridH2Row> {
     /** */
     private final H2RowFactory rowStore;
 
@@ -60,6 +62,9 @@
     private final int[] columnIds;
 
     /** */
+    private final boolean mvccEnabled;
+
+    /** */
     private final Comparator<Value> comp = new Comparator<Value>() {
         @Override public int compare(Value o1, Value o2) {
             return compareValues(o1, o2);
@@ -81,6 +86,8 @@
      * @param metaPageId Meta page ID.
      * @param initNew Initialize new index.
      * @param rowCache Row cache.
+     * @param mvccEnabled Mvcc flag.
+     * @param failureProcessor if the tree is corrupted.
      * @throws IgniteCheckedException If failed.
      */
     protected H2Tree(
@@ -96,9 +103,11 @@
         IndexColumn[] cols,
         List<InlineIndexHelper> inlineIdxs,
         int inlineSize,
-        @Nullable H2RowCache rowCache
+        boolean mvccEnabled,
+        @Nullable H2RowCache rowCache,
+        @Nullable FailureProcessor failureProcessor
     ) throws IgniteCheckedException {
-        super(name, grpId, pageMem, wal, globalRmvId, metaPageId, reuseList);
+        super(name, grpId, pageMem, wal, globalRmvId, metaPageId, reuseList, failureProcessor);
 
         if (!initNew) {
             // Page is ready - read inline size from it.
@@ -106,6 +115,7 @@
         }
 
         this.inlineSize = inlineSize;
+        this.mvccEnabled = mvccEnabled;
 
         assert rowStore != null;
 
@@ -118,9 +128,9 @@
         for (int i = 0; i < cols.length; i++)
             columnIds[i] = cols[i].column.getColumnId();
 
-        this.rowCache = rowCache;
+        setIos(H2ExtrasInnerIO.getVersions(inlineSize, mvccEnabled), H2ExtrasLeafIO.getVersions(inlineSize, mvccEnabled));
 
-        setIos(H2ExtrasInnerIO.getVersions(inlineSize), H2ExtrasLeafIO.getVersions(inlineSize));
+        this.rowCache = rowCache;
 
         initTree(initNew, inlineSize);
     }
@@ -149,21 +159,34 @@
             return rowStore.getRow(link);
     }
 
-    /** {@inheritDoc} */
-    @Override protected GridH2Row getRow(BPlusIO<SearchRow> io, long pageAddr, int idx, Object filter)
-        throws IgniteCheckedException {
-        if (filter != null) {
-            // Filter out not interesting partitions without deserializing the row.
-            IndexingQueryCacheFilter filter0 = (IndexingQueryCacheFilter)filter;
+    /**
+     * Create row from link.
+     *
+     * @param link Link.
+     * @param mvccOpCntr
+     * @return Row.
+     * @throws IgniteCheckedException if failed.
+     */
+    public GridH2Row createRowFromLink(long link, long mvccCrdVer, long mvccCntr, int mvccOpCntr) throws IgniteCheckedException {
+        if (rowCache != null) {
+            GridH2Row row = rowCache.get(link);
 
-            long link = ((H2RowLinkIO)io).getLink(pageAddr, idx);
+            if (row == null) {
+                row = rowStore.getMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr);
 
-            int part = PageIdUtils.partId(PageIdUtils.pageId(link));
+                if (row instanceof GridH2KeyValueRowOnheap)
+                    rowCache.put((GridH2KeyValueRowOnheap)row);
+            }
 
-            if (!filter0.applyPartition(part))
-                return null;
+            return row;
         }
+        else
+            return rowStore.getMvccRow(link, mvccCrdVer, mvccCntr, mvccOpCntr);
+    }
 
+    /** {@inheritDoc} */
+    @Override public GridH2Row getRow(BPlusIO<GridH2SearchRow> io, long pageAddr, int idx, Object ignore)
+        throws IgniteCheckedException {
         return (GridH2Row)io.getLookupRow(this, pageAddr, idx);
     }
 
@@ -203,8 +226,8 @@
 
     /** {@inheritDoc} */
     @SuppressWarnings("ForLoopReplaceableByForEach")
-    @Override protected int compare(BPlusIO<SearchRow> io, long pageAddr, int idx,
-        SearchRow row) throws IgniteCheckedException {
+    @Override protected int compare(BPlusIO<GridH2SearchRow> io, long pageAddr, int idx,
+        GridH2SearchRow row) throws IgniteCheckedException {
         if (inlineSize() == 0)
             return compareRows(getRow(io, pageAddr, idx), row);
         else {
@@ -239,7 +262,7 @@
             }
 
             if (lastIdxUsed == cols.length)
-                return 0;
+                return mvccCompare((H2RowLinkIO)io, pageAddr, idx, row);
 
             SearchRow rowData = getRow(io, pageAddr, idx);
 
@@ -251,7 +274,7 @@
 
                 if (v2 == null) {
                     // Can't compare further.
-                    return 0;
+                    return mvccCompare((H2RowLinkIO)io, pageAddr, idx, row);
                 }
 
                 Value v1 = rowData.getValue(idx0);
@@ -262,7 +285,7 @@
                     return InlineIndexHelper.fixSort(c, col.sortType);
             }
 
-            return 0;
+            return mvccCompare((H2RowLinkIO)io, pageAddr, idx, row);
         }
     }
 
@@ -273,7 +296,8 @@
      * @param r2 Row 2.
      * @return Compare result: see {@link Comparator#compare(Object, Object)} for values.
      */
-    public int compareRows(SearchRow r1, SearchRow r2) {
+    public int compareRows(GridH2SearchRow r1, GridH2SearchRow r2) {
+        assert !mvccEnabled || r2.indexSearchRow() || MvccUtils.mvccVersionIsValid(r2.mvccCoordinatorVersion(), r2.mvccCounter()) : r2;
         if (r1 == r2)
             return 0;
 
@@ -285,7 +309,7 @@
 
             if (v1 == null || v2 == null) {
                 // Can't compare further.
-                return 0;
+                return mvccCompare(r1, r2);
             }
 
             int c = compareValues(v1, v2);
@@ -294,7 +318,47 @@
                 return InlineIndexHelper.fixSort(c, cols[i].sortType);
         }
 
-        return 0;
+        return mvccCompare(r1, r2);
+    }
+
+    /**
+     * @param io IO.
+     * @param pageAddr Page address.
+     * @param idx Item index.
+     * @param r2 Search row.
+     * @return Comparison result.
+     */
+    private int mvccCompare(H2RowLinkIO io, long pageAddr, int idx, GridH2SearchRow r2) {
+        if (!mvccEnabled || r2.indexSearchRow())
+            return 0;
+
+        long crd = io.getMvccCoordinatorVersion(pageAddr, idx);
+        long cntr = io.getMvccCounter(pageAddr, idx);
+        int opCntr = io.getMvccOperationCounter(pageAddr, idx);
+
+        assert MvccUtils.mvccVersionIsValid(crd, cntr, opCntr);
+
+        return -MvccUtils.compare(crd, cntr, opCntr, r2);  // descending order
+    }
+
+    /**
+     * @param r1 First row.
+     * @param r2 Second row.
+     * @return Comparison result.
+     */
+    private int mvccCompare(GridH2SearchRow r1, GridH2SearchRow r2) {
+        if (!mvccEnabled || r2.indexSearchRow())
+            return 0;
+
+        long crdVer1 = r1.mvccCoordinatorVersion();
+        long crdVer2 = r2.mvccCoordinatorVersion();
+
+        int c = -Long.compare(crdVer1, crdVer2);
+
+        if (c != 0)
+            return c;
+
+        return -Long.compare(r1.mvccCounter(), r2.mvccCounter());
     }
 
     /**
@@ -303,4 +367,9 @@
      * @return Comparison result.
      */
     public abstract int compareValues(Value v1, Value v2);
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(H2Tree.class, this, "super", super.toString());
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeFilterClosure.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeFilterClosure.java
new file mode 100644
index 0000000..e583546
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeFilterClosure.java
@@ -0,0 +1,109 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageIdUtils;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+import org.apache.ignite.internal.transactions.IgniteTxMvccVersionCheckedException;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter;
+
+import static org.apache.ignite.internal.pagemem.PageIdUtils.pageId;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.isVisible;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+
+/**
+ *
+ */
+public class H2TreeFilterClosure implements H2Tree.TreeRowClosure<GridH2SearchRow, GridH2Row> {
+    /** */
+    private final MvccSnapshot mvccSnapshot;
+
+    /** */
+    private final IndexingQueryCacheFilter filter;
+
+    /** */
+    private final GridCacheContext cctx;
+
+    /**
+     * @param filter Cache filter.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param cctx Cache context.
+     */
+    public H2TreeFilterClosure(IndexingQueryCacheFilter filter, MvccSnapshot mvccSnapshot, GridCacheContext cctx) {
+        assert (filter != null || mvccSnapshot != null) && cctx != null ;
+
+        this.filter = filter;
+        this.mvccSnapshot = mvccSnapshot;
+        this.cctx = cctx;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean apply(BPlusTree<GridH2SearchRow, GridH2Row> tree, BPlusIO<GridH2SearchRow> io,
+        long pageAddr, int idx)  throws IgniteCheckedException {
+        return (filter  == null || applyFilter((H2RowLinkIO)io, pageAddr, idx))
+            && (mvccSnapshot == null || applyMvcc((H2RowLinkIO)io, pageAddr, idx));
+    }
+
+    /**
+     * @param io Row IO.
+     * @param pageAddr Page address.
+     * @param idx Item index.
+     * @return {@code True} if row passes the filter.
+     */
+    private boolean applyFilter(H2RowLinkIO io, long pageAddr, int idx) {
+        assert filter != null;
+
+        return filter.applyPartition(PageIdUtils.partId(pageId(io.getLink(pageAddr, idx))));
+    }
+
+    /**
+     * @param io Row IO.
+     * @param pageAddr Page address.
+     * @param idx Item index.
+     * @return {@code True} if row passes the filter.
+     */
+    private boolean applyMvcc(H2RowLinkIO io, long pageAddr, int idx) throws IgniteCheckedException {
+        assert io.storeMvccInfo() : io;
+
+        long rowCrdVer = io.getMvccCoordinatorVersion(pageAddr, idx);
+        long rowCntr = io.getMvccCounter(pageAddr, idx);
+        int rowOpCntr = io.getMvccOperationCounter(pageAddr, idx);
+
+        assert mvccVersionIsValid(rowCrdVer, rowCntr, rowOpCntr);
+
+        try {
+            return isVisible(cctx, mvccSnapshot, rowCrdVer, rowCntr, rowOpCntr, io.getLink(pageAddr, idx));
+        }
+        catch (IgniteTxMvccVersionCheckedException ignored) {
+            return false; // The row is going to be removed.
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return S.toString(H2TreeFilterClosure.class, this);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java
index 2441ff1..ab6f42a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/H2TreeIndex.java
@@ -20,36 +20,34 @@
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
-import java.util.NoSuchElementException;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteSystemProperties;
-import org.apache.ignite.internal.pagemem.PageIdUtils;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.persistence.IgniteCacheDatabaseSharedManager;
 import org.apache.ignite.internal.processors.cache.persistence.RootPage;
 import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
 import org.apache.ignite.internal.processors.query.h2.H2Cursor;
 import org.apache.ignite.internal.processors.query.h2.H2RowCache;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Cursor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2IndexBase;
-import org.apache.ignite.internal.processors.query.h2.database.io.H2RowLinkIO;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
 import org.apache.ignite.internal.util.IgniteTree;
 import org.apache.ignite.internal.util.lang.GridCursor;
 import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.spi.indexing.IndexingQueryFilter;
 import org.apache.ignite.spi.indexing.IndexingQueryCacheFilter;
+import org.apache.ignite.spi.indexing.IndexingQueryFilter;
 import org.h2.engine.Session;
 import org.h2.index.Cursor;
 import org.h2.index.IndexType;
 import org.h2.index.SingleRowCursor;
 import org.h2.message.DbException;
-import org.h2.result.Row;
 import org.h2.result.SearchRow;
 import org.h2.result.SortOrder;
 import org.h2.table.Column;
@@ -139,7 +137,9 @@
                         cols,
                         inlineIdxs,
                         computeInlineSize(inlineIdxs, inlineSize),
-                        rowCache) {
+                        cctx.mvccEnabled(),
+                        rowCache,
+                        cctx.kernalContext().failure()) {
                         @Override public int compareValues(Value v1, Value v2) {
                             return v1 == v2 ? 0 : table.compareTypeSafe(v1, v2);
                         }
@@ -190,21 +190,22 @@
     /** {@inheritDoc} */
     @Override public Cursor find(Session ses, SearchRow lower, SearchRow upper) {
         try {
-            IndexingQueryCacheFilter filter = partitionFilter(threadLocalFilter());
+            assert lower == null || lower instanceof GridH2SearchRow : lower;
+            assert upper == null || upper instanceof GridH2SearchRow : upper;
 
             int seg = threadLocalSegment();
 
             H2Tree tree = treeForRead(seg);
 
-            if (indexType.isPrimaryKey() && lower != null && upper != null && tree.compareRows(lower, upper) == 0) {
-                GridH2Row row = tree.findOne(lower, filter);
+            if (!cctx.mvccEnabled() && indexType.isPrimaryKey() && lower != null && upper != null &&
+                tree.compareRows((GridH2SearchRow)lower, (GridH2SearchRow)upper) == 0) {
+                GridH2Row row = tree.findOne((GridH2SearchRow)lower, filter(GridH2QueryContext.get()), null);
 
-                return (row == null) ? EMPTY_CURSOR : new SingleRowCursor(row);
+                return (row == null) ? GridH2Cursor.EMPTY : new SingleRowCursor(row);
             }
             else {
-                GridCursor<GridH2Row> cursor = tree.find(lower, upper, filter);
-
-                return new H2Cursor(cursor);
+                return new H2Cursor(tree.find((GridH2SearchRow)lower,
+                    (GridH2SearchRow)upper, filter(GridH2QueryContext.get()), null));
             }
         }
         catch (IgniteCheckedException e) {
@@ -256,6 +257,8 @@
 
     /** {@inheritDoc} */
     @Override public GridH2Row remove(SearchRow row) {
+        assert row instanceof GridH2SearchRow : row;
+
         try {
             InlineIndexHelper.setCurrentInlineIndexes(inlineIdxs);
 
@@ -265,7 +268,7 @@
 
             assert cctx.shared().database().checkpointLockIsHeldByThread();
 
-            return tree.remove(row);
+            return tree.remove((GridH2SearchRow)row);
         }
         catch (IgniteCheckedException e) {
             throw DbException.convert(e);
@@ -278,6 +281,8 @@
     /** {@inheritDoc} */
     @Override public boolean removex(SearchRow row) {
         try {
+            assert row instanceof GridH2SearchRow : row;
+
             InlineIndexHelper.setCurrentInlineIndexes(inlineIdxs);
 
             int seg = segmentForRow(row);
@@ -286,7 +291,7 @@
 
             assert cctx.shared().database().checkpointLockIsHeldByThread();
 
-            return tree.removex(row);
+            return tree.removex((GridH2SearchRow)row);
         }
         catch (IgniteCheckedException e) {
             throw DbException.convert(e);
@@ -314,9 +319,9 @@
 
             H2Tree tree = treeForRead(seg);
 
-            BPlusTree.TreeRowClosure<SearchRow, GridH2Row> filter = filterClosure();
+            GridH2QueryContext qctx = GridH2QueryContext.get();
 
-            return tree.size(filter);
+            return tree.size(filter(qctx));
         }
         catch (IgniteCheckedException e) {
             throw DbException.convert(e);
@@ -336,13 +341,10 @@
     /** {@inheritDoc} */
     @Override public Cursor findFirstOrLast(Session session, boolean b) {
         try {
-            int seg = threadLocalSegment();
+            H2Tree tree = treeForRead(threadLocalSegment());
+            GridH2QueryContext qctx = GridH2QueryContext.get();
 
-            H2Tree tree = treeForRead(seg);
-
-            GridH2Row row = b ? tree.findFirst(): tree.findLast();
-
-            return new SingleRowCursor(row);
+            return new SingleRowCursor(b ? tree.findFirst(filter(qctx)): tree.findLast(filter(qctx)));
         }
         catch (IgniteCheckedException e) {
             throw DbException.convert(e);
@@ -381,16 +383,13 @@
     @Override protected H2Cursor doFind0(
         IgniteTree t,
         @Nullable SearchRow first,
-        boolean includeFirst,
         @Nullable SearchRow last,
-        IndexingQueryFilter filter) {
+        BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter) {
         try {
-            IndexingQueryCacheFilter pf = partitionFilter(filter);
-
-            GridCursor<GridH2Row> range = t.find(first, last, pf);
+            GridCursor<GridH2Row> range = ((BPlusTree)t).find(first, last, filter, null);
 
             if (range == null)
-                range = GridH2IndexBase.EMPTY_CURSOR;
+                range = EMPTY_CURSOR;
 
             return new H2Cursor(range);
         }
@@ -399,6 +398,26 @@
         }
     }
 
+    /** {@inheritDoc} */
+    @Override protected BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter(GridH2QueryContext qctx) {
+        if (qctx == null) {
+            assert !cctx.mvccEnabled();
+
+            return null;
+        }
+
+        IndexingQueryFilter f = qctx.filter();
+        IndexingQueryCacheFilter p = f == null ? null : f.forCache(getTable().cacheName());
+        MvccSnapshot v = qctx.mvccSnapshot();
+
+        assert !cctx.mvccEnabled() || v != null;
+
+        if(p == null && v == null)
+            return null;
+
+        return new H2TreeFilterClosure(p, v, cctx);
+    }
+
     /**
      * @param inlineIdxs Inline index helpers.
      * @param cfgInlineSize Inline size from cache config.
@@ -456,63 +475,6 @@
         cctx.offheap().dropRootPageForIndex(cctx.cacheId(), name + "%" + segIdx);
     }
 
-    /**
-     * Returns a filter which returns true for entries belonging to a particular partition.
-     *
-     * @param qryFilter Factory that creates a predicate for filtering entries for a particular cache.
-     * @return The filter or null if the filter is not needed (e.g., if the cache is not partitioned).
-     */
-    @Nullable private IndexingQueryCacheFilter partitionFilter(@Nullable IndexingQueryFilter qryFilter) {
-        if (qryFilter == null)
-            return null;
-
-        String cacheName = getTable().cacheName();
-
-        return qryFilter.forCache(cacheName);
-    }
-
-    /**
-     * An adapter from {@link IndexingQueryCacheFilter} to {@link BPlusTree.TreeRowClosure} which
-     * filters entries that belong to the current partition.
-     */
-    private static class PartitionFilterTreeRowClosure implements BPlusTree.TreeRowClosure<SearchRow, GridH2Row> {
-        /** Filter. */
-        private final IndexingQueryCacheFilter filter;
-
-        /**
-         * Creates a {@link BPlusTree.TreeRowClosure} adapter based on the given partition filter.
-         *
-         * @param filter The partition filter.
-         */
-        public PartitionFilterTreeRowClosure(IndexingQueryCacheFilter filter) {
-            this.filter = filter;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean apply(BPlusTree<SearchRow, GridH2Row> tree,
-            BPlusIO<SearchRow> io, long pageAddr, int idx) throws IgniteCheckedException {
-
-            H2RowLinkIO h2io = (H2RowLinkIO)io;
-
-            return filter.applyPartition(
-                PageIdUtils.partId(
-                    PageIdUtils.pageId(
-                        h2io.getLink(pageAddr, idx))));
-        }
-    }
-
-    /**
-     * Returns a filter to apply to rows in the current index to obtain only the
-     * ones owned by the this cache.
-     *
-     * @return The filter, which returns true for rows owned by this cache.
-     */
-    @Nullable private BPlusTree.TreeRowClosure<SearchRow, GridH2Row> filterClosure() {
-        final IndexingQueryCacheFilter filter = partitionFilter(threadLocalFilter());
-
-        return filter != null ? new PartitionFilterTreeRowClosure(filter) : null;
-    }
-
     /** {@inheritDoc} */
     @Override public void refreshColumnIds() {
         super.refreshColumnIds();
@@ -527,29 +489,4 @@
         for (int pos = 0; pos < inlineHelpers.size(); ++pos)
             inlineIdxs.set(pos, inlineHelpers.get(pos));
     }
-
-    /**
-     * Empty cursor.
-     */
-    public static final Cursor EMPTY_CURSOR = new Cursor() {
-        /** {@inheritDoc} */
-        @Override public Row get() {
-            throw DbException.convert(new NoSuchElementException("Empty cursor"));
-        }
-
-        /** {@inheritDoc} */
-        @Override public SearchRow getSearchRow() {
-            throw DbException.convert(new NoSuchElementException("Empty cursor"));
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean next() {
-            return false;
-        }
-
-        /** {@inheritDoc} */
-        @Override public boolean previous() {
-            return false;
-        }
-    };
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java
new file mode 100644
index 0000000..fbca917
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasInnerIO.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.apache.ignite.internal.processors.query.h2.database.InlineIndexHelper;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+
+/**
+ * Inner page for H2 row references.
+ */
+public abstract class AbstractH2ExtrasInnerIO extends BPlusInnerIO<GridH2SearchRow> implements H2RowLinkIO {
+    /** Payload size. */
+    protected final int payloadSize;
+
+    /** */
+    public static void register() {
+        register(false);
+
+        register(true);
+    }
+
+    /**
+     * @param mvcc Mvcc flag.
+     */
+    private static void register(boolean mvcc) {
+        short type = mvcc ? PageIO.T_H2_EX_REF_MVCC_INNER_START : PageIO.T_H2_EX_REF_INNER_START;
+
+        for (short payload = 1; payload <= PageIO.MAX_PAYLOAD_SIZE; payload++) {
+            IOVersions<? extends AbstractH2ExtrasInnerIO> io =
+                getVersions((short)(type + payload - 1), payload, mvcc);
+
+            PageIO.registerH2ExtraInner(io, mvcc);
+        }
+    }
+
+    /**
+     * @param payload Payload size.
+     * @param mvccEnabled Mvcc flag.
+     * @return IOVersions for given payload.
+     */
+    @SuppressWarnings("unchecked")
+    public static IOVersions<? extends BPlusInnerIO<GridH2SearchRow>> getVersions(int payload, boolean mvccEnabled) {
+        assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE;
+
+        if (payload == 0)
+            return mvccEnabled ? H2MvccInnerIO.VERSIONS : H2InnerIO.VERSIONS;
+        else
+            return (IOVersions<BPlusInnerIO<GridH2SearchRow>>)PageIO.getInnerVersions((short)(payload - 1), mvccEnabled);
+    }
+
+    /**
+     * @param type Type.
+     * @param payload Payload size.
+     * @param mvcc Mvcc flag.
+     * @return Instance of IO versions.
+     */
+    private static IOVersions<? extends AbstractH2ExtrasInnerIO> getVersions(short type, short payload, boolean mvcc) {
+        return new IOVersions<>(mvcc ? new H2MvccExtrasInnerIO(type, 1, payload) : new H2ExtrasInnerIO(type, 1, payload));
+    }
+
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param itemSize Item size.
+     * @param payloadSize Payload size.
+     */
+    AbstractH2ExtrasInnerIO(short type, int ver, int itemSize, int payloadSize) {
+        super(type, ver, true, itemSize + payloadSize);
+
+        this.payloadSize = payloadSize;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ForLoopReplaceableByForEach")
+    @Override public final void storeByOffset(long pageAddr, int off, GridH2SearchRow row) {
+        GridH2Row row0 = (GridH2Row)row;
+
+        assert row0.link() != 0 : row0;
+
+        List<InlineIndexHelper> inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes();
+
+        assert inlineIdxs != null : "no inline index helpers";
+
+
+        int fieldOff = 0;
+
+        for (int i = 0; i < inlineIdxs.size(); i++) {
+            InlineIndexHelper idx = inlineIdxs.get(i);
+
+            int size = idx.put(pageAddr, off + fieldOff, row.getValue(idx.columnIndex()), payloadSize - fieldOff);
+
+            if (size == 0)
+                break;
+
+            fieldOff += size;
+        }
+
+        H2IOUtils.storeRow(row0, pageAddr, off + payloadSize, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final GridH2SearchRow getLookupRow(BPlusTree<GridH2SearchRow, ?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
+        long link = getLink(pageAddr, idx);
+
+        assert link != 0;
+
+        if (storeMvccInfo()) {
+            long mvccCrdVer = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = getMvccOperationCounter(pageAddr, idx);
+
+            return ((H2Tree)tree).createRowFromLink(link, mvccCrdVer, mvccCntr, mvccOpCntr);
+        }
+
+        return ((H2Tree)tree).createRowFromLink(link);
+    }
+
+    /** {@inheritDoc} */
+    @Override public final void store(long dstPageAddr, int dstIdx, BPlusIO<GridH2SearchRow> srcIo, long srcPageAddr, int srcIdx) {
+        int srcOff = srcIo.offset(srcIdx);
+
+        byte[] payload = PageUtils.getBytes(srcPageAddr, srcOff, payloadSize);
+        long link = PageUtils.getLong(srcPageAddr, srcOff + payloadSize);
+
+        assert link != 0;
+
+        int dstOff = offset(dstIdx);
+
+        PageUtils.putBytes(dstPageAddr, dstOff, payload);
+
+        H2IOUtils.store(dstPageAddr, dstOff + payloadSize, srcIo, srcPageAddr, srcIdx, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final long getLink(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java
new file mode 100644
index 0000000..9132795
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2ExtrasLeafIO.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.apache.ignite.internal.processors.query.h2.database.InlineIndexHelper;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+
+/**
+ * Leaf page for H2 row references.
+ */
+public abstract class AbstractH2ExtrasLeafIO extends BPlusLeafIO<GridH2SearchRow> implements H2RowLinkIO {
+    /** Payload size. */
+    protected final int payloadSize;
+
+    /** */
+    public static void register() {
+        register(false);
+
+        register(true);
+    }
+
+    /**
+     * @param mvcc Mvcc flag.
+     */
+    private static void register(boolean mvcc) {
+        short type = mvcc ? PageIO.T_H2_EX_REF_MVCC_LEAF_START : PageIO.T_H2_EX_REF_LEAF_START;
+
+        for (short payload = 1; payload <= PageIO.MAX_PAYLOAD_SIZE; payload++) {
+            IOVersions<? extends AbstractH2ExtrasLeafIO> io =
+                getVersions((short)(type + payload - 1), payload, mvcc);
+
+            PageIO.registerH2ExtraLeaf(io, mvcc);
+        }
+    }
+
+    /**
+     * @param payload Payload size.
+     * @param mvccEnabled Mvcc flag.
+     * @return IOVersions for given payload.
+     */
+    @SuppressWarnings("unchecked")
+    public static IOVersions<? extends BPlusLeafIO<GridH2SearchRow>> getVersions(int payload, boolean mvccEnabled) {
+        assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE;
+
+        if (payload == 0)
+            return mvccEnabled ? H2MvccLeafIO.VERSIONS : H2LeafIO.VERSIONS;
+        else
+            return (IOVersions<BPlusLeafIO<GridH2SearchRow>>)PageIO.getLeafVersions((short)(payload - 1), mvccEnabled);
+    }
+
+    /**
+     * @param type Type.
+     * @param payload Payload size.
+     * @param mvcc Mvcc flag.
+     * @return Versions.
+     */
+    private static IOVersions<? extends AbstractH2ExtrasLeafIO> getVersions(short type, short payload, boolean mvcc) {
+        return new IOVersions<>(mvcc ? new H2MvccExtrasLeafIO(type, 1, payload) : new H2ExtrasLeafIO(type, 1, payload));
+    }
+
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param itemSize Item size.
+     * @param payloadSize Payload size.
+     */
+    AbstractH2ExtrasLeafIO(short type, int ver, int itemSize, int payloadSize) {
+        super(type, ver, itemSize + payloadSize);
+
+        this.payloadSize = payloadSize;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("ForLoopReplaceableByForEach")
+    @Override public final void storeByOffset(long pageAddr, int off, GridH2SearchRow row) {
+        GridH2Row row0 = (GridH2Row)row;
+
+        assert row0.link() != 0;
+
+        List<InlineIndexHelper> inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes();
+
+        assert inlineIdxs != null : "no inline index helpers";
+
+        int fieldOff = 0;
+
+        for (int i = 0; i < inlineIdxs.size(); i++) {
+            InlineIndexHelper idx = inlineIdxs.get(i);
+
+            int size = idx.put(pageAddr, off + fieldOff, row.getValue(idx.columnIndex()), payloadSize - fieldOff);
+
+            if (size == 0)
+                break;
+
+            fieldOff += size;
+        }
+
+        H2IOUtils.storeRow(row0, pageAddr, off + payloadSize, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final void store(long dstPageAddr, int dstIdx, BPlusIO<GridH2SearchRow> srcIo, long srcPageAddr, int srcIdx) {
+        int srcOff = srcIo.offset(srcIdx);
+
+        byte[] payload = PageUtils.getBytes(srcPageAddr, srcOff, payloadSize);
+        long link = PageUtils.getLong(srcPageAddr, srcOff + payloadSize);
+
+        assert link != 0;
+
+        int dstOff = offset(dstIdx);
+
+        PageUtils.putBytes(dstPageAddr, dstOff, payload);
+
+        H2IOUtils.store(dstPageAddr, dstOff + payloadSize, srcIo, srcPageAddr, srcIdx, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final GridH2SearchRow getLookupRow(BPlusTree<GridH2SearchRow, ?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
+        long link = getLink(pageAddr, idx);
+
+        if (storeMvccInfo()) {
+            long mvccCrdVer = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = getMvccOperationCounter(pageAddr, idx);
+
+            return ((H2Tree)tree).createRowFromLink(link, mvccCrdVer, mvccCntr, mvccOpCntr);
+        }
+
+        return ((H2Tree)tree).createRowFromLink(link);
+    }
+
+    /** {@inheritDoc} */
+    @Override public final long getLink(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize);
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java
new file mode 100644
index 0000000..d1d569e
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2InnerIO.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+
+/**
+ * Inner page for H2 row references.
+ */
+public abstract class AbstractH2InnerIO extends BPlusInnerIO<GridH2SearchRow> implements H2RowLinkIO {
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param itemSize Single item size on page.
+     */
+    AbstractH2InnerIO(int type, int ver, int itemSize) {
+        super(type, ver, true, itemSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void storeByOffset(long pageAddr, int off, GridH2SearchRow row) {
+        GridH2Row row0 = (GridH2Row)row;
+
+        H2IOUtils.storeRow(row0, pageAddr, off, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public GridH2SearchRow getLookupRow(BPlusTree<GridH2SearchRow, ?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
+        long link = getLink(pageAddr, idx);
+
+        if (storeMvccInfo()) {
+            long mvccCrdVer = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = getMvccOperationCounter(pageAddr, idx);
+
+            return ((H2Tree)tree).createRowFromLink(link, mvccCrdVer, mvccCntr, mvccOpCntr);
+        }
+
+        return ((H2Tree)tree).createRowFromLink(link);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<GridH2SearchRow> srcIo, long srcPageAddr, int srcIdx) {
+        H2IOUtils.store(dstPageAddr, offset(dstIdx), srcIo, srcPageAddr, srcIdx, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getLink(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx));
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java
new file mode 100644
index 0000000..07f114f
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/AbstractH2LeafIO.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+
+/**
+ * Leaf page for H2 row references.
+ */
+public abstract class AbstractH2LeafIO extends BPlusLeafIO<GridH2SearchRow> implements H2RowLinkIO {
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param itemSize Single item size on page.
+     */
+    AbstractH2LeafIO(int type, int ver, int itemSize) {
+        super(type, ver, itemSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public final void storeByOffset(long pageAddr, int off, GridH2SearchRow row) {
+        GridH2Row row0 = (GridH2Row)row;
+
+        H2IOUtils.storeRow(row0, pageAddr, off, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final void store(long dstPageAddr, int dstIdx, BPlusIO<GridH2SearchRow> srcIo, long srcPageAddr, int srcIdx) {
+        assert srcIo == this;
+
+        H2IOUtils.store(dstPageAddr, offset(dstIdx), srcIo, srcPageAddr, srcIdx, storeMvccInfo());
+    }
+
+    /** {@inheritDoc} */
+    @Override public final GridH2SearchRow getLookupRow(BPlusTree<GridH2SearchRow,?> tree, long pageAddr, int idx)
+        throws IgniteCheckedException {
+        long link = getLink(pageAddr, idx);
+
+        if (storeMvccInfo()) {
+            long mvccCrdVer = getMvccCoordinatorVersion(pageAddr, idx);
+            long mvccCntr = getMvccCounter(pageAddr, idx);
+            int mvccOpCntr = getMvccOperationCounter(pageAddr, idx);
+
+            return ((H2Tree)tree).createRowFromLink(link, mvccCrdVer, mvccCntr, mvccOpCntr);
+        }
+
+        return ((H2Tree)tree).createRowFromLink(link);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getLink(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx));
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java
index 294492d..8dc8c96 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasInnerIO.java
@@ -17,120 +17,17 @@
 
 package org.apache.ignite.internal.processors.query.h2.database.io;
 
-import java.util.List;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.pagemem.PageUtils;
-import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
-import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
-import org.apache.ignite.internal.processors.query.h2.database.InlineIndexHelper;
-import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
-import org.h2.result.SearchRow;
-
 /**
  * Inner page for H2 row references.
  */
-public class H2ExtrasInnerIO extends BPlusInnerIO<SearchRow> implements H2RowLinkIO {
-    /** Payload size. */
-    private final int payloadSize;
-
-    /** */
-    public static void register() {
-        for (short payload = 1; payload <= PageIO.MAX_PAYLOAD_SIZE; payload++)
-            PageIO.registerH2ExtraInner(getVersions((short)(PageIO.T_H2_EX_REF_INNER_START + payload - 1), payload));
-    }
-
-    /**
-     * @param payload Payload size.
-     * @return IOVersions for given payload.
-     */
-    @SuppressWarnings("unchecked")
-    public static IOVersions<? extends BPlusInnerIO<SearchRow>> getVersions(int payload) {
-        assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE;
-
-        if (payload == 0)
-            return H2InnerIO.VERSIONS;
-        else
-            return (IOVersions<BPlusInnerIO<SearchRow>>)PageIO.getInnerVersions((short)(payload - 1));
-    }
-
-    /**
-     * @param type Type.
-     * @param payload Payload size.
-     * @return Instance of IO versions.
-     */
-    private static IOVersions<H2ExtrasInnerIO> getVersions(short type, short payload) {
-        return new IOVersions<>(new H2ExtrasInnerIO(type, 1, payload));
-    }
-
+public class H2ExtrasInnerIO extends AbstractH2ExtrasInnerIO implements H2RowLinkIO {
     /**
      * @param type Page type.
      * @param ver Page format version.
      * @param payloadSize Payload size.
      */
-    private H2ExtrasInnerIO(short type, int ver, int payloadSize) {
-        super(type, ver, true, 8 + payloadSize);
-        this.payloadSize = payloadSize;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ForLoopReplaceableByForEach")
-    @Override public void storeByOffset(long pageAddr, int off, SearchRow row) {
-        GridH2Row row0 = (GridH2Row)row;
-
-        assert row0.link() != 0 : row0;
-
-        List<InlineIndexHelper> inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes();
-
-        assert inlineIdxs != null : "no inline index helpers";
-
-
-        int fieldOff = 0;
-
-        for (int i = 0; i < inlineIdxs.size(); i++) {
-            InlineIndexHelper idx = inlineIdxs.get(i);
-
-            int size = idx.put(pageAddr, off + fieldOff, row.getValue(idx.columnIndex()), payloadSize - fieldOff);
-
-            if (size == 0)
-                break;
-
-            fieldOff += size;
-        }
-
-        PageUtils.putLong(pageAddr, off + payloadSize, row0.link());
-    }
-
-    /** {@inheritDoc} */
-    @Override public SearchRow getLookupRow(BPlusTree<SearchRow, ?> tree, long pageAddr, int idx)
-        throws IgniteCheckedException {
-        long link = getLink(pageAddr, idx);
-
-        assert link != 0;
-
-        return ((H2Tree)tree).createRowFromLink(link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<SearchRow> srcIo, long srcPageAddr, int srcIdx) {
-        int srcOff = srcIo.offset(srcIdx);
-
-        byte[] payload = PageUtils.getBytes(srcPageAddr, srcOff, payloadSize);
-        long link = PageUtils.getLong(srcPageAddr, srcOff + payloadSize);
-
-        assert link != 0;
-
-        int dstOff = offset(dstIdx);
-
-        PageUtils.putBytes(dstPageAddr, dstOff, payload);
-        PageUtils.putLong(dstPageAddr, dstOff + payloadSize, link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
-        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize);
+    H2ExtrasInnerIO(short type, int ver, int payloadSize) {
+        super(type, ver, 8, payloadSize);
     }
 }
+
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java
index 4770295..085f98b 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2ExtrasLeafIO.java
@@ -17,117 +17,16 @@
 
 package org.apache.ignite.internal.processors.query.h2.database.io;
 
-import java.util.List;
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.pagemem.PageUtils;
-import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO;
-import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
-import org.apache.ignite.internal.processors.query.h2.database.InlineIndexHelper;
-import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
-import org.h2.result.SearchRow;
-
 /**
  * Leaf page for H2 row references.
  */
-public class H2ExtrasLeafIO extends BPlusLeafIO<SearchRow> implements H2RowLinkIO {
-    /** Payload size. */
-    private final int payloadSize;
-
-    /** */
-    public static void register() {
-        for (short payload = 1; payload <= PageIO.MAX_PAYLOAD_SIZE; payload++)
-            PageIO.registerH2ExtraLeaf(getVersions((short)(PageIO.T_H2_EX_REF_LEAF_START + payload - 1), payload));
-    }
-
-    /**
-     * @param payload Payload size.
-     * @return IOVersions for given payload.
-     */
-    @SuppressWarnings("unchecked")
-    public static IOVersions<? extends BPlusLeafIO<SearchRow>> getVersions(int payload) {
-        assert payload >= 0 && payload <= PageIO.MAX_PAYLOAD_SIZE;
-
-        if (payload == 0)
-            return H2LeafIO.VERSIONS;
-        else
-            return (IOVersions<BPlusLeafIO<SearchRow>>)PageIO.getLeafVersions((short)(payload - 1));
-    }
-
-    /**
-     * @param type Type.
-     * @param payload Payload size.
-     * @return Versions.
-     */
-    private static IOVersions<H2ExtrasLeafIO> getVersions(short type, short payload) {
-        return new IOVersions<>(new H2ExtrasLeafIO(type, 1, payload));
-    }
-
+public class H2ExtrasLeafIO extends AbstractH2ExtrasLeafIO {
     /**
      * @param type Page type.
      * @param ver Page format version.
      * @param payloadSize Payload size.
      */
-    private H2ExtrasLeafIO(short type, int ver, int payloadSize) {
-        super(type, ver, 8 + payloadSize);
-        this.payloadSize = payloadSize;
-    }
-
-    /** {@inheritDoc} */
-    @SuppressWarnings("ForLoopReplaceableByForEach")
-    @Override public void storeByOffset(long pageAddr, int off, SearchRow row) {
-        GridH2Row row0 = (GridH2Row)row;
-
-        assert row0.link() != 0;
-
-        List<InlineIndexHelper> inlineIdxs = InlineIndexHelper.getCurrentInlineIndexes();
-
-        assert inlineIdxs != null : "no inline index helpers";
-
-        int fieldOff = 0;
-
-        for (int i = 0; i < inlineIdxs.size(); i++) {
-            InlineIndexHelper idx = inlineIdxs.get(i);
-
-            int size = idx.put(pageAddr, off + fieldOff, row.getValue(idx.columnIndex()), payloadSize - fieldOff);
-
-            if (size == 0)
-                break;
-
-            fieldOff += size;
-        }
-
-        PageUtils.putLong(pageAddr, off + payloadSize, row0.link());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<SearchRow> srcIo, long srcPageAddr, int srcIdx) {
-        int srcOff = srcIo.offset(srcIdx);
-
-        byte[] payload = PageUtils.getBytes(srcPageAddr, srcOff, payloadSize);
-        long link = PageUtils.getLong(srcPageAddr, srcOff + payloadSize);
-
-        assert link != 0;
-
-        int dstOff = offset(dstIdx);
-
-        PageUtils.putBytes(dstPageAddr, dstOff, payload);
-        PageUtils.putLong(dstPageAddr, dstOff + payloadSize, link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public SearchRow getLookupRow(BPlusTree<SearchRow, ?> tree, long pageAddr, int idx)
-        throws IgniteCheckedException {
-        long link = getLink(pageAddr, idx);
-
-        return ((H2Tree)tree).createRowFromLink(link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
-        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize);
+    H2ExtrasLeafIO(short type, int ver, int payloadSize) {
+        super(type, ver, 8, payloadSize);
     }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2IOUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2IOUtils.java
new file mode 100644
index 0000000..b6bda36
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2IOUtils.java
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2SearchRow;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.mvccVersionIsValid;
+
+/**
+ *
+ */
+class H2IOUtils {
+    /**
+     *
+     */
+    private H2IOUtils() {}
+
+    /**
+     * @param row Row.
+     * @param pageAddr Page address.
+     * @param off Offset.
+     * @param storeMvcc {@code True} to store mvcc data.
+     */
+    static void storeRow(GridH2Row row, long pageAddr, int off, boolean storeMvcc) {
+        assert row.link() != 0;
+
+        PageUtils.putLong(pageAddr, off, row.link());
+
+        if (storeMvcc) {
+            long mvccCrdVer = row.mvccCoordinatorVersion();
+            long mvccCntr = row.mvccCounter();
+            int mvccOpCntr = row.mvccOperationCounter();
+
+            assert MvccUtils.mvccVersionIsValid(mvccCrdVer, mvccCntr, mvccOpCntr);
+
+            PageUtils.putLong(pageAddr, off + 8, mvccCrdVer);
+            PageUtils.putLong(pageAddr, off + 16, mvccCntr);
+            PageUtils.putInt(pageAddr, off + 24, mvccOpCntr);
+        }
+    }
+
+    /**
+     * @param dstPageAddr Destination page address.
+     * @param dstOff Destination page offset.
+     * @param srcIo Source IO.
+     * @param srcPageAddr Source page address.
+     * @param srcIdx Source index.
+     * @param storeMvcc {@code True} to store mvcc data.
+     */
+    static void store(long dstPageAddr,
+        int dstOff,
+        BPlusIO<GridH2SearchRow> srcIo,
+        long srcPageAddr,
+        int srcIdx,
+        boolean storeMvcc)
+    {
+        H2RowLinkIO rowIo = (H2RowLinkIO)srcIo;
+
+        long link = rowIo.getLink(srcPageAddr, srcIdx);
+
+        PageUtils.putLong(dstPageAddr, dstOff, link);
+
+        if (storeMvcc) {
+            long mvccCrdVer = rowIo.getMvccCoordinatorVersion(srcPageAddr, srcIdx);
+            long mvccCntr = rowIo.getMvccCounter(srcPageAddr, srcIdx);
+            int mvccOpCntr = rowIo.getMvccOperationCounter(srcPageAddr, srcIdx);
+
+            assert MvccUtils.mvccVersionIsValid(mvccCrdVer, mvccCntr, mvccOpCntr);
+
+            PageUtils.putLong(dstPageAddr, dstOff + 8, mvccCrdVer);
+            PageUtils.putLong(dstPageAddr, dstOff + 16, mvccCntr);
+            PageUtils.putInt(dstPageAddr, dstOff + 24, mvccOpCntr);
+        }
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java
index cf37bb7..9baff7a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2InnerIO.java
@@ -17,20 +17,12 @@
 
 package org.apache.ignite.internal.processors.query.h2.database.io;
 
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.pagemem.PageUtils;
-import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusInnerIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
-import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
-import org.h2.result.SearchRow;
 
 /**
  * Inner page for H2 row references.
  */
-public class H2InnerIO extends BPlusInnerIO<SearchRow> implements H2RowLinkIO {
+public class H2InnerIO extends AbstractH2InnerIO {
     /** */
     public static final IOVersions<H2InnerIO> VERSIONS = new IOVersions<>(
         new H2InnerIO(1)
@@ -40,35 +32,6 @@
      * @param ver Page format version.
      */
     private H2InnerIO(int ver) {
-        super(T_H2_REF_INNER, ver, true, 8);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void storeByOffset(long pageAddr, int off, SearchRow row) {
-        GridH2Row row0 = (GridH2Row)row;
-
-        assert row0.link() != 0;
-
-        PageUtils.putLong(pageAddr, off, row0.link());
-    }
-
-    /** {@inheritDoc} */
-    @Override public SearchRow getLookupRow(BPlusTree<SearchRow,?> tree, long pageAddr, int idx)
-        throws IgniteCheckedException {
-        long link = getLink(pageAddr, idx);
-
-        return ((H2Tree)tree).createRowFromLink(link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<SearchRow> srcIo, long srcPageAddr, int srcIdx) {
-        long link = ((H2RowLinkIO)srcIo).getLink(srcPageAddr, srcIdx);
-
-        PageUtils.putLong(dstPageAddr, offset(dstIdx), link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
-        return PageUtils.getLong(pageAddr, offset(idx));
+        super(T_H2_REF_INNER, ver, 8);
     }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java
index 55a980f..8954de0 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2LeafIO.java
@@ -17,20 +17,12 @@
 
 package org.apache.ignite.internal.processors.query.h2.database.io;
 
-import org.apache.ignite.IgniteCheckedException;
-import org.apache.ignite.internal.pagemem.PageUtils;
-import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusIO;
-import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusLeafIO;
 import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
-import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
-import org.apache.ignite.internal.processors.query.h2.opt.GridH2Row;
-import org.h2.result.SearchRow;
 
 /**
  * Leaf page for H2 row references.
  */
-public class H2LeafIO extends BPlusLeafIO<SearchRow> implements H2RowLinkIO {
+public class H2LeafIO extends AbstractH2LeafIO {
     /** */
     public static final IOVersions<H2LeafIO> VERSIONS = new IOVersions<>(
         new H2LeafIO(1)
@@ -39,36 +31,7 @@
     /**
      * @param ver Page format version.
      */
-    protected H2LeafIO(int ver) {
+    private H2LeafIO(int ver) {
         super(T_H2_REF_LEAF, ver, 8);
     }
-
-    /** {@inheritDoc} */
-    @Override public void storeByOffset(long pageAddr, int off, SearchRow row) {
-        GridH2Row row0 = (GridH2Row)row;
-
-        assert row0.link() != 0;
-
-        PageUtils.putLong(pageAddr, off, row0.link());
-    }
-
-    /** {@inheritDoc} */
-    @Override public void store(long dstPageAddr, int dstIdx, BPlusIO<SearchRow> srcIo, long srcPageAddr, int srcIdx) {
-        assert srcIo == this;
-
-        PageUtils.putLong(dstPageAddr, offset(dstIdx), getLink(srcPageAddr, srcIdx));
-    }
-
-    /** {@inheritDoc} */
-    @Override public SearchRow getLookupRow(BPlusTree<SearchRow,?> tree, long pageAddr, int idx)
-        throws IgniteCheckedException {
-        long link = getLink(pageAddr, idx);
-
-        return ((H2Tree)tree).createRowFromLink(link);
-    }
-
-    /** {@inheritDoc} */
-    @Override public long getLink(long pageAddr, int idx) {
-        return PageUtils.getLong(pageAddr, offset(idx));
-    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasInnerIO.java
new file mode 100644
index 0000000..ee6dc2a
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasInnerIO.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+
+/**
+ *
+ */
+class H2MvccExtrasInnerIO extends AbstractH2ExtrasInnerIO {
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param payloadSize Payload size.
+     */
+    H2MvccExtrasInnerIO(short type, int ver, int payloadSize) {
+        super(type, ver, 28, payloadSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + payloadSize + 24);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean storeMvccInfo() {
+        return true;
+    }
+}
+
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java
new file mode 100644
index 0000000..60a1598
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccExtrasLeafIO.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+
+/**
+ * Leaf page for H2 row references.
+ */
+class H2MvccExtrasLeafIO extends AbstractH2ExtrasLeafIO {
+    /**
+     * @param type Page type.
+     * @param ver Page format version.
+     * @param payloadSize Payload size.
+     */
+    H2MvccExtrasLeafIO(short type, int ver, int payloadSize) {
+        super(type, ver, 28, payloadSize);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + payloadSize + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + payloadSize + 24);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean storeMvccInfo() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccInnerIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccInnerIO.java
new file mode 100644
index 0000000..dbfe784
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccInnerIO.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+
+/**
+ * Inner page for H2 row references.
+ */
+public class H2MvccInnerIO extends AbstractH2InnerIO {
+    /** */
+    public static final IOVersions<H2MvccInnerIO> VERSIONS = new IOVersions<>(
+        new H2MvccInnerIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private H2MvccInnerIO(int ver) {
+        super(T_H2_MVCC_REF_INNER, ver, 28);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 24);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean storeMvccInfo() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java
new file mode 100644
index 0000000..c7cd998
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2MvccLeafIO.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.database.io;
+
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.IOVersions;
+
+/**
+ *
+ */
+public class H2MvccLeafIO extends AbstractH2LeafIO {
+    /** */
+    public static final IOVersions<H2MvccLeafIO> VERSIONS = new IOVersions<>(
+        new H2MvccLeafIO(1)
+    );
+
+    /**
+     * @param ver Page format version.
+     */
+    private H2MvccLeafIO(int ver) {
+        super(T_H2_MVCC_REF_LEAF, ver, 28);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 8);
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getMvccCounter(long pageAddr, int idx) {
+        return PageUtils.getLong(pageAddr, offset(idx) + 16);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int getMvccOperationCounter(long pageAddr, int idx) {
+        return PageUtils.getInt(pageAddr, offset(idx) + 24);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean storeMvccInfo() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java
index ce69197..1942069 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/database/io/H2RowLinkIO.java
@@ -27,4 +27,38 @@
      * @return Row link.
      */
     public long getLink(long pageAddr, int idx);
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc coordinator version.
+     */
+    public default long getMvccCoordinatorVersion(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc counter.
+     */
+    public default long getMvccCounter(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @param pageAddr Page address.
+     * @param idx Index.
+     * @return Mvcc operation counter.
+     */
+    public default int getMvccOperationCounter(long pageAddr, int idx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
+     * @return {@code True} if IO stores mvcc information.
+     */
+    public default boolean storeMvccInfo() {
+        return false;
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java
index c617d30..8688c4fbd9 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/ddl/DdlStatementsProcessor.java
@@ -25,8 +25,10 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
+import java.util.UUID;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteCluster;
+import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.QueryIndex;
 import org.apache.ignite.cache.QueryIndexType;
@@ -36,6 +38,8 @@
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
 import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
@@ -94,6 +98,10 @@
     /** Indexing. */
     IgniteH2Indexing idx;
 
+    /** Is backward compatible handling of UUID through DDL enabled. */
+    private static final boolean handleUuidAsByte =
+            IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_SQL_UUID_DDL_BYTE_FORMAT, false);
+
     /**
      * Initialize message handlers and this' fields needed for further operation.
      *
@@ -111,19 +119,20 @@
      * @param sql Original SQL.
      * @param cmd Command.
      * @return Result.
-     * @throws IgniteCheckedException On error.
      */
     @SuppressWarnings("unchecked")
-    public FieldsQueryCursor<List<?>> runDdlStatement(String sql, SqlCommand cmd) throws IgniteCheckedException {
+    public FieldsQueryCursor<List<?>> runDdlStatement(String sql, SqlCommand cmd) {
         IgniteInternalFuture fut = null;
 
         try {
             isDdlOnSchemaSupported(cmd.schemaName());
 
+            finishActiveTxIfNecessary();
+
             if (cmd instanceof SqlCreateIndexCommand) {
                 SqlCreateIndexCommand cmd0 = (SqlCreateIndexCommand)cmd;
 
-                GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
+                GridH2Table tbl = dataTableWithRetry(cmd0.schemaName(), cmd0.tableName());
 
                 if (tbl == null)
                     throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd0.tableName());
@@ -161,7 +170,7 @@
             else if (cmd instanceof SqlDropIndexCommand) {
                 SqlDropIndexCommand cmd0 = (SqlDropIndexCommand)cmd;
 
-                GridH2Table tbl = idx.dataTableForIndex(cmd0.schemaName(), cmd0.indexName());
+                GridH2Table tbl = dataTableForIndexWithRetry(cmd0.schemaName(), cmd0.indexName());
 
                 if (tbl != null) {
                     isDdlSupported(tbl);
@@ -180,13 +189,7 @@
             else if (cmd instanceof SqlAlterTableCommand) {
                 SqlAlterTableCommand cmd0 = (SqlAlterTableCommand)cmd;
 
-                GridH2Table tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
-
-                if (tbl == null) {
-                    ctx.cache().createMissingQueryCaches();
-
-                    tbl = idx.dataTable(cmd0.schemaName(), cmd0.tableName());
-                }
+                GridH2Table tbl = dataTableWithRetry(cmd0.schemaName(), cmd0.tableName());
 
                 if (tbl == null) {
                     throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND,
@@ -255,14 +258,14 @@
      * @param sql SQL.
      * @param prepared Prepared.
      * @return Cursor on query results.
-     * @throws IgniteCheckedException On error.
      */
     @SuppressWarnings({"unchecked", "ThrowableResultOfMethodCallIgnored"})
-    public FieldsQueryCursor<List<?>> runDdlStatement(String sql, Prepared prepared)
-        throws IgniteCheckedException {
+    public FieldsQueryCursor<List<?>> runDdlStatement(String sql, Prepared prepared) {
         IgniteInternalFuture fut = null;
 
         try {
+            finishActiveTxIfNecessary();
+
             GridSqlStatement stmt0 = new GridSqlQueryParser(false).parse(prepared);
 
             if (stmt0 instanceof GridSqlCreateIndex) {
@@ -270,7 +273,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
+                GridH2Table tbl = dataTableWithRetry(cmd.schemaName(), cmd.tableName());
 
                 if (tbl == null)
                     throw new SchemaOperationException(SchemaOperationException.CODE_TABLE_NOT_FOUND, cmd.tableName());
@@ -309,7 +312,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                GridH2Table tbl = idx.dataTableForIndex(cmd.schemaName(), cmd.indexName());
+                GridH2Table tbl = dataTableForIndexWithRetry(cmd.schemaName(), cmd.indexName());
 
                 if (tbl != null) {
                     isDdlSupported(tbl);
@@ -332,11 +335,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
-                    throw new SchemaOperationException("CREATE TABLE can only be executed on " +
-                        QueryUtils.DFLT_SCHEMA + " schema.");
-
-                GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
+                GridH2Table tbl = dataTableWithRetry(cmd.schemaName(), cmd.tableName());
 
                 if (tbl != null) {
                     if (!cmd.ifNotExists())
@@ -369,17 +368,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                if (!F.eq(QueryUtils.DFLT_SCHEMA, cmd.schemaName()))
-                    throw new SchemaOperationException("DROP TABLE can only be executed on " +
-                        QueryUtils.DFLT_SCHEMA + " schema.");
-
-                GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-
-                if (tbl == null && cmd.ifExists()) {
-                    ctx.cache().createMissingQueryCaches();
-
-                    tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-                }
+                GridH2Table tbl = dataTableWithRetry(cmd.schemaName(), cmd.tableName());
 
                 if (tbl == null) {
                     if (!cmd.ifExists())
@@ -394,13 +383,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-
-                if (tbl == null && cmd.ifTableExists()) {
-                    ctx.cache().createMissingQueryCaches();
-
-                    tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-                }
+                GridH2Table tbl = dataTableWithRetry(cmd.schemaName(), cmd.tableName());
 
                 if (tbl == null) {
                     if (!cmd.ifTableExists())
@@ -430,7 +413,7 @@
                         }
 
                         QueryField field = new QueryField(col.columnName(),
-                            DataType.getTypeClassName(col.column().getType()),
+                            getTypeClassName(col),
                             col.column().isNullable(), col.defaultValue(),
                             col.precision(), col.scale());
 
@@ -455,13 +438,7 @@
 
                 isDdlOnSchemaSupported(cmd.schemaName());
 
-                GridH2Table tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-
-                if (tbl == null && cmd.ifTableExists()) {
-                    ctx.cache().createMissingQueryCaches();
-
-                    tbl = idx.dataTable(cmd.schemaName(), cmd.tableName());
-                }
+                GridH2Table tbl = dataTableWithRetry(cmd.schemaName(), cmd.tableName());
 
                 if (tbl == null) {
                     if (!cmd.ifTableExists())
@@ -471,6 +448,10 @@
                 else {
                     assert tbl.rowDescriptor() != null;
 
+                    if (tbl.cache().mvccEnabled())
+                        throw new IgniteSQLException("Cannot drop column(s) with enabled MVCC. " +
+                            "Operation is unsupported at the moment.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
                     if (QueryUtils.isSqlType(tbl.rowDescriptor().type().valueClass()))
                         throw new SchemaOperationException("Cannot drop column(s) because table was created " +
                             "with " + PARAM_WRAP_VALUE + "=false option.");
@@ -533,6 +514,46 @@
     }
 
     /**
+     * Get table by name optionally creating missing query caches.
+     *
+     * @param schemaName Schema name.
+     * @param tableName Table name.
+     * @return Table or {@code null} if none found.
+     * @throws IgniteCheckedException If failed.
+     */
+    private GridH2Table dataTableWithRetry(String schemaName, String tableName) throws IgniteCheckedException {
+        GridH2Table tbl = idx.dataTable(schemaName, tableName);
+
+        if (tbl == null) {
+            ctx.cache().createMissingQueryCaches();
+
+            tbl = idx.dataTable(schemaName, tableName);
+        }
+
+        return tbl;
+    }
+
+    /**
+     * Get table by name optionally creating missing query caches.
+     *
+     * @param schemaName Schema name.
+     * @param indexName Index name.
+     * @return Table or {@code null} if none found.
+     * @throws IgniteCheckedException If failed.
+     */
+    private GridH2Table dataTableForIndexWithRetry(String schemaName, String indexName) throws IgniteCheckedException {
+        GridH2Table tbl = idx.dataTableForIndex(schemaName, indexName);
+
+        if (tbl == null) {
+            ctx.cache().createMissingQueryCaches();
+
+            tbl = idx.dataTableForIndex(schemaName, indexName);
+        }
+
+        return tbl;
+    }
+
+    /**
      * Check if schema supports DDL statement.
      *
      * @param schemaName Schema name.
@@ -559,6 +580,23 @@
     }
 
     /**
+     * Commits active transaction if exists.
+     *
+     * @throws IgniteCheckedException If failed.
+     */
+    private void finishActiveTxIfNecessary() throws IgniteCheckedException {
+        try (GridNearTxLocal tx = MvccUtils.tx(ctx)) {
+            if (tx == null)
+                return;
+
+            if (!tx.isRollbackOnly())
+                tx.commit();
+            else
+                tx.rollback();
+        }
+    }
+
+    /**
      * @return {@link IgniteSQLException} with the message same as of {@code this}'s and
      */
     private IgniteSQLException convert(SchemaOperationException e) {
@@ -620,14 +658,15 @@
 
         HashMap<String, Object> dfltValues = new HashMap<>();
 
-        Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = new HashMap<>();
+        Map<String, Integer> precision = new HashMap<>();
+        Map<String, Integer> scale = new HashMap<>();
 
         for (Map.Entry<String, GridSqlColumn> e : createTbl.columns().entrySet()) {
             GridSqlColumn gridCol = e.getValue();
 
             Column col = gridCol.column();
 
-            res.addQueryField(e.getKey(), DataType.getTypeClassName(col.getType()), null);
+            res.addQueryField(e.getKey(), getTypeClassName(gridCol), null);
 
             if (!col.isNullable()) {
                 if (notNullFields == null)
@@ -641,15 +680,27 @@
             if (dfltVal != null)
                 dfltValues.put(e.getKey(), dfltVal);
 
-            if (col.getType() == Value.DECIMAL)
-                decimalInfo.put(e.getKey(), F.t((int)col.getPrecision(), col.getScale()));
+            if (col.getType() == Value.DECIMAL) {
+                precision.put(e.getKey(), (int)col.getPrecision());
+
+                scale.put(e.getKey(), col.getScale());
+            }
+
+            if (col.getType() == Value.STRING || 
+                col.getType() == Value.STRING_FIXED || 
+                col.getType() == Value.STRING_IGNORECASE) {
+                precision.put(e.getKey(), (int)col.getPrecision());
+            }
         }
 
         if (!F.isEmpty(dfltValues))
             res.setDefaultFieldValues(dfltValues);
 
-        if (!F.isEmpty(decimalInfo))
-            res.setDecimalInfo(decimalInfo);
+        if (!F.isEmpty(precision))
+            res.setFieldsPrecision(precision);
+
+        if (!F.isEmpty(scale))
+            res.setFieldsScale(scale);
 
         String valTypeName = QueryUtils.createTableValueTypeName(createTbl.schemaName(), createTbl.tableName());
         String keyTypeName = QueryUtils.createTableKeyTypeName(valTypeName);
@@ -666,7 +717,7 @@
         if (!createTbl.wrapKey()) {
             GridSqlColumn pkCol = createTbl.columns().get(createTbl.primaryKeyColumns().iterator().next());
 
-            keyTypeName = DataType.getTypeClassName(pkCol.column().getType());
+            keyTypeName = getTypeClassName(pkCol);
 
             res.setKeyFieldName(pkCol.columnName());
         }
@@ -686,7 +737,7 @@
 
             assert valCol != null;
 
-            valTypeName = DataType.getTypeClassName(valCol.column().getType());
+            valTypeName = getTypeClassName(valCol);
 
             res.setValueFieldName(valCol.columnName());
         }
@@ -713,4 +764,23 @@
         return cmd instanceof CreateIndex || cmd instanceof DropIndex || cmd instanceof CreateTable ||
             cmd instanceof DropTable || cmd instanceof AlterTableAlterColumn;
     }
+
+    /**
+     * Helper function for obtaining type class name for H2.
+     *
+     * @param col Column.
+     * @return Type class name.
+     */
+    private static String getTypeClassName(GridSqlColumn col) {
+        int type = col.column().getType();
+
+        switch (type) {
+            case Value.UUID :
+                if (!handleUuidAsByte)
+                    return UUID.class.getName();
+
+            default:
+                return DataType.getTypeClassName(type);
+        }
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java
index 161ff4a..ebf5848 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/DmlAstUtils.java
@@ -98,8 +98,6 @@
 
             GridSqlArray[] args = new GridSqlArray[cols.length];
 
-            boolean noQry = true;
-
             for (int i = 0; i < cols.length; i++) {
                 GridSqlArray arr = new GridSqlArray(rows.size());
 
@@ -123,18 +121,10 @@
             for (GridSqlElement[] row : rows) {
                 assert cols.length == row.length;
 
-                for (int i = 0; i < row.length; i++) {
-                    GridSqlElement el = row[i];
-
-                    noQry &= (el instanceof GridSqlConst || el instanceof GridSqlParameter);
-
+                for (int i = 0; i < row.length; i++)
                     args[i].addChild(row[i]);
-                }
             }
 
-            if (noQry)
-                return null;
-
             return sel;
         }
         else {
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java
index dcceff3..346ad48 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/FastUpdate.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.internal.processors.cache.GridCacheAdapter;
 import org.apache.ignite.internal.processors.query.h2.UpdateResult;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement;
+import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.Nullable;
 
 /**
@@ -101,4 +102,20 @@
 
         return res ? UpdateResult.ONE : UpdateResult.ZERO;
     }
+
+    /**
+     *
+     * @param args Query Parameters.
+     * @return Key and value.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgniteBiTuple getRow(Object[] args) throws IgniteCheckedException {
+        Object key = keyArg.get(args);
+
+        assert key != null;
+
+        Object newVal = newValArg.get(args);
+
+        return new IgniteBiTuple(key, newVal);
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
index 98fbb97..ba4b12b 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlan.java
@@ -19,20 +19,28 @@
 
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.binary.BinaryObject;
 import org.apache.ignite.binary.BinaryObjectBuilder;
+import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.query.EnlistOperation;
 import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
 import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.processors.query.QueryUtils;
+import org.apache.ignite.internal.processors.query.UpdateSourceIterator;
+import org.apache.ignite.internal.processors.query.h2.H2ConnectionWrapper;
+import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
+import org.apache.ignite.internal.processors.query.h2.ThreadLocalObjectPool;
 import org.apache.ignite.internal.processors.query.h2.UpdateResult;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
+import org.apache.ignite.internal.util.GridCloseableIteratorAdapterEx;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.T3;
 import org.apache.ignite.lang.IgniteBiTuple;
@@ -358,6 +366,13 @@
     }
 
     /**
+     * @return {@code True} if DML can be fast processed.
+     */
+    public boolean fastResult() {
+        return fastUpdate != null;
+    }
+
+    /**
      * Process fast DML operation if possible.
      *
      * @param args QUery arguments.
@@ -468,6 +483,48 @@
     }
 
     /**
+     * Create iterator for transaction.
+     *
+     * @param idx Indexing.
+     * @param cur Cursor.
+     * @return Iterator.
+     */
+    public UpdateSourceIterator<?> iteratorForTransaction(IgniteH2Indexing idx, QueryCursor<List<?>> cur) {
+        switch (mode) {
+            case MERGE:
+                return new InsertIterator(idx, cur, this, EnlistOperation.UPSERT);
+            case INSERT:
+                return new InsertIterator(idx, cur, this, EnlistOperation.INSERT);
+            case UPDATE:
+                return new UpdateIterator(idx, cur, this, EnlistOperation.UPDATE);
+            case DELETE:
+                return new DeleteIterator(idx, cur, this, EnlistOperation.DELETE);
+
+            default:
+                throw new IllegalArgumentException(String.valueOf(mode));
+        }
+    }
+
+    /**
+     * @param updMode Update plan mode.
+     * @return Operation.
+     */
+    public static EnlistOperation enlistOperation(UpdateMode updMode) {
+        switch (updMode) {
+            case INSERT:
+                return EnlistOperation.INSERT;
+            case MERGE:
+                return EnlistOperation.UPSERT;
+            case UPDATE:
+                return EnlistOperation.UPDATE;
+            case DELETE:
+                return EnlistOperation.DELETE;
+            default:
+                throw new IllegalArgumentException(String.valueOf(updMode));
+        }
+    }
+
+    /**
      * @return Update mode.
      */
     public UpdateMode mode() {
@@ -508,4 +565,180 @@
     public boolean isLocalSubquery() {
         return isLocSubqry;
     }
+
+    /**
+     * @param args Query parameters.
+     * @return Iterator.
+     * @throws IgniteCheckedException If failed.
+     */
+    public IgniteBiTuple getFastRow(Object[] args) throws IgniteCheckedException {
+        if (fastUpdate != null)
+            return fastUpdate.getRow(args);
+
+        return null;
+    }
+
+    /**
+     * @param row Row.
+     * @return Resulting entry.
+     * @throws IgniteCheckedException If failed.
+     */
+    public Object processRowForTx(List<?> row) throws IgniteCheckedException {
+        switch (mode()) {
+            case INSERT:
+            case MERGE:
+                return processRow(row);
+
+            case UPDATE: {
+                T3<Object, Object, Object> row0 = processRowForUpdate(row);
+
+                return new IgniteBiTuple<>(row0.get1(), row0.get3());
+            }
+            case DELETE:
+                return row.get(0);
+
+            default:
+                throw new UnsupportedOperationException(String.valueOf(mode()));
+        }
+    }
+
+    /**
+     * Abstract iterator.
+     */
+    private abstract static class AbstractIterator extends GridCloseableIteratorAdapterEx<Object>
+        implements UpdateSourceIterator<Object> {
+        /** */
+        private final IgniteH2Indexing idx;
+
+        /** */
+        private final QueryCursor<List<?>> cur;
+
+        /** */
+        protected final UpdatePlan plan;
+
+        /** */
+        private final Iterator<List<?>> it;
+
+        /** */
+        private final EnlistOperation op;
+
+        /** */
+        private volatile ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> conn;
+
+        /**
+         * @param idx Indexing.
+         * @param cur Query cursor.
+         * @param plan Update plan.
+         * @param op Operation.
+         */
+        private AbstractIterator(IgniteH2Indexing idx, QueryCursor<List<?>> cur, UpdatePlan plan, EnlistOperation op) {
+            this.idx = idx;
+            this.cur = cur;
+            this.plan = plan;
+            this.op = op;
+
+            it = cur.iterator();
+        }
+
+        /** {@inheritDoc} */
+        @Override public EnlistOperation operation() {
+            return op;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void beforeDetach() {
+            ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> conn0 = conn = idx.detach();
+
+            if (isClosed())
+                conn0.recycle();
+        }
+
+        /** {@inheritDoc} */
+        @Override protected void onClose() {
+            cur.close();
+
+            ThreadLocalObjectPool.Reusable<H2ConnectionWrapper> conn0 = conn;
+
+            if (conn0 != null)
+                conn0.recycle();
+        }
+
+        /** {@inheritDoc} */
+        @Override protected Object onNext() throws IgniteCheckedException {
+            return process(it.next());
+        }
+
+        /** {@inheritDoc} */
+        @Override protected boolean onHasNext() throws IgniteCheckedException {
+            return it.hasNext();
+        }
+
+        /** */
+        protected abstract Object process(List<?> row) throws IgniteCheckedException;
+    }
+
+    /** */
+    private static final class UpdateIterator extends AbstractIterator {
+        /** */
+        private static final long serialVersionUID = -4949035950470324961L;
+
+        /**
+         * @param idx Indexing.
+         * @param cur Query cursor.
+         * @param plan Update plan.
+         * @param op Operation.
+         */
+        private UpdateIterator(IgniteH2Indexing idx, QueryCursor<List<?>> cur, UpdatePlan plan, EnlistOperation op) {
+            super(idx, cur, plan, op);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected Object process(List<?> row) throws IgniteCheckedException {
+            T3<Object, Object, Object> row0 = plan.processRowForUpdate(row);
+
+            return new IgniteBiTuple<>(row0.get1(), row0.get3());
+        }
+    }
+
+    /** */
+    private static final class DeleteIterator extends AbstractIterator {
+        /** */
+        private static final long serialVersionUID = -4949035950470324961L;
+
+        /**
+         * @param idx Indexing.
+         * @param cur Query cursor.
+         * @param plan Update plan.
+         * @param op Operation.
+         */
+        private DeleteIterator(IgniteH2Indexing idx, QueryCursor<List<?>> cur, UpdatePlan plan, EnlistOperation op) {
+            super(idx, cur, plan, op);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected Object process(List<?> row) throws IgniteCheckedException {
+            return row.get(0);
+        }
+    }
+
+    /** */
+    private static final class InsertIterator extends AbstractIterator {
+        /** */
+        private static final long serialVersionUID = -4949035950470324961L;
+
+        /**
+         * @param idx Indexing.
+         * @param cur Query cursor.
+         * @param plan Update plan.
+         * @param op Operation.
+         */
+        private InsertIterator(IgniteH2Indexing idx, QueryCursor<List<?>> cur, UpdatePlan plan, EnlistOperation op) {
+            super(idx, cur, plan, op);
+        }
+
+        /** {@inheritDoc} */
+        @Override protected Object process(List<?> row) throws IgniteCheckedException {
+            return plan.processRow(row);
+        }
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java
index 1079005..5e44f27 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/dml/UpdatePlanBuilder.java
@@ -30,6 +30,7 @@
 import org.apache.ignite.binary.BinaryObjectBuilder;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
 import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
@@ -41,11 +42,15 @@
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlAst;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlConst;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlDelete;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlInsert;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlMerge;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlParameter;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter;
@@ -91,15 +96,48 @@
     public static UpdatePlan planForStatement(Prepared prepared, boolean loc, IgniteH2Indexing idx,
         @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQry, @Nullable Integer errKeysPos)
         throws IgniteCheckedException {
-        GridSqlStatement stmt = new GridSqlQueryParser(false).parse(prepared);
+        assert !prepared.isQuery();
+
+        GridSqlQueryParser parser = new GridSqlQueryParser(false);
+
+        GridSqlStatement stmt = parser.parse(prepared);
+
+        boolean mvccEnabled = false;
+
+        GridCacheContext cctx = null;
+
+        // check all involved caches
+        for (Object o : parser.objectsMap().values()) {
+            if (o instanceof GridSqlInsert)
+                o = ((GridSqlInsert)o).into();
+            else if (o instanceof GridSqlMerge)
+                o = ((GridSqlMerge)o).into();
+            else if (o instanceof GridSqlDelete)
+                o = ((GridSqlDelete)o).from();
+
+            if (o instanceof GridSqlAlias)
+                o = GridSqlAlias.unwrap((GridSqlAst)o);
+
+            if (o instanceof GridSqlTable) {
+                if (((GridSqlTable)o).dataTable() == null) { // Check for virtual tables.
+                    throw new IgniteSQLException("Operation not supported for table '" +
+                        ((GridSqlTable)o).tableName() + "'", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+                }
+
+                if (cctx == null)
+                    mvccEnabled = (cctx = (((GridSqlTable)o).dataTable()).cache()).mvccEnabled();
+                else if (((GridSqlTable)o).dataTable().cache().mvccEnabled() != mvccEnabled)
+                    MvccUtils.throwAtomicityModesMismatchException(cctx, ((GridSqlTable)o).dataTable().cache());
+            }
+        }
 
         if (stmt instanceof GridSqlMerge || stmt instanceof GridSqlInsert)
-            return planForInsert(stmt, loc, idx, conn, fieldsQry);
+            return planForInsert(stmt, loc, idx, mvccEnabled, conn, fieldsQry);
         else if (stmt instanceof GridSqlUpdate || stmt instanceof GridSqlDelete)
-            return planForUpdate(stmt, loc, idx, conn, fieldsQry, errKeysPos);
+            return planForUpdate(stmt, loc, idx, mvccEnabled, conn, fieldsQry, errKeysPos);
         else
             throw new IgniteSQLException("Unsupported operation: " + prepared.getSQL(),
-                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
     }
 
     /**
@@ -108,6 +146,7 @@
      * @param stmt INSERT or MERGE statement.
      * @param loc Local query flag.
      * @param idx Indexing.
+     * @param mvccEnabled Mvcc flag.
      * @param conn Connection.
      * @param fieldsQuery Original query.
      * @return Update plan.
@@ -115,8 +154,9 @@
      */
     @SuppressWarnings("ConstantConditions")
     private static UpdatePlan planForInsert(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx,
-        @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery) throws IgniteCheckedException {
-        GridSqlQuery sel;
+        boolean mvccEnabled, @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery)
+        throws IgniteCheckedException {
+        GridSqlQuery sel = null;
 
         GridSqlElement target;
 
@@ -140,17 +180,16 @@
 
             GridH2Table h2Tbl = tbl.dataTable();
 
-            if (h2Tbl == null)
-                throw new IgniteSQLException("Operation not supported for table '" + tbl.tableName() + "'",
-                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+            assert h2Tbl != null;
 
             desc = h2Tbl.rowDescriptor();
 
             cols = ins.columns();
-            sel = DmlAstUtils.selectForInsertOrMerge(cols, ins.rows(), ins.query());
 
-            if (sel == null)
+            if (noQuery(ins.rows()))
                 elRows = ins.rows();
+            else
+                sel = DmlAstUtils.selectForInsertOrMerge(cols, ins.rows(), ins.query());
 
             isTwoStepSubqry = (ins.query() != null);
             rowsNum = isTwoStepSubqry ? 0 : ins.rows().size();
@@ -164,10 +203,11 @@
             desc = tbl.dataTable().rowDescriptor();
 
             cols = merge.columns();
-            sel = DmlAstUtils.selectForInsertOrMerge(cols, merge.rows(), merge.query());
 
-            if (sel == null)
+            if (noQuery(merge.rows()))
                 elRows = merge.rows();
+            else
+                sel = DmlAstUtils.selectForInsertOrMerge(cols, merge.rows(), merge.query());
 
             isTwoStepSubqry = (merge.query() != null);
             rowsNum = isTwoStepSubqry ? 0 : merge.rows().size();
@@ -233,7 +273,8 @@
         String selectSql = sel != null ? sel.getSQL() : null;
 
         DmlDistributedPlanInfo distributed = (rowsNum == 0 && !F.isEmpty(selectSql)) ?
-            checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName()) : null;
+            checkPlanCanBeDistributed(idx, mvccEnabled, conn, fieldsQuery, loc, selectSql,
+            tbl.dataTable().cacheName()) : null;
 
         UpdateMode mode = stmt instanceof GridSqlMerge ? UpdateMode.MERGE : UpdateMode.INSERT;
 
@@ -276,11 +317,37 @@
     }
 
     /**
+     * @param rows Insert rows.
+     * @return {@code True} if no query optimisation may be used.
+     */
+    private static boolean noQuery(List<GridSqlElement[]> rows) {
+        if (F.isEmpty(rows))
+            return false;
+
+        boolean noQry = true;
+
+        for (int i = 0; i < rows.size(); i++) {
+            GridSqlElement[] row = rows.get(i);
+
+            for (int i1 = 0; i1 < row.length; i1++) {
+                GridSqlElement el = row[i1];
+
+                if(!(noQry &=  (el instanceof GridSqlConst || el instanceof GridSqlParameter)))
+                    return noQry;
+
+            }
+        }
+
+        return true;
+    }
+
+    /**
      * Prepare update plan for UPDATE or DELETE.
      *
      * @param stmt UPDATE or DELETE statement.
      * @param loc Local query flag.
      * @param idx Indexing.
+     * @param mvccEnabled Mvcc flag.
      * @param conn Connection.
      * @param fieldsQuery Original query.
      * @param errKeysPos index to inject param for re-run keys at. Null if it's not a re-run plan.
@@ -288,8 +355,8 @@
      * @throws IgniteCheckedException if failed.
      */
     private static UpdatePlan planForUpdate(GridSqlStatement stmt, boolean loc, IgniteH2Indexing idx,
-        @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery, @Nullable Integer errKeysPos)
-        throws IgniteCheckedException {
+        boolean mvccEnabled, @Nullable Connection conn, @Nullable SqlFieldsQuery fieldsQuery,
+        @Nullable Integer errKeysPos) throws IgniteCheckedException {
         GridSqlElement target;
 
         FastUpdate fastUpdate;
@@ -319,9 +386,7 @@
 
         GridH2Table h2Tbl = tbl.dataTable();
 
-        if (h2Tbl == null)
-            throw new IgniteSQLException("Operation not supported for table '" + tbl.tableName() + "'",
-                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+        assert h2Tbl != null;
 
         GridH2RowDescriptor desc = h2Tbl.rowDescriptor();
 
@@ -381,7 +446,8 @@
                 String selectSql = sel.getSQL();
 
                 DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null :
-                    checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
+                    checkPlanCanBeDistributed(idx, mvccEnabled, conn, fieldsQuery, loc, selectSql,
+                    tbl.dataTable().cacheName());
 
                 return new UpdatePlan(
                     UpdateMode.UPDATE,
@@ -406,7 +472,8 @@
                 String selectSql = sel.getSQL();
 
                 DmlDistributedPlanInfo distributed = F.isEmpty(selectSql) ? null :
-                    checkPlanCanBeDistributed(idx, conn, fieldsQuery, loc, selectSql, tbl.dataTable().cacheName());
+                    checkPlanCanBeDistributed(idx, mvccEnabled, conn, fieldsQuery, loc, selectSql,
+                    tbl.dataTable().cacheName());
 
                 return new UpdatePlan(
                     UpdateMode.DELETE,
@@ -513,6 +580,7 @@
      * @param colIdx Column index if key or value is present in columns list, {@code -1} if it's not.
      * @param hasProps Whether column list affects individual properties of key or value.
      * @param key Whether supplier should be created for key or for value.
+     * @param forUpdate {@code FOR UPDATE} flag.
      * @return Closure returning key or value.
      * @throws IgniteCheckedException If failed.
      */
@@ -705,6 +773,7 @@
      * Checks whether the given update plan can be distributed and returns additional info.
      *
      * @param idx Indexing.
+     * @param mvccEnabled Mvcc flag.
      * @param conn Connection.
      * @param fieldsQry Initial update query.
      * @param loc Local query flag.
@@ -713,11 +782,10 @@
      * @return distributed update plan info, or {@code null} if cannot be distributed.
      * @throws IgniteCheckedException if failed.
      */
-    private static DmlDistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing idx,
+    private static DmlDistributedPlanInfo checkPlanCanBeDistributed(IgniteH2Indexing idx, boolean mvccEnabled,
         Connection conn, SqlFieldsQuery fieldsQry, boolean loc, String selectQry, String cacheName)
         throws IgniteCheckedException {
-
-        if (loc || !isSkipReducerOnUpdateQuery(fieldsQry) || DmlUtils.isBatched(fieldsQry))
+        if (loc || (!mvccEnabled && !isSkipReducerOnUpdateQuery(fieldsQry)) || DmlUtils.isBatched(fieldsQry))
             return null;
 
         assert conn != null;
@@ -732,7 +800,8 @@
                     fieldsQry.getArgs(),
                     fieldsQry.isCollocated(),
                     fieldsQry.isDistributedJoins(),
-                    fieldsQry.isEnforceJoinOrder(), idx);
+                    fieldsQry.isEnforceJoinOrder(),
+                    idx);
 
                 boolean distributed = qry.skipMergeTable() &&  qry.mapQueries().size() == 1 &&
                     !qry.mapQueries().get(0).hasSubQueries();
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java
index 77bd69a..6fd11c1 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2IndexBase.java
@@ -28,6 +28,7 @@
 import org.apache.ignite.internal.managers.communication.GridMessageListener;
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
 import org.apache.ignite.internal.processors.query.h2.H2Cursor;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeRequest;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2IndexRangeResponse;
@@ -46,7 +47,6 @@
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.logger.NullLogger;
 import org.apache.ignite.plugin.extensions.communication.Message;
-import org.apache.ignite.spi.indexing.IndexingQueryFilter;
 import org.h2.engine.Session;
 import org.h2.index.BaseIndex;
 import org.h2.index.Cursor;
@@ -266,15 +266,6 @@
         return (GridH2Table)super.getTable();
     }
 
-    /**
-     * @return Filter for currently running query or {@code null} if none.
-     */
-    protected static IndexingQueryFilter threadLocalFilter() {
-        GridH2QueryContext qctx = GridH2QueryContext.get();
-
-        return qctx != null ? qctx.filter() : null;
-    }
-
     /** {@inheritDoc} */
     @Override public long getDiskSpaceUsed() {
         return 0;
@@ -421,7 +412,7 @@
                     // This is the first request containing all the search rows.
                     assert !msg.bounds().isEmpty() : "empty bounds";
 
-                    src = new RangeSource(msg.bounds(), msg.segment(), qctx.filter());
+                    src = new RangeSource(msg.bounds(), msg.segment(), filter(qctx));
                 }
                 else {
                     // This is request to fetch next portion of data.
@@ -475,6 +466,14 @@
     }
 
     /**
+     * @param qctx Query context.
+     * @return Row filter.
+     */
+    protected BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter(GridH2QueryContext qctx) {
+        throw new UnsupportedOperationException();
+    }
+
+    /**
      * @param node Responded node.
      * @param msg Response message.
      */
@@ -1463,20 +1462,17 @@
         private final int segment;
 
         /** */
-        final IndexingQueryFilter filter;
+        private final BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter;
 
         /** Iterator. */
         Iterator<GridH2Row> iter = emptyIterator();
 
         /**
          * @param bounds Bounds.
+         * @param segment Segment.
          * @param filter Filter.
          */
-        RangeSource(
-            Iterable<GridH2RowRangeBounds> bounds,
-            int segment,
-            IndexingQueryFilter filter
-        ) {
+        RangeSource(Iterable<GridH2RowRangeBounds> bounds, int segment, BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter) {
             this.segment = segment;
             this.filter = filter;
             boundsIter = bounds.iterator();
@@ -1536,7 +1532,7 @@
 
                 IgniteTree t = treeForRead(segment);
 
-                iter = new CursorIteratorWrapper(doFind0(t, first, true, last, filter));
+                iter = new CursorIteratorWrapper(doFind0(t, first, last, filter));
 
                 if (!iter.hasNext()) {
                     // We have to return empty range here.
@@ -1561,7 +1557,6 @@
     /**
      * @param t Tree.
      * @param first Lower bound.
-     * @param includeFirst Whether lower bound should be inclusive.
      * @param last Upper bound always inclusive.
      * @param filter Filter.
      * @return Iterator over rows in given range.
@@ -1569,9 +1564,8 @@
     protected H2Cursor doFind0(
         IgniteTree t,
         @Nullable SearchRow first,
-        boolean includeFirst,
         @Nullable SearchRow last,
-        IndexingQueryFilter filter) {
+        BPlusTree.TreeRowClosure<GridH2SearchRow, GridH2Row> filter) {
         throw new UnsupportedOperationException();
     }
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java
index 291f8c8..e152054 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyRowOnheap.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.query.h2.opt;
 
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.h2.value.Value;
 
@@ -60,4 +61,14 @@
     @Override public long expireTime() {
         return 0;
     }
+
+    /** {@inheritDoc} */
+    @Override public int size() throws IgniteCheckedException {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int headerSize() {
+        throw new UnsupportedOperationException();
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java
index e855536..f966034 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2KeyValueRowOnheap.java
@@ -65,8 +65,10 @@
      * @param valType Value type.
      * @throws IgniteCheckedException If failed.
      */
-    public GridH2KeyValueRowOnheap(GridH2RowDescriptor desc, CacheDataRow row, int keyType, int valType)
-        throws IgniteCheckedException {
+    public GridH2KeyValueRowOnheap(GridH2RowDescriptor desc,
+        CacheDataRow row,
+        int keyType,
+        int valType) throws IgniteCheckedException {
         super(row);
 
         this.desc = desc;
@@ -81,6 +83,11 @@
     }
 
     /** {@inheritDoc} */
+    @Override public boolean indexSearchRow() {
+        return false;
+    }
+
+    /** {@inheritDoc} */
     @Override public int getColumnCount() {
         return DEFAULT_COLUMNS_COUNT + desc.fieldsCount();
     }
@@ -220,4 +227,14 @@
     @Override public final int hashCode() {
         throw new UnsupportedOperationException();
     }
+
+    /** {@inheritDoc} */
+    @Override public int size() throws IgniteCheckedException {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int headerSize() {
+        throw new UnsupportedOperationException();
+    }
 }
\ No newline at end of file
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java
index 00312b8..b008c081 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2MetaTable.java
@@ -284,6 +284,11 @@
                     throw new IllegalStateException("Index: " + idx);
             }
         }
+
+        /** {@inheritDoc} */
+        @Override public boolean indexSearchRow() {
+            return false;
+        }
     }
 
     /**
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java
index fd8a613..d24dc08 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2PlainRowFactory.java
@@ -70,7 +70,7 @@
         /**
          * @param key Key.
          */
-        public RowKey(Value key) {
+        RowKey(Value key) {
             this.key = key;
         }
 
@@ -92,6 +92,11 @@
         }
 
         /** {@inheritDoc} */
+        @Override public boolean indexSearchRow() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
         @Override public String toString() {
             return S.toString(RowKey.class, this);
         }
@@ -138,6 +143,11 @@
         }
 
         /** {@inheritDoc} */
+        @Override public boolean indexSearchRow() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
         @Override public String toString() {
             return S.toString(RowPair.class, this);
         }
@@ -174,6 +184,11 @@
         }
 
         /** {@inheritDoc} */
+        @Override public boolean indexSearchRow() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
         @Override public String toString() {
             return S.toString(RowSimple.class, this);
         }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2QueryContext.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2QueryContext.java
index 7b52ea4..f12c0f3 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2QueryContext.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2QueryContext.java
@@ -26,6 +26,7 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridReservable;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.query.h2.twostep.MapQueryLazyWorker;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
@@ -85,6 +86,9 @@
     private GridH2CollocationModel qryCollocationMdl;
 
     /** */
+    private MvccSnapshot mvccSnapshot;
+
+    /** */
     private MapQueryLazyWorker lazyWorker;
 
     /**
@@ -106,13 +110,34 @@
      * @param segmentId Index segment ID.
      * @param type Query type.
      */
-    public GridH2QueryContext(UUID locNodeId, UUID nodeId, long qryId, int segmentId, GridH2QueryType type) {
+    public GridH2QueryContext(UUID locNodeId,
+        UUID nodeId,
+        long qryId,
+        int segmentId,
+        GridH2QueryType type) {
         assert segmentId == 0 || type == MAP;
 
         key = new Key(locNodeId, nodeId, qryId, segmentId, type);
     }
 
     /**
+     * @return Mvcc snapshot.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
+     * @param mvccSnapshot Mvcc snapshot.
+     * @return {@code this}.
+     */
+    public GridH2QueryContext mvccSnapshot(MvccSnapshot mvccSnapshot) {
+        this.mvccSnapshot = mvccSnapshot;
+
+        return this;
+    }
+
+    /**
      * @return Type.
      */
     public GridH2QueryType type() {
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
index 8b1b711..2133e1a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Row.java
@@ -27,7 +27,7 @@
  */
 public abstract class GridH2Row extends GridH2SearchRowAdapter implements CacheDataRow {
     /** Row. */
-    private CacheDataRow row;
+    protected final CacheDataRow row;
 
     /**
      * @param row Row.
@@ -78,11 +78,57 @@
 
     /** {@inheritDoc} */
     @Override public int hash() {
-        throw new UnsupportedOperationException();
+        return row.hash();
     }
 
     /** {@inheritDoc} */
     @Override public int cacheId() {
         return row.cacheId();
     }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return row.mvccCoordinatorVersion();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return row.mvccCounter();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return row.mvccOperationCounter();
+    }
+
+    /** {@inheritDoc} */
+    public byte mvccTxState() {
+        return row.mvccTxState();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCoordinatorVersion() {
+        return row.newMvccCoordinatorVersion();
+    }
+
+    /** {@inheritDoc} */
+    @Override public long newMvccCounter() {
+        return row.newMvccCounter();
+    }
+
+    /** {@inheritDoc} */
+    @Override public int newMvccOperationCounter() {
+        return row.newMvccOperationCounter();
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte newMvccTxState() {
+        return row.newMvccTxState();
+    }
+
+
+    /** {@inheritDoc} */
+    @Override public boolean indexSearchRow() {
+        return false;
+    }
 }
\ No newline at end of file
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java
index 1d915e5..23f3ba4 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2RowDescriptor.java
@@ -29,6 +29,8 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.processors.cache.CacheObject;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
 import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
 import org.apache.ignite.internal.processors.query.GridQueryProperty;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
@@ -36,7 +38,6 @@
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
 import org.h2.message.DbException;
 import org.h2.result.SearchRow;
-import org.h2.result.SimpleRow;
 import org.h2.util.LocalDateTimeUtils;
 import org.h2.value.DataType;
 import org.h2.value.Value;
@@ -58,6 +59,7 @@
 import org.h2.value.ValueTime;
 import org.h2.value.ValueTimestamp;
 import org.h2.value.ValueUuid;
+import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.DEFAULT_COLUMNS_COUNT;
 import static org.apache.ignite.internal.processors.query.h2.opt.GridH2KeyValueRowOnheap.KEY_COL;
@@ -280,8 +282,9 @@
         GridH2Row row;
 
         try {
-            if (dataRow.value() == null) // Only can happen for remove operation, can create simple search row.
+            if (dataRow.value() == null) { // Only can happen for remove operation, can create simple search row.
                 row = new GridH2KeyRowOnheap(dataRow, wrap(dataRow.key(), keyType));
+            }
             else
                 row = new GridH2KeyValueRowOnheap(this, dataRow, keyType, valType);
         }
@@ -465,7 +468,7 @@
         copyAliasColumnData(data, KEY_COL, keyAliasColId);
         copyAliasColumnData(data, VAL_COL, valAliasColId);
 
-        return new SimpleRow(data);
+        return GridH2PlainRowFactory.create(data);
     }
 
     /**
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRow.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRow.java
new file mode 100644
index 0000000..5de6216
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRow.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.opt;
+
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersionAware;
+import org.apache.ignite.internal.processors.query.h2.database.H2Tree;
+import org.h2.result.Row;
+
+/**
+ *
+ */
+public interface GridH2SearchRow extends Row, MvccVersionAware {
+    /**
+     * @return {@code True} for rows used for index search (as opposed to rows stored in {@link H2Tree}.
+     */
+    public boolean indexSearchRow();
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java
index 24a90b3..2e512b1 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2SearchRowAdapter.java
@@ -17,15 +17,20 @@
 
 package org.apache.ignite.internal.processors.query.h2.opt;
 
+import org.apache.ignite.internal.processors.cache.mvcc.txlog.TxState;
 import org.h2.result.Row;
 import org.h2.result.SearchRow;
 import org.h2.store.Data;
 import org.h2.value.Value;
 
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_CRD_COUNTER_NA;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.MVCC_OP_COUNTER_NA;
+
 /**
  * Dummy H2 search row adadpter.
  */
-public abstract class GridH2SearchRowAdapter implements Row {
+public abstract class GridH2SearchRowAdapter implements GridH2SearchRow {
     /** {@inheritDoc} */
     @Override public void setKeyAndVersion(SearchRow old) {
         throw new UnsupportedOperationException();
@@ -100,4 +105,24 @@
     @Override public Value[] getValueList() {
         throw new UnsupportedOperationException();
     }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCoordinatorVersion() {
+        return MVCC_CRD_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long mvccCounter() {
+        return MVCC_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int mvccOperationCounter() {
+        return MVCC_OP_COUNTER_NA;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte mvccTxState() {
+        return TxState.NA;
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java
index 15be253..a612b63 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridH2Table.java
@@ -24,7 +24,6 @@
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.LongAdder;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReadWriteLock;
@@ -442,12 +441,12 @@
      * @param prevRowAvailable Whether previous row is available.
      * @throws IgniteCheckedException If failed.
      */
-    public void update(CacheDataRow row, @Nullable CacheDataRow prevRow, boolean prevRowAvailable)
-        throws IgniteCheckedException {
+    public void update(CacheDataRow row, @Nullable CacheDataRow prevRow, boolean prevRowAvailable) throws IgniteCheckedException {
         assert desc != null;
 
         GridH2KeyValueRowOnheap row0 = (GridH2KeyValueRowOnheap)desc.createRow(row);
-        GridH2KeyValueRowOnheap prevRow0 = prevRow != null ? (GridH2KeyValueRowOnheap)desc.createRow(prevRow) : null;
+        GridH2KeyValueRowOnheap prevRow0 = prevRow != null ? (GridH2KeyValueRowOnheap)desc.createRow(prevRow) :
+            null;
 
         row0.prepareValuesCache();
 
@@ -539,7 +538,6 @@
 
     /**
      * Add row to index.
-     *
      * @param idx Index to add row to.
      * @param row Row to add to index.
      * @param prevRow Previous row state, if any.
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneDirectory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneDirectory.java
index 4994e61..a7703be 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneDirectory.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneDirectory.java
@@ -75,7 +75,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override public void renameFile(String source, String dest) throws IOException {
+    @Override public void rename(String source, String dest) throws IOException {
         ensureOpen();
 
         GridLuceneFile file = fileMap.get(source);
@@ -89,6 +89,16 @@
     }
 
     /** {@inheritDoc} */
+    @Override public void syncMetaData() throws IOException {
+        // Noop. No meta data sync needed as all data is in-memory.
+    }
+
+    /** {@inheritDoc} */
+    @Override public IndexOutput createTempOutput(String prefix, String suffix, IOContext ctx) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    /** {@inheritDoc} */
     @Override public final long fileLength(String name) throws IOException {
         ensureOpen();
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java
index b5d2456..02f4a60 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneIndex.java
@@ -38,7 +38,7 @@
 import org.apache.lucene.analysis.standard.StandardAnalyzer;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
-import org.apache.lucene.document.LongField;
+import org.apache.lucene.document.LongPoint;
 import org.apache.lucene.document.StoredField;
 import org.apache.lucene.document.StringField;
 import org.apache.lucene.document.TextField;
@@ -51,7 +51,6 @@
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.IndexSearcher;
-import org.apache.lucene.search.NumericRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
@@ -201,7 +200,7 @@
 
             doc.add(new StoredField(VER_FIELD_NAME, ver.toString().getBytes()));
 
-            doc.add(new LongField(EXPIRATION_TIME_FIELD_NAME, expires, Field.Store.YES));
+            doc.add(new LongPoint(EXPIRATION_TIME_FIELD_NAME, expires));
 
             // Next implies remove than add atomically operation.
             writer.updateDocument(term, doc);
@@ -255,7 +254,7 @@
             }
 
             //We can cache reader\searcher and change this to 'openIfChanged'
-            reader = DirectoryReader.open(writer, true);
+            reader = DirectoryReader.open(writer);
         }
         catch (IOException e) {
             throw new IgniteCheckedException(e);
@@ -274,8 +273,7 @@
 //            parser.setAllowLeadingWildcard(true);
 
             // Filter expired items.
-            Query filter = NumericRangeQuery.newLongRange(EXPIRATION_TIME_FIELD_NAME, U.currentTimeMillis(),
-                null, false, false);
+            Query filter = LongPoint.newRangeQuery(EXPIRATION_TIME_FIELD_NAME, U.currentTimeMillis(), Long.MAX_VALUE);
 
             BooleanQuery query = new BooleanQuery.Builder()
                 .add(parser.parse(qry), BooleanClause.Occur.MUST)
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneOutputStream.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneOutputStream.java
index d8f09df..ada3d9e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneOutputStream.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/opt/GridLuceneOutputStream.java
@@ -66,7 +66,7 @@
      * @param f File.
      */
     public GridLuceneOutputStream(GridLuceneFile f) {
-        super("RAMOutputStream(name=\"noname\")");
+        super("RAMOutputStream(name=\"noname\")", "noname");
 
         file = f;
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
index 133333e2..18c068a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQueryParser.java
@@ -19,6 +19,7 @@
 
 import java.lang.reflect.Field;
 import java.sql.PreparedStatement;
+import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;
@@ -42,6 +43,7 @@
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Table;
 import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.lang.IgniteUuid;
 import org.h2.command.Command;
 import org.h2.command.CommandContainer;
 import org.h2.command.CommandInterface;
@@ -99,6 +101,7 @@
 import org.h2.table.TableFilter;
 import org.h2.table.TableView;
 import org.h2.value.DataType;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND;
@@ -152,6 +155,15 @@
     private static final Getter<Select, int[]> GROUP_INDEXES = getter(Select.class, "groupIndex");
 
     /** */
+    private static final Getter<Select, Boolean> SELECT_IS_FOR_UPDATE = getter(Select.class, "isForUpdate");
+
+    /** */
+    private static final Getter<Select, Boolean> SELECT_IS_GROUP_QUERY = getter(Select.class, "isGroupQuery");
+
+    /** */
+    private static final Getter<SelectUnion, Boolean> UNION_IS_FOR_UPDATE = getter(SelectUnion.class, "isForUpdate");
+
+    /** */
     private static final Getter<Operation, Integer> OPERATION_TYPE = getter(Operation.class, "opType");
 
     /** */
@@ -527,7 +539,7 @@
      * @return {@code true} in case of multiple statements.
      */
     public static boolean checkMultipleStatements(PreparedStatement stmt) {
-        Command cmd = COMMAND.get((JdbcPreparedStatement)stmt);
+        Command cmd = extractCommand(stmt);
 
         return ORG_H2_COMMAND_COMMAND_LIST.equals(cmd.getClass().getName());
     }
@@ -537,7 +549,7 @@
      * @return Parsed select.
      */
     public static Prepared prepared(PreparedStatement stmt) {
-        Command cmd = COMMAND.get((JdbcPreparedStatement)stmt);
+        Command cmd = extractCommand(stmt);
 
         assert cmd instanceof CommandContainer;
 
@@ -549,21 +561,99 @@
      * @return Parsed select.
      */
     public static PreparedWithRemaining preparedWithRemaining(PreparedStatement stmt) {
-        Command cmd = COMMAND.get((JdbcPreparedStatement)stmt);
+        Command cmd = extractCommand(stmt);
 
         if (cmd instanceof CommandContainer)
             return new PreparedWithRemaining(PREPARED.get(cmd), null);
         else {
             Class<?> cmdCls = cmd.getClass();
 
-            if (cmdCls.getName().equals(ORG_H2_COMMAND_COMMAND_LIST)) {
+            if (cmdCls.getName().equals(ORG_H2_COMMAND_COMMAND_LIST))
                 return new PreparedWithRemaining(PREPARED.get(LIST_COMMAND.get(cmd)), REMAINING.get(cmd));
-            }
             else
                 throw new IgniteSQLException("Unexpected statement command");
         }
     }
 
+    /** */
+    private static Command extractCommand(PreparedStatement stmt) {
+        try {
+            return COMMAND.get(stmt.unwrap(JdbcPreparedStatement.class));
+        } catch (SQLException e) {
+            throw new IgniteSQLException(e);
+        }
+    }
+
+    /**
+     * @param p Prepared.
+     * @return Whether {@code p} is an {@code SELECT FOR UPDATE} query.
+     */
+    public static boolean isForUpdateQuery(Prepared p) {
+        boolean union;
+
+        if (p.getClass() == Select.class)
+            union = false;
+        else if (p.getClass() == SelectUnion.class)
+            union = true;
+        else
+            return false;
+
+        boolean forUpdate = (!union && SELECT_IS_FOR_UPDATE.get((Select)p)) ||
+            (union && UNION_IS_FOR_UPDATE.get((SelectUnion)p));
+
+        if (union && forUpdate)
+            throw new IgniteSQLException("SELECT UNION FOR UPDATE is not supported.",
+                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+        return forUpdate;
+    }
+
+    /**
+     * @param p Statement to rewrite, if needed.
+     * @param inTx Whether there is an active transaction.
+     * @return Query with {@code key} and {@code val} columns appended to the list of columns,
+     *     if it's an {@code FOR UPDATE} query, or {@code null} if nothing has to be done.
+     */
+    @NotNull public static String rewriteQueryForUpdateIfNeeded(Prepared p, boolean inTx) {
+        GridSqlStatement gridStmt = new GridSqlQueryParser(false).parse(p);
+        return rewriteQueryForUpdateIfNeeded(gridStmt, inTx);
+    }
+
+    /**
+     * @param stmt Statement to rewrite, if needed.
+     * @param inTx Whether there is an active transaction.
+     * @return Query with {@code key} and {@code val} columns appended to the list of columns,
+     *     if it's an {@code FOR UPDATE} query, or {@code null} if nothing has to be done.
+     */
+    @NotNull public static String rewriteQueryForUpdateIfNeeded(GridSqlStatement stmt, boolean inTx) {
+        // We have checked above that it's not an UNION query, so it's got to be SELECT.
+        assert stmt instanceof GridSqlSelect;
+
+        GridSqlSelect sel = (GridSqlSelect)stmt;
+
+        // How'd we get here otherwise?
+        assert sel.isForUpdate();
+
+        if (inTx) {
+            GridSqlAst from = sel.from();
+
+            GridSqlTable gridTbl = from instanceof GridSqlTable ? (GridSqlTable)from :
+                ((GridSqlAlias)from).child();
+
+            GridH2Table tbl = gridTbl.dataTable();
+
+            Column keyCol = tbl.getColumn(0);
+
+            sel.addColumn(new GridSqlAlias("_key_" + IgniteUuid.vmId(),
+                new GridSqlColumn(keyCol, null, keyCol.getName()), true), true);
+        }
+
+        // We need to remove this flag for final flag we'll feed to H2.
+        sel.forUpdate(false);
+
+        return sel.getSQL();
+    }
+
     /**
      * @param qry Query expression to parse.
      * @return Subquery AST.
@@ -611,7 +701,7 @@
             // We can't cache simple tables because otherwise it will be the same instance for all
             // table filters. Thus we will not be able to distinguish one table filter from another.
             // Table here is semantically equivalent to a table filter.
-            if (tbl instanceof TableBase)
+            if (tbl instanceof TableBase || tbl instanceof MetaTable)
                 return new GridSqlTable(tbl);
 
             // Other stuff can be cached because we will have separate instances in
@@ -662,6 +752,8 @@
 
         TableFilter filter = select.getTopTableFilter();
 
+        boolean isForUpdate = SELECT_IS_FOR_UPDATE.get(select);
+
         do {
             assert0(filter != null, select);
             assert0(filter.getNestedJoin() == null, select);
@@ -693,6 +785,30 @@
 
         res.from(from);
 
+        if (isForUpdate) {
+            if (!(from instanceof GridSqlTable ||
+                (from instanceof GridSqlAlias && from.size() == 1 && from.child() instanceof GridSqlTable)))
+                throw new IgniteSQLException("SELECT FOR UPDATE with joins is not supported.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+            GridSqlTable gridTbl = from instanceof GridSqlTable ? (GridSqlTable)from :
+                ((GridSqlAlias)from).child();
+
+            GridH2Table tbl = gridTbl.dataTable();
+
+            if (tbl == null)
+                throw new IgniteSQLException("SELECT FOR UPDATE query must involve Ignite table.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+            if (select.getLimit() != null || select.getOffset() != null)
+                throw new IgniteSQLException("LIMIT/OFFSET clauses are not supported for SELECT FOR UPDATE.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
+            if (SELECT_IS_GROUP_QUERY.get(select))
+                throw new IgniteSQLException("SELECT FOR UPDATE with aggregates and/or GROUP BY is not supported.",
+                    IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+        }
+
         ArrayList<Expression> expressions = select.getExpressions();
 
         for (int i = 0; i < expressions.size(); i++)
@@ -708,6 +824,8 @@
         if (havingIdx >= 0)
             res.havingColumn(havingIdx);
 
+        res.forUpdate(isForUpdate);
+
         processSortOrder(select.getSortOrder(), res);
 
         res.limit(parseExpression(select.getLimit(), false));
@@ -1374,6 +1492,8 @@
                     atomicityMode = CacheAtomicityMode.TRANSACTIONAL;
                 else if (CacheAtomicityMode.ATOMIC.name().equalsIgnoreCase(val))
                     atomicityMode = CacheAtomicityMode.ATOMIC;
+                else if (CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT.name().equalsIgnoreCase(val))
+                    atomicityMode = CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
                 else
                     throw new IgniteSQLException("Invalid value of \"" + PARAM_ATOMICITY + "\" parameter " +
                         "(should be either TRANSACTIONAL or ATOMIC): " + val, IgniteQueryErrorCode.PARSING);
@@ -1558,6 +1678,29 @@
     }
 
     /**
+     * @param stmt Prepared.
+     * @return Target table.
+     */
+    @NotNull public static GridH2Table dmlTable(@NotNull Prepared stmt) {
+        Table table;
+
+        if (stmt.getClass() == Insert.class)
+            table = INSERT_TABLE.get((Insert)stmt);
+        else if (stmt.getClass() == Merge.class)
+            table = MERGE_TABLE.get((Merge)stmt);
+        else if (stmt.getClass() == Delete.class)
+            table = DELETE_FROM.get((Delete)stmt).getTable();
+        else if (stmt.getClass() == Update.class)
+            table = UPDATE_TARGET.get((Update)stmt).getTable();
+        else
+            throw new IgniteException("Unsupported statement: " + stmt);
+
+        assert table instanceof GridH2Table : table;
+
+        return (GridH2Table) table;
+    }
+
+    /**
      * Check if query may be run locally on all caches mentioned in the query.
      * @param replicatedOnlyQry replicated-only query flag from original {@link SqlFieldsQuery}.
      * @return {@code true} if query may be run locally on all caches mentioned in the query, i.e. there's no need
@@ -1658,6 +1801,13 @@
     }
 
     /**
+     * @return H2 to Grid objects map.
+     */
+    public Map<Object, Object> objectsMap() {
+        return h2ObjToGridObj;
+    }
+
+    /**
      * @param qry Query.
      * @return Parsed query AST.
      */
@@ -1676,6 +1826,10 @@
      * @return Parsed AST.
      */
     private GridSqlUnion parseUnion(SelectUnion union) {
+        if (UNION_IS_FOR_UPDATE.get(union))
+            throw new IgniteSQLException("SELECT UNION FOR UPDATE is not supported.",
+                IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
+
         GridSqlUnion res = (GridSqlUnion)h2ObjToGridObj.get(union);
 
         if (res != null)
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java
index 5c32005..3e3b449 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlQuerySplitter.java
@@ -220,8 +220,10 @@
         qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder),
             true);
 
+        boolean forUpdate = GridSqlQueryParser.isForUpdateQuery(prepared);
+
         // Do the actual query split. We will update the original query AST, need to be careful.
-        splitter.splitQuery(qry);
+        splitter.splitQuery(qry, forUpdate);
 
         assert !F.isEmpty(splitter.mapSqlQrys): "map"; // We must have at least one map query.
         assert splitter.rdcSqlQry != null: "rdc"; // We must have a reduce query.
@@ -261,13 +263,16 @@
         // all map queries must have non-empty derivedPartitions to use this feature.
         twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
 
+        twoStepQry.forUpdate(forUpdate);
+
         return twoStepQry;
     }
 
     /**
      * @param qry Optimized and normalized query to split.
+     * @param forUpdate {@code SELECT FOR UPDATE} flag.
      */
-    private void splitQuery(GridSqlQuery qry) throws IgniteCheckedException {
+    private void splitQuery(GridSqlQuery qry, boolean forUpdate) throws IgniteCheckedException {
         // Create a fake parent AST element for the query to allow replacing the query in the parent by split.
         GridSqlSubquery fakeQryPrnt = new GridSqlSubquery(qry);
 
@@ -308,6 +313,15 @@
         // Get back the updated query from the fake parent. It will be our reduce query.
         qry = fakeQryPrnt.subquery();
 
+        // Reset SELECT FOR UPDATE flag for reduce query.
+        if (forUpdate) {
+            assert qry instanceof GridSqlSelect;
+
+            GridSqlSelect sel = (GridSqlSelect)qry;
+
+            sel.forUpdate(false);
+        }
+
         String rdcQry = qry.getSQL();
 
         checkNoDataTablesInReduceQuery(qry, rdcQry);
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java
index bfa0089..8ea61e1 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sql/GridSqlSelect.java
@@ -56,6 +56,9 @@
     /** */
     private int havingCol = -1;
 
+    /** */
+    private boolean isForUpdate;
+
     /**
      * @param colIdx Column index as for {@link #column(int)}.
      * @return Child index for {@link #child(int)}.
@@ -167,6 +170,9 @@
 
         getSortLimitSQL(buff);
 
+        if (isForUpdate)
+            buff.append("\nFOR UPDATE");
+
         return buff.toString();
     }
 
@@ -364,6 +370,20 @@
     }
 
     /**
+     * @return Whether this statement is {@code FOR UPDATE}.
+     */
+    public boolean isForUpdate() {
+        return isForUpdate;
+    }
+
+    /**
+     * @param forUpdate Whether this statement is {@code FOR UPDATE}.
+     */
+    public void forUpdate(boolean forUpdate) {
+        isForUpdate = forUpdate;
+    }
+
+    /**
      * @return Index of HAVING column.
      */
     public int havingColumn() {
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemIndex.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemIndex.java
index 55f5f29..2b4896e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemIndex.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemIndex.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.query.h2.sys;
 
+import java.util.Iterator;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2Cursor;
 import org.h2.engine.Session;
 import org.h2.index.BaseIndex;
@@ -73,9 +74,9 @@
     @Override public Cursor find(Session ses, SearchRow first, SearchRow last) {
         assert table instanceof SqlSystemTable;
 
-        Iterable<Row> rows = ((SqlSystemTable)table).getRows(ses, first, last);
+        Iterator<Row> rows = ((SqlSystemTable)table).getRows(ses, first, last);
 
-        return new GridH2Cursor(rows.iterator());
+        return new GridH2Cursor(rows);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemTable.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemTable.java
index 23106ee..664da0f 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemTable.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/SqlSystemTable.java
@@ -18,7 +18,7 @@
 package org.apache.ignite.internal.processors.query.h2.sys;
 
 import java.util.ArrayList;
-
+import java.util.Iterator;
 import org.apache.ignite.internal.processors.query.h2.sys.view.SqlSystemView;
 import org.h2.command.ddl.CreateTableData;
 import org.h2.engine.Session;
@@ -202,7 +202,7 @@
      * @param first First.
      * @param last Last.
      */
-    public Iterable<Row> getRows(Session ses, SearchRow first, SearchRow last) {
+    public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
         return view.getRows(ses, first, last);
     }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlAbstractLocalSystemView.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlAbstractLocalSystemView.java
index c601708..d692dba 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlAbstractLocalSystemView.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlAbstractLocalSystemView.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.query.h2.sys.view;
 
+import java.util.UUID;
 import org.apache.ignite.internal.GridKernalContext;
 import org.h2.engine.Session;
 import org.h2.result.Row;
@@ -25,9 +26,11 @@
 import org.h2.value.Value;
 import org.h2.value.ValueNull;
 import org.h2.value.ValueString;
+import org.h2.value.ValueTime;
+import org.h2.value.ValueTimestamp;
 
 /**
- * Local meta view base class (which uses only local node data).
+ * Local system view base class (which uses only local node data).
  */
 public abstract class SqlAbstractLocalSystemView extends SqlAbstractSystemView {
     /**
@@ -42,12 +45,21 @@
         super(tblName, desc, ctx, cols, indexes);
 
         assert tblName != null;
-        assert ctx != null;
         assert cols != null;
         assert indexes != null;
     }
 
     /**
+     * @param tblName Table name.
+     * @param desc Description.
+     * @param ctx Context.
+     * @param cols Columns.
+     */
+    public SqlAbstractLocalSystemView(String tblName, String desc, GridKernalContext ctx, Column ... cols) {
+        this(tblName, desc, ctx, new String[] {}, cols);
+    }
+
+    /**
      * @param ses Session.
      * @param key Key.
      * @param data Data for each column.
@@ -101,4 +113,43 @@
     protected SqlSystemViewColumnCondition conditionForColumn(String colName, SearchRow first, SearchRow last) {
         return SqlSystemViewColumnCondition.forColumn(getColumnIndex(colName), first, last);
     }
+
+    /**
+     * Converts value to UUID safe (suppressing exceptions).
+     *
+     * @param val UUID.
+     */
+    protected static UUID uuidFromValue(Value val) {
+        try {
+            return UUID.fromString(val.getString());
+        }
+        catch (RuntimeException e) {
+            return null;
+        }
+    }
+
+    /**
+     * Converts millis to ValueTime
+     *
+     * @param millis Millis.
+     */
+    protected static Value valueTimeFromMillis(long millis) {
+        if (millis == -1L || millis == Long.MAX_VALUE)
+            return ValueNull.INSTANCE;
+        else
+            // Note: ValueTime.fromMillis(long) method trying to convert time using timezone and return wrong result.
+            return ValueTime.fromNanos(millis * 1_000_000L);
+    }
+
+    /**
+     * Converts millis to ValueTimestamp
+     *
+     * @param millis Millis.
+     */
+    protected static Value valueTimestampFromMillis(long millis) {
+        if (millis <= 0L || millis == Long.MAX_VALUE)
+            return ValueNull.INSTANCE;
+        else
+            return ValueTimestamp.fromMillis(millis);
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemView.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemView.java
index 7eab521..93fdfa0 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemView.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemView.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.query.h2.sys.view;
 
+import java.util.Iterator;
 import org.h2.engine.Session;
 import org.h2.result.Row;
 import org.h2.result.SearchRow;
@@ -53,7 +54,7 @@
      * @param first First.
      * @param last Last.
      */
-    public Iterable<Row> getRows(Session ses, SearchRow first, SearchRow last);
+    public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last);
 
     /**
      * Gets row count for this view (or approximated row count, if real value can't be calculated quickly).
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewBaselineNodes.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewBaselineNodes.java
new file mode 100644
index 0000000..81a9a77
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewBaselineNodes.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.sys.view;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Set;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.processors.cluster.BaselineTopology;
+import org.apache.ignite.internal.util.typedef.F;
+import org.h2.engine.Session;
+import org.h2.result.Row;
+import org.h2.result.SearchRow;
+import org.h2.value.Value;
+
+/**
+ * System view: baseline nodes.
+ */
+public class SqlSystemViewBaselineNodes extends SqlAbstractLocalSystemView {
+    /**
+     * @param ctx Grid context.
+     */
+    public SqlSystemViewBaselineNodes(GridKernalContext ctx) {
+        super("BASELINE_NODES", "Baseline topology nodes", ctx,
+            newColumn("CONSISTENT_ID"),
+            newColumn("ONLINE", Value.BOOLEAN)
+        );
+    }
+
+    /** {@inheritDoc} */
+    @Override public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
+        List<Row> rows = new ArrayList<>();
+
+        BaselineTopology blt = ctx.state().clusterState().baselineTopology();
+
+        if (blt == null)
+            return rows.iterator();
+
+        Set<Object> consistentIds = blt.consistentIds();
+
+        Collection<ClusterNode> srvNodes = ctx.discovery().aliveServerNodes();
+
+        Set<Object> aliveNodeIds = new HashSet<>(F.nodeConsistentIds(srvNodes));
+
+        for (Object consistentId : consistentIds) {
+            rows.add(
+                createRow(ses, rows.size(),
+                    consistentId,
+                    aliveNodeIds.contains(consistentId)
+                )
+            );
+        }
+
+        return rows.iterator();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean canGetRowCount() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getRowCount() {
+        BaselineTopology blt = ctx.state().clusterState().baselineTopology();
+
+        return blt == null ? 0 : blt.consistentIds().size();
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeAttributes.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeAttributes.java
new file mode 100644
index 0000000..1ba0c7f
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeAttributes.java
@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.sys.view;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.util.typedef.F;
+import org.h2.engine.Session;
+import org.h2.result.Row;
+import org.h2.result.SearchRow;
+import org.h2.value.Value;
+
+/**
+ * System view: node attributes.
+ */
+public class SqlSystemViewNodeAttributes extends SqlAbstractLocalSystemView {
+    /**
+     * @param ctx Grid context.
+     */
+    public SqlSystemViewNodeAttributes(GridKernalContext ctx) {
+        super("NODE_ATTRIBUTES", "Node attributes", ctx, new String[] {"NODE_ID,NAME", "NAME"},
+            newColumn("NODE_ID", Value.UUID),
+            newColumn("NAME"),
+            newColumn("VALUE")
+        );
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
+        Collection<ClusterNode> nodes;
+
+        SqlSystemViewColumnCondition idCond = conditionForColumn("NODE_ID", first, last);
+        SqlSystemViewColumnCondition nameCond = conditionForColumn("NAME", first, last);
+
+        if (idCond.isEquality()) {
+            try {
+                UUID nodeId = uuidFromValue(idCond.valueForEquality());
+
+                ClusterNode node = nodeId == null ? null : ctx.discovery().node(nodeId);
+
+                if (node != null)
+                    nodes = Collections.singleton(node);
+                else
+                    nodes = Collections.emptySet();
+            }
+            catch (Exception e) {
+                nodes = Collections.emptySet();
+            }
+        }
+        else
+            nodes = F.concat(false, ctx.discovery().allNodes(), ctx.discovery().daemonNodes());
+
+        if (nameCond.isEquality()) {
+            String attrName = nameCond.valueForEquality().getString();
+
+            List<Row> rows = new ArrayList<>();
+
+            for (ClusterNode node : nodes) {
+                if (node.attributes().containsKey(attrName))
+                    rows.add(
+                        createRow(ses, rows.size(),
+                            node.id(),
+                            attrName,
+                            node.attribute(attrName)
+                        )
+                    );
+            }
+
+            return rows.iterator();
+        }
+        else {
+            AtomicLong rowKey = new AtomicLong();
+
+            return F.concat(F.iterator(nodes,
+                node -> F.iterator(node.attributes().entrySet(),
+                    attr -> createRow(ses,
+                        rowKey.incrementAndGet(),
+                        node.id(),
+                        attr.getKey(),
+                        attr.getValue()),
+                    true).iterator(),
+                true));
+        }
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeMetrics.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeMetrics.java
new file mode 100644
index 0000000..01b4e97
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodeMetrics.java
@@ -0,0 +1,210 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.sys.view;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.UUID;
+import org.apache.ignite.cluster.ClusterMetrics;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.util.typedef.F;
+import org.h2.engine.Session;
+import org.h2.result.Row;
+import org.h2.result.SearchRow;
+import org.h2.value.Value;
+
+/**
+ * System view: node metrics.
+ */
+public class SqlSystemViewNodeMetrics extends SqlAbstractLocalSystemView {
+    /**
+     * @param ctx Grid context.
+     */
+    public SqlSystemViewNodeMetrics(GridKernalContext ctx) {
+        super("NODE_METRICS", "Node metrics", ctx, new String[] {"NODE_ID"},
+            newColumn("NODE_ID", Value.UUID),
+            newColumn("LAST_UPDATE_TIME", Value.TIMESTAMP),
+            newColumn("MAX_ACTIVE_JOBS", Value.INT),
+            newColumn("CUR_ACTIVE_JOBS", Value.INT),
+            newColumn("AVG_ACTIVE_JOBS", Value.FLOAT),
+            newColumn("MAX_WAITING_JOBS", Value.INT),
+            newColumn("CUR_WAITING_JOBS", Value.INT),
+            newColumn("AVG_WAITING_JOBS", Value.FLOAT),
+            newColumn("MAX_REJECTED_JOBS", Value.INT),
+            newColumn("CUR_REJECTED_JOBS", Value.INT),
+            newColumn("AVG_REJECTED_JOBS", Value.FLOAT),
+            newColumn("TOTAL_REJECTED_JOBS", Value.INT),
+            newColumn("MAX_CANCELED_JOBS", Value.INT),
+            newColumn("CUR_CANCELED_JOBS", Value.INT),
+            newColumn("AVG_CANCELED_JOBS", Value.FLOAT),
+            newColumn("TOTAL_CANCELED_JOBS", Value.INT),
+            newColumn("MAX_JOBS_WAIT_TIME", Value.TIME),
+            newColumn("CUR_JOBS_WAIT_TIME", Value.TIME),
+            newColumn("AVG_JOBS_WAIT_TIME", Value.TIME),
+            newColumn("MAX_JOBS_EXECUTE_TIME", Value.TIME),
+            newColumn("CUR_JOBS_EXECUTE_TIME", Value.TIME),
+            newColumn("AVG_JOBS_EXECUTE_TIME", Value.TIME),
+            newColumn("TOTAL_JOBS_EXECUTE_TIME", Value.TIME),
+            newColumn("TOTAL_EXECUTED_JOBS", Value.INT),
+            newColumn("TOTAL_EXECUTED_TASKS", Value.INT),
+            newColumn("TOTAL_BUSY_TIME", Value.TIME),
+            newColumn("TOTAL_IDLE_TIME", Value.TIME),
+            newColumn("CUR_IDLE_TIME", Value.TIME),
+            newColumn("BUSY_TIME_PERCENTAGE", Value.FLOAT),
+            newColumn("IDLE_TIME_PERCENTAGE", Value.FLOAT),
+            newColumn("TOTAL_CPU", Value.INT),
+            newColumn("CUR_CPU_LOAD", Value.DOUBLE),
+            newColumn("AVG_CPU_LOAD", Value.DOUBLE),
+            newColumn("CUR_GC_CPU_LOAD", Value.DOUBLE),
+            newColumn("HEAP_MEMORY_INIT", Value.LONG),
+            newColumn("HEAP_MEMORY_USED", Value.LONG),
+            newColumn("HEAP_MEMORY_COMMITED", Value.LONG),
+            newColumn("HEAP_MEMORY_MAX", Value.LONG),
+            newColumn("HEAP_MEMORY_TOTAL", Value.LONG),
+            newColumn("NONHEAP_MEMORY_INIT", Value.LONG),
+            newColumn("NONHEAP_MEMORY_USED", Value.LONG),
+            newColumn("NONHEAP_MEMORY_COMMITED", Value.LONG),
+            newColumn("NONHEAP_MEMORY_MAX", Value.LONG),
+            newColumn("NONHEAP_MEMORY_TOTAL", Value.LONG),
+            newColumn("UPTIME", Value.TIME),
+            newColumn("JVM_START_TIME", Value.TIMESTAMP),
+            newColumn("NODE_START_TIME", Value.TIMESTAMP),
+            newColumn("LAST_DATA_VERSION", Value.LONG),
+            newColumn("CUR_THREAD_COUNT", Value.INT),
+            newColumn("MAX_THREAD_COUNT", Value.INT),
+            newColumn("TOTAL_THREAD_COUNT", Value.LONG),
+            newColumn("CUR_DAEMON_THREAD_COUNT", Value.INT),
+            newColumn("SENT_MESSAGES_COUNT", Value.INT),
+            newColumn("SENT_BYTES_COUNT", Value.LONG),
+            newColumn("RECEIVED_MESSAGES_COUNT", Value.INT),
+            newColumn("RECEIVED_BYTES_COUNT", Value.LONG),
+            newColumn("OUTBOUND_MESSAGES_QUEUE", Value.INT)
+        );
+    }
+
+    /** {@inheritDoc} */
+    @Override public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
+        List<Row> rows = new ArrayList<>();
+
+        Collection<ClusterNode> nodes;
+
+        SqlSystemViewColumnCondition idCond = conditionForColumn("NODE_ID", first, last);
+
+        if (idCond.isEquality()) {
+            try {
+                UUID nodeId = uuidFromValue(idCond.valueForEquality());
+
+                ClusterNode node = nodeId == null ? null : ctx.discovery().node(nodeId);
+
+                if (node != null)
+                    nodes = Collections.singleton(node);
+                else
+                    nodes = Collections.emptySet();
+            }
+            catch (Exception e) {
+                nodes = Collections.emptySet();
+            }
+        }
+        else
+            nodes = F.concat(false, ctx.discovery().allNodes(), ctx.discovery().daemonNodes());
+
+        for (ClusterNode node : nodes) {
+            if (node != null) {
+                ClusterMetrics metrics = node.metrics();
+
+                rows.add(
+                    createRow(ses, rows.size(),
+                        node.id(),
+                        valueTimestampFromMillis(metrics.getLastUpdateTime()),
+                        metrics.getMaximumActiveJobs(),
+                        metrics.getCurrentActiveJobs(),
+                        metrics.getAverageActiveJobs(),
+                        metrics.getMaximumWaitingJobs(),
+                        metrics.getCurrentWaitingJobs(),
+                        metrics.getAverageWaitingJobs(),
+                        metrics.getMaximumRejectedJobs(),
+                        metrics.getCurrentRejectedJobs(),
+                        metrics.getAverageRejectedJobs(),
+                        metrics.getTotalRejectedJobs(),
+                        metrics.getMaximumCancelledJobs(),
+                        metrics.getCurrentCancelledJobs(),
+                        metrics.getAverageCancelledJobs(),
+                        metrics.getTotalCancelledJobs(),
+                        valueTimeFromMillis(metrics.getMaximumJobWaitTime()),
+                        valueTimeFromMillis(metrics.getCurrentJobWaitTime()),
+                        valueTimeFromMillis((long)metrics.getAverageJobWaitTime()),
+                        valueTimeFromMillis(metrics.getMaximumJobExecuteTime()),
+                        valueTimeFromMillis(metrics.getCurrentJobExecuteTime()),
+                        valueTimeFromMillis((long)metrics.getAverageJobExecuteTime()),
+                        valueTimeFromMillis(metrics.getTotalJobsExecutionTime()),
+                        metrics.getTotalExecutedJobs(),
+                        metrics.getTotalExecutedTasks(),
+                        valueTimeFromMillis(metrics.getTotalBusyTime()),
+                        valueTimeFromMillis(metrics.getTotalIdleTime()),
+                        valueTimeFromMillis(metrics.getCurrentIdleTime()),
+                        metrics.getBusyTimePercentage(),
+                        metrics.getIdleTimePercentage(),
+                        metrics.getTotalCpus(),
+                        metrics.getCurrentCpuLoad(),
+                        metrics.getAverageCpuLoad(),
+                        metrics.getCurrentGcCpuLoad(),
+                        metrics.getHeapMemoryInitialized(),
+                        metrics.getHeapMemoryUsed(),
+                        metrics.getHeapMemoryCommitted(),
+                        metrics.getHeapMemoryMaximum(),
+                        metrics.getHeapMemoryTotal(),
+                        metrics.getNonHeapMemoryInitialized(),
+                        metrics.getNonHeapMemoryUsed(),
+                        metrics.getNonHeapMemoryCommitted(),
+                        metrics.getNonHeapMemoryMaximum(),
+                        metrics.getNonHeapMemoryTotal(),
+                        valueTimeFromMillis(metrics.getUpTime()),
+                        valueTimestampFromMillis(metrics.getStartTime()),
+                        valueTimestampFromMillis(metrics.getNodeStartTime()),
+                        metrics.getLastDataVersion(),
+                        metrics.getCurrentThreadCount(),
+                        metrics.getMaximumThreadCount(),
+                        metrics.getTotalStartedThreadCount(),
+                        metrics.getCurrentDaemonThreadCount(),
+                        metrics.getSentMessagesCount(),
+                        metrics.getSentBytesCount(),
+                        metrics.getReceivedMessagesCount(),
+                        metrics.getReceivedBytesCount(),
+                        metrics.getOutboundMessagesQueueSize()
+                    )
+                );
+            }
+        }
+
+        return rows.iterator();
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean canGetRowCount() {
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public long getRowCount() {
+        return F.concat(false, ctx.discovery().allNodes(), ctx.discovery().daemonNodes()).size();
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodes.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodes.java
index e944b4f..514f92e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodes.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/sys/view/SqlSystemViewNodes.java
@@ -20,6 +20,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.Iterator;
 import java.util.List;
 import java.util.UUID;
 import org.apache.ignite.cluster.ClusterNode;
@@ -31,18 +32,17 @@
 import org.h2.value.Value;
 
 /**
- * Meta view: nodes.
+ * System view: nodes.
  */
 public class SqlSystemViewNodes extends SqlAbstractLocalSystemView {
     /**
      * @param ctx Grid context.
      */
     public SqlSystemViewNodes(GridKernalContext ctx) {
-        super("NODES", "Topology nodes", ctx, new String[] {"ID", "IS_LOCAL"},
+        super("NODES", "Topology nodes", ctx, new String[] {"ID"},
             newColumn("ID", Value.UUID),
             newColumn("CONSISTENT_ID"),
             newColumn("VERSION"),
-            newColumn("IS_LOCAL", Value.BOOLEAN),
             newColumn("IS_CLIENT", Value.BOOLEAN),
             newColumn("IS_DAEMON", Value.BOOLEAN),
             newColumn("NODE_ORDER", Value.INT),
@@ -52,18 +52,15 @@
     }
 
     /** {@inheritDoc} */
-    @Override public Iterable<Row> getRows(Session ses, SearchRow first, SearchRow last) {
+    @Override public Iterator<Row> getRows(Session ses, SearchRow first, SearchRow last) {
         List<Row> rows = new ArrayList<>();
 
         Collection<ClusterNode> nodes;
 
-        SqlSystemViewColumnCondition locCond = conditionForColumn("IS_LOCAL", first, last);
         SqlSystemViewColumnCondition idCond = conditionForColumn("ID", first, last);
 
-        if (locCond.isEquality() && locCond.valueForEquality().getBoolean())
-            nodes = Collections.singleton(ctx.discovery().localNode());
-        else if (idCond.isEquality()) {
-            UUID nodeId = uuidFromString(idCond.valueForEquality().getString());
+        if (idCond.isEquality()) {
+            UUID nodeId = uuidFromValue(idCond.valueForEquality());
 
             nodes = nodeId == null ? Collections.emptySet() : Collections.singleton(ctx.discovery().node(nodeId));
         }
@@ -77,7 +74,6 @@
                         node.id(),
                         node.consistentId(),
                         node.version(),
-                        node.isLocal(),
                         node.isClient(),
                         node.isDaemon(),
                         node.order(),
@@ -87,7 +83,7 @@
                 );
         }
 
-        return rows;
+        return rows.iterator();
     }
 
     /** {@inheritDoc} */
@@ -99,18 +95,4 @@
     @Override public long getRowCount() {
         return ctx.discovery().allNodes().size() + ctx.discovery().daemonNodes().size();
     }
-
-    /**
-     * Converts string to UUID safe (suppressing exceptions).
-     *
-     * @param val UUID in string format.
-     */
-    private static UUID uuidFromString(String val) {
-        try {
-            return UUID.fromString(val);
-        }
-        catch (RuntimeException e) {
-            return null;
-        }
-    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
index 216a259..9166604 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMapQueryExecutor.java
@@ -18,7 +18,9 @@
 package org.apache.ignite.internal.processors.query.h2.twostep;
 
 import java.sql.Connection;
+import java.sql.PreparedStatement;
 import java.sql.ResultSet;
+import java.sql.SQLException;
 import java.util.AbstractCollection;
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -33,11 +35,13 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
 import javax.cache.CacheException;
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.IgniteLogger;
 import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.cache.PartitionLossPolicy;
 import org.apache.ignite.cache.query.QueryCancelledException;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.cluster.ClusterNode;
@@ -47,25 +51,34 @@
 import org.apache.ignite.events.EventType;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.GridTopic;
+import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.managers.communication.GridMessageListener;
 import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.CacheInvalidStateException;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
-import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.CompoundLockFuture;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionsReservation;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTransactionalCacheAdapter;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridReservable;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccUtils;
 import org.apache.ignite.internal.processors.cache.query.CacheQueryType;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery;
 import org.apache.ignite.internal.processors.query.GridQueryCancel;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.processors.query.h2.H2Utils;
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
+import org.apache.ignite.internal.processors.query.h2.ResultSetEnlistFuture;
 import org.apache.ignite.internal.processors.query.h2.UpdateResult;
 import org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext;
 import org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException;
+import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryFailResponse;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageRequest;
@@ -73,22 +86,30 @@
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2SelectForUpdateTxDetails;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
 import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.CU;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.spi.indexing.IndexingQueryFilter;
 import org.apache.ignite.thread.IgniteThread;
+import org.h2.command.Prepared;
 import org.h2.jdbc.JdbcResultSet;
 import org.h2.value.Value;
 import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_FORCE_LAZY_RESULT_SET;
+import static org.apache.ignite.cache.PartitionLossPolicy.READ_ONLY_SAFE;
+import static org.apache.ignite.cache.PartitionLossPolicy.READ_WRITE_SAFE;
 import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_EXECUTED;
 import static org.apache.ignite.internal.managers.communication.GridIoPolicy.QUERY_POOL;
 import static org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion.NONE;
+import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.LOST;
 import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING;
 import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.OFF;
 import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.distributedJoinMode;
@@ -296,10 +317,10 @@
      * @param reserved Reserved list.
      * @param nodeId Node ID.
      * @param reqId Request ID.
-     * @return {@code true} If all the needed partitions successfully reserved.
+     * @return String which is null in case of success or with causeMessage if failed
      * @throws IgniteCheckedException If failed.
      */
-    private boolean reservePartitions(
+    private String reservePartitions(
         @Nullable List<Integer> cacheIds,
         AffinityTopologyVersion topVer,
         final int[] explicitParts,
@@ -310,7 +331,7 @@
         assert topVer != null;
 
         if (F.isEmpty(cacheIds))
-            return true;
+            return null;
 
         Collection<Integer> partIds = wrap(explicitParts);
 
@@ -319,11 +340,9 @@
 
             // Cache was not found, probably was not deployed yet.
             if (cctx == null) {
-                logRetry("Failed to reserve partitions for query (cache is not found on local node) [" +
-                    "rmtNodeId=" + nodeId + ", reqId=" + reqId + ", affTopVer=" + topVer + ", cacheId=" +
-                    cacheIds.get(i) + "]");
-
-                return false;
+                return String.format("Failed to reserve partitions for query (cache is not found on " +
+                    "local node) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s]",
+                    ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i));
             }
 
             if (cctx.isLocal() || !cctx.rebalanceEnabled())
@@ -336,13 +355,10 @@
 
             if (explicitParts == null && r != null) { // Try to reserve group partition if any and no explicits.
                 if (r != MapReplicatedReservation.INSTANCE) {
-                    if (!r.reserve()) {
-                        logRetry("Failed to reserve partitions for query (group reservation failed) [" +
-                            "rmtNodeId=" + nodeId + ", reqId=" + reqId + ", affTopVer=" + topVer +
-                            ", cacheId=" + cacheIds.get(i) + ", cacheName=" + cctx.name() + "]");
-
-                        return false; // We need explicit partitions here -> retry.
-                    }
+                    if (!r.reserve())
+                        return String.format("Failed to reserve partitions for query (group " +
+                            "reservation failed) [localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
+                            "cacheName=%s]",ctx.localNodeId(), nodeId, reqId, topVer, cacheIds.get(i), cctx.name());
 
                     reserved.add(r);
                 }
@@ -358,15 +374,21 @@
                             // We don't need to reserve partitions because they will not be evicted in replicated caches.
                             GridDhtPartitionState partState = part != null ? part.state() : null;
 
-                            if (partState != OWNING) {
-                                logRetry("Failed to reserve partitions for query (partition of " +
-                                    "REPLICATED cache is not in OWNING state) [rmtNodeId=" + nodeId +
-                                    ", reqId=" + reqId + ", affTopVer=" + topVer + ", cacheId=" + cacheIds.get(i) +
-                                    ", cacheName=" + cctx.name() + ", part=" + p + ", partFound=" + (part != null) +
-                                    ", partState=" + partState + "]");
-
-                                return false;
-                            }
+                            if (partState != OWNING)
+                                return String.format("Failed to reserve partitions for query " +
+                                    "(partition of REPLICATED cache is not in OWNING state) [" +
+                                    "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, cacheName=%s, " +
+                                    "part=%s, partFound=%s, partState=%s]",
+                                    ctx.localNodeId(),
+                                    nodeId,
+                                    reqId,
+                                    topVer,
+                                    cacheIds.get(i),
+                                    cctx.name(),
+                                    p,
+                                    (part != null),
+                                    partState
+                                );
                         }
 
                         // Mark that we checked this replicated cache.
@@ -377,41 +399,84 @@
                     if (explicitParts == null)
                         partIds = cctx.affinity().primaryPartitions(ctx.localNodeId(), topVer);
 
+                    int reservedCnt = 0;
+
                     for (int partId : partIds) {
                         GridDhtLocalPartition part = partition(cctx, partId);
 
                         GridDhtPartitionState partState = part != null ? part.state() : null;
 
-                        if (partState != OWNING || !part.reserve()) {
-                            logRetry("Failed to reserve partitions for query (partition of " +
-                                "PARTITIONED cache cannot be reserved) [rmtNodeId=" + nodeId + ", reqId=" + reqId +
-                                ", affTopVer=" + topVer + ", cacheId=" + cacheIds.get(i) +
-                                ", cacheName=" + cctx.name() + ", part=" + partId + ", partFound=" + (part != null) +
-                                ", partState=" + partState + "]");
+                        if (partState != OWNING) {
+                            if (partState == LOST)
+                                ignoreLostPartitionIfPossible(cctx, part);
+                            else {
+                                return String.format("Failed to reserve partitions for query " +
+                                        "(partition of PARTITIONED cache is not found or not in OWNING state) [" +
+                                        "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
+                                        "cacheName=%s, part=%s, partFound=%s, partState=%s]",
+                                    ctx.localNodeId(),
+                                    nodeId,
+                                    reqId,
+                                    topVer,
+                                    cacheIds.get(i),
+                                    cctx.name(),
+                                    partId,
+                                    (part != null),
+                                    partState
+                                );
+                            }
+                        }
 
-                            return false;
+                        if (!part.reserve()) {
+                            return String.format("Failed to reserve partitions for query " +
+                                    "(partition of PARTITIONED cache cannot be reserved) [" +
+                                    "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
+                                    "cacheName=%s, part=%s, partFound=%s, partState=%s]",
+                                ctx.localNodeId(),
+                                nodeId,
+                                reqId,
+                                topVer,
+                                cacheIds.get(i),
+                                cctx.name(),
+                                partId,
+                                true,
+                                partState
+                            );
                         }
 
                         reserved.add(part);
 
+                        reservedCnt++;
+
                         // Double check that we are still in owning state and partition contents are not cleared.
                         partState = part.state();
 
-                        if (part.state() != OWNING) {
-                            logRetry("Failed to reserve partitions for query (partition of " +
-                                "PARTITIONED cache is not in OWNING state after reservation) [rmtNodeId=" + nodeId +
-                                ", reqId=" + reqId + ", affTopVer=" + topVer + ", cacheId=" + cacheIds.get(i) +
-                                ", cacheName=" + cctx.name() + ", part=" + partId + ", partState=" + partState + "]");
-
-                            return false;
+                        if (partState != OWNING) {
+                            if (partState == LOST)
+                                ignoreLostPartitionIfPossible(cctx, part);
+                            else {
+                                return String.format("Failed to reserve partitions for query " +
+                                    "(partition of PARTITIONED cache is not in OWNING state after reservation) [" +
+                                    "localNodeId=%s, rmtNodeId=%s, reqId=%s, affTopVer=%s, cacheId=%s, " +
+                                    "cacheName=%s, part=%s, partState=%s]",
+                                    ctx.localNodeId(),
+                                    nodeId,
+                                    reqId,
+                                    topVer,
+                                    cacheIds.get(i),
+                                    cctx.name(),
+                                    partId,
+                                    partState
+                                );
+                            }
                         }
                     }
 
-                    if (explicitParts == null) {
+                    if (explicitParts == null && reservedCnt > 0) {
                         // We reserved all the primary partitions for cache, attempt to add group reservation.
                         GridDhtPartitionsReservation grp = new GridDhtPartitionsReservation(topVer, cctx, "SQL");
 
-                        if (grp.register(reserved.subList(reserved.size() - partIds.size(), reserved.size()))) {
+                        if (grp.register(reserved.subList(reserved.size() - reservedCnt, reserved.size()))) {
                             if (reservations.putIfAbsent(grpKey, grp) != null)
                                 throw new IllegalStateException("Reservation already exists.");
 
@@ -426,16 +491,26 @@
             }
         }
 
-        return true;
+        return null;
     }
 
     /**
-     * Load failed partition reservation.
+     * Decide whether to ignore or proceed with lost partition.
      *
-     * @param msg Message.
+     * @param cctx Cache context.
+     * @param part Partition.
+     * @throws IgniteCheckedException If failed.
      */
-    private void logRetry(String msg) {
-        log.info(msg);
+    private static void ignoreLostPartitionIfPossible(GridCacheContext cctx, GridDhtLocalPartition part)
+        throws IgniteCheckedException {
+        PartitionLossPolicy plc = cctx.config().getPartitionLossPolicy();
+
+        if (plc != null) {
+            if (plc == READ_ONLY_SAFE || plc == READ_WRITE_SAFE) {
+                throw new CacheInvalidStateException("Failed to execute query because cache partition has been " +
+                    "lost [cacheName=" + cctx.name() + ", part=" + part + ']');
+            }
+        }
     }
 
     /**
@@ -477,26 +552,6 @@
     }
 
     /**
-     * @param caches Cache IDs.
-     * @return The first found partitioned cache.
-     */
-    private GridCacheContext<?,?> findFirstPartitioned(List<Integer> caches) {
-        GridCacheSharedContext<?,?> sctx = ctx.cache().context();
-
-        for (int i = 0; i < caches.size(); i++) {
-            GridCacheContext<?,?> mainCctx = sctx.cacheContext(caches.get(i));
-
-            if (mainCctx == null)
-                throw new CacheException("Failed to find cache.");
-
-            if (!mainCctx.isLocal() && !mainCctx.isReplicated())
-                return mainCctx;
-        }
-
-        throw new IllegalStateException("Failed to find a partitioned cache.");
-    }
-
-    /**
      * @param node Node.
      * @param req Query request.
      */
@@ -519,10 +574,73 @@
         final List<Integer> cacheIds = req.caches();
 
         int segments = explain || replicated || F.isEmpty(cacheIds) ? 1 :
-            findFirstPartitioned(cacheIds).config().getQueryParallelism();
+            CU.firstPartitioned(ctx.cache().context(), cacheIds).config().getQueryParallelism();
 
         final Object[] params = req.parameters();
 
+        final GridDhtTxLocalAdapter tx;
+
+        GridH2SelectForUpdateTxDetails txReq = req.txDetails();
+
+        try {
+            if (txReq != null) {
+                // Prepare to run queries.
+                GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
+
+                if (mainCctx == null || mainCctx.atomic() || !mainCctx.mvccEnabled() || cacheIds.size() != 1)
+                    throw new IgniteSQLException("SELECT FOR UPDATE is supported only for queries " +
+                        "that involve single transactional cache.");
+
+                GridDhtTransactionalCacheAdapter txCache = (GridDhtTransactionalCacheAdapter)mainCctx.cache();
+
+                if (!node.isLocal()) {
+                    tx = txCache.initTxTopologyVersion(
+                        node.id(),
+                        node,
+                        txReq.version(),
+                        txReq.futureId(),
+                        txReq.miniId(),
+                        txReq.firstClientRequest(),
+                        req.topologyVersion(),
+                        txReq.threadId(),
+                        txReq.timeout(),
+                        txReq.subjectId(),
+                        txReq.taskNameHash());
+                }
+                else {
+                    tx = MvccUtils.tx(ctx, txReq.version());
+
+                    assert tx != null;
+                }
+            }
+            else
+                tx = null;
+        }
+        catch (IgniteException | IgniteCheckedException e) {
+            // send error if TX was not initialized properly.
+            releaseReservations();
+
+            U.error(log, "Failed to execute local query.", e);
+
+            sendError(node, req.requestId(), e);
+
+            return;
+        }
+
+        AtomicInteger runCntr;
+        CompoundLockFuture lockFut;
+
+        if (txReq != null && segments > 1) {
+            runCntr = new AtomicInteger(segments);
+            lockFut = new CompoundLockFuture(segments, tx);
+
+            lockFut.init();
+        }
+        else {
+            runCntr = null;
+            lockFut = null;
+        }
+
         for (int i = 1; i < segments; i++) {
             assert !F.isEmpty(cacheIds);
 
@@ -544,13 +662,17 @@
                     false, // Replicated is always false here (see condition above).
                     req.timeout(),
                     params,
-                    true); // Lazy = true.
+                    true,
+                    req.mvccSnapshot(),
+                    tx,
+                    txReq,
+                    lockFut,
+                    runCntr);
             }
             else {
                 ctx.closure().callLocal(
                     new Callable<Void>() {
-                        @Override
-                        public Void call() throws Exception {
+                        @Override public Void call() {
                             onQueryRequest0(node,
                                 req.requestId(),
                                 segment,
@@ -566,7 +688,12 @@
                                 false,
                                 req.timeout(),
                                 params,
-                                false); // Lazy = false.
+                                false,
+                                req.mvccSnapshot(),
+                                tx,
+                                txReq,
+                                lockFut,
+                                runCntr);
 
                             return null;
                         }
@@ -590,7 +717,11 @@
             replicated,
             req.timeout(),
             params,
-            lazy);
+            lazy,
+            req.mvccSnapshot(),
+            tx,
+            txReq,
+            lockFut, runCntr);
     }
 
     /**
@@ -606,6 +737,11 @@
      * @param pageSize Page size.
      * @param distributedJoinMode Query distributed join mode.
      * @param lazy Streaming flag.
+     * @param mvccSnapshot MVCC snapshot.
+     * @param tx Transaction.
+     * @param txDetails TX details, if it's a {@code FOR UPDATE} request, or {@code null}.
+     * @param lockFut Lock future.
+     * @param runCntr Counter which counts remaining queries in case segmented index is used.
      */
     private void onQueryRequest0(
         final ClusterNode node,
@@ -623,10 +759,19 @@
         final boolean replicated,
         final int timeout,
         final Object[] params,
-        boolean lazy
-    ) {
+        boolean lazy,
+        @Nullable final MvccSnapshot mvccSnapshot,
+        @Nullable final GridDhtTxLocalAdapter tx,
+        @Nullable final GridH2SelectForUpdateTxDetails txDetails,
+        @Nullable final CompoundLockFuture lockFut,
+        @Nullable final AtomicInteger runCntr) {
         MapQueryLazyWorker worker = MapQueryLazyWorker.currentWorker();
 
+        // In presence of TX, we also must always have matching details.
+        assert tx == null || txDetails != null;
+
+        boolean inTx = (tx != null);
+
         if (lazy && worker == null) {
             // Lazy queries must be re-submitted to dedicated workers.
             MapQueryLazyWorkerKey key = new MapQueryLazyWorkerKey(node.id(), reqId, segmentId);
@@ -634,8 +779,28 @@
 
             worker.submit(new Runnable() {
                 @Override public void run() {
-                    onQueryRequest0(node, reqId, segmentId, schemaName, qrys, cacheIds, topVer, partsMap, parts,
-                        pageSize, distributedJoinMode, enforceJoinOrder, replicated, timeout, params, true);
+                    onQueryRequest0(
+                        node,
+                        reqId,
+                        segmentId,
+                        schemaName,
+                        qrys,
+                        cacheIds,
+                        topVer,
+                        partsMap,
+                        parts,
+                        pageSize,
+                        distributedJoinMode,
+                        enforceJoinOrder,
+                        replicated,
+                        timeout,
+                        params,
+                        true,
+                        mvccSnapshot,
+                        tx,
+                        txDetails,
+                        lockFut,
+                        runCntr);
                 }
             });
 
@@ -660,9 +825,11 @@
             return;
         }
 
+        if (lazy && txDetails != null)
+            throw new IgniteSQLException("Lazy execution of SELECT FOR UPDATE queries is not supported.");
+
         // Prepare to run queries.
-        GridCacheContext<?, ?> mainCctx =
-            !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
+        GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
 
         MapNodeResults nodeRess = resultsForNode(node.id());
 
@@ -671,20 +838,24 @@
         List<GridReservable> reserved = new ArrayList<>();
 
         try {
-            if (topVer != null) {
+            // We want to reserve only in not SELECT FOR UPDATE case -
+            // otherwise, their state is protected by locked topology.
+            if (topVer != null && txDetails == null) {
                 // Reserve primary for topology version or explicit partitions.
-                if (!reservePartitions(cacheIds, topVer, parts, reserved, node.id(), reqId)) {
+                String err = reservePartitions(cacheIds, topVer, parts, reserved, node.id(), reqId);
+
+                if (!F.isEmpty(err)) {
                     // Unregister lazy worker because re-try may never reach this node again.
                     if (lazy)
                         stopAndUnregisterCurrentLazyWorker();
 
-                    sendRetry(node, reqId, segmentId);
+                    sendRetry(node, reqId, segmentId, err);
 
                     return;
                 }
             }
 
-            qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker());
+            qr = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, MapQueryLazyWorker.currentWorker(), inTx);
 
             if (nodeRess.put(reqId, segmentId, qr) != null)
                 throw new IllegalStateException();
@@ -701,6 +872,7 @@
                 .pageSize(pageSize)
                 .topologyVersion(topVer)
                 .reservations(reserved)
+                .mvccSnapshot(mvccSnapshot)
                 .lazyWorker(worker);
 
             Connection conn = h2.connectionForSchema(schemaName);
@@ -729,12 +901,56 @@
                 for (GridCacheSqlQuery qry : qrys) {
                     ResultSet rs = null;
 
+                    boolean removeMapping = false;
+
                     // If we are not the target node for this replicated query, just ignore it.
                     if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
-                        rs = h2.executeSqlQueryWithTimer(conn, qry.query(),
-                            F.asList(qry.parameters(params)), true,
-                            timeout,
-                            qr.queryCancel(qryIdx));
+                        String sql = qry.query(); Collection<Object> params0 = F.asList(qry.parameters(params));
+
+                        PreparedStatement stmt;
+
+                        try {
+                            stmt = h2.prepareStatement(conn, sql, true);
+                        }
+                        catch (SQLException e) {
+                            throw new IgniteCheckedException("Failed to parse SQL query: " + sql, e);
+                        }
+
+                        Prepared p = GridSqlQueryParser.prepared(stmt);
+
+                        if (GridSqlQueryParser.isForUpdateQuery(p)) {
+                            sql = GridSqlQueryParser.rewriteQueryForUpdateIfNeeded(p, inTx);
+                            stmt = h2.prepareStatement(conn, sql, true);
+                        }
+
+                        h2.bindParameters(stmt, params0);
+
+                        rs = h2.executeSqlQueryWithTimer(stmt, conn, sql, params0, timeout, qr.queryCancel(qryIdx));
+
+                        if (inTx) {
+                            ResultSetEnlistFuture enlistFut = ResultSetEnlistFuture.future(
+                                ctx.localNodeId(),
+                                txDetails.version(),
+                                mvccSnapshot,
+                                txDetails.threadId(),
+                                IgniteUuid.randomUuid(),
+                                txDetails.miniId(),
+                                parts,
+                                tx,
+                                timeout,
+                                mainCctx,
+                                rs
+                            );
+
+                            if (lockFut != null)
+                                lockFut.register(enlistFut);
+
+                            enlistFut.init();
+
+                            enlistFut.get();
+
+                            rs.beforeFirst();
+                        }
 
                         if (evt) {
                             ctx.event().record(new CacheQueryExecutedEvent<>(
@@ -763,8 +979,35 @@
                         throw new QueryCancelledException();
                     }
 
+                    if (inTx) {
+                        if (tx.dht() && (runCntr == null || runCntr.decrementAndGet() == 0)) {
+                            if (removeMapping = tx.empty() && !tx.queryEnlisted())
+                                tx.rollbackAsync().get();
+                        }
+                    }
+
                     // Send the first page.
-                    sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize);
+                    if (lockFut == null)
+                        sendNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize, removeMapping);
+                    else {
+                        GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qr, qryIdx, segmentId, pageSize, removeMapping);
+
+                        if (msg != null) {
+                            lockFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
+                                @Override public void apply(IgniteInternalFuture<Void> future) {
+                                    try {
+                                        if (node.isLocal())
+                                            h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), msg);
+                                        else
+                                            ctx.io().sendToGridTopic(node, GridTopic.TOPIC_QUERY, msg, QUERY_POOL);
+                                    }
+                                    catch (Exception e) {
+                                        U.error(log, e);
+                                    }
+                                }
+                            });
+                        }
+                    }
 
                     qryIdx++;
                 }
@@ -793,10 +1036,12 @@
             GridH2RetryException retryErr = X.cause(e, GridH2RetryException.class);
 
             if (retryErr != null) {
-                logRetry("Failed to execute non-collocated query (will retry) [nodeId=" + node.id() +
-                    ", reqId=" + reqId + ", errMsg=" + retryErr.getMessage() + ']');
+                final String retryCause = String.format(
+                    "Failed to execute non-collocated query (will retry) [localNodeId=%s, rmtNodeId=%s, reqId=%s, " +
+                    "errMsg=%s]", ctx.localNodeId(), node.id(), reqId, retryErr.getMessage()
+                );
 
-                sendRetry(node, reqId, segmentId);
+                sendRetry(node, reqId, segmentId, retryCause);
             }
             else {
                 U.error(log, "Failed to execute local query.", e);
@@ -817,6 +1062,14 @@
     }
 
     /**
+     * @param cacheIds Cache ids.
+     * @return Id of the first cache in list, or {@code null} if list is empty.
+     */
+    private GridCacheContext mainCacheContext(List<Integer> cacheIds) {
+        return !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;
+    }
+
+    /**
      * Releases reserved partitions.
      */
     private void releaseReservations() {
@@ -845,20 +1098,22 @@
 
         List<GridReservable> reserved = new ArrayList<>();
 
-        if (!reservePartitions(cacheIds, topVer, parts, reserved, node.id(), reqId)) {
-            U.error(log, "Failed to reserve partitions for DML request. [localNodeId=" + ctx.localNodeId() +
-                ", nodeId=" + node.id() + ", reqId=" + req.requestId() + ", cacheIds=" + cacheIds +
-                ", topVer=" + topVer + ", parts=" + Arrays.toString(parts) + ']');
-
-            sendUpdateResponse(node, reqId, null, "Failed to reserve partitions for DML request. " +
-                "Explanation (Retry your request when re-balancing is over).");
-
-            return;
-        }
-
         MapNodeResults nodeResults = resultsForNode(node.id());
 
         try {
+            String err = reservePartitions(cacheIds, topVer, parts, reserved, node.id(), reqId);
+
+            if (!F.isEmpty(err)) {
+                U.error(log, "Failed to reserve partitions for DML request. [localNodeId=" + ctx.localNodeId() +
+                    ", nodeId=" + node.id() + ", reqId=" + req.requestId() + ", cacheIds=" + cacheIds +
+                    ", topVer=" + topVer + ", parts=" + Arrays.toString(parts) + ']');
+
+                sendUpdateResponse(node, reqId, null,
+                    "Failed to reserve partitions for DML request. " + err);
+
+                return;
+            }
+
             IndexingQueryFilter filter = h2.backupFilter(topVer, parts);
 
             GridQueryCancel cancel = nodeResults.putUpdate(reqId);
@@ -878,7 +1133,7 @@
             final boolean replicated = req.isFlagSet(GridH2QueryRequest.FLAG_REPLICATED);
 
             if (!replicated && !F.isEmpty(cacheIds) &&
-                findFirstPartitioned(cacheIds).config().getQueryParallelism() > 1) {
+                CU.firstPartitioned(ctx.cache().context(), cacheIds).config().getQueryParallelism() > 1) {
                 fldsQry.setDistributedJoins(true);
 
                 local = false;
@@ -1010,12 +1265,12 @@
             if (lazyWorker != null) {
                 lazyWorker.submit(new Runnable() {
                     @Override public void run() {
-                        sendNextPage(nodeRess, node, qr, req.query(), req.segmentId(), req.pageSize());
+                        sendNextPage(nodeRess, node, qr, req.query(), req.segmentId(), req.pageSize(), false);
                     }
                 });
             }
             else
-                sendNextPage(nodeRess, node, qr, req.query(), req.segmentId(), req.pageSize());
+                sendNextPage(nodeRess, node, qr, req.query(), req.segmentId(), req.pageSize(), false);
         }
     }
 
@@ -1026,15 +1281,18 @@
      * @param qry Query.
      * @param segmentId Index segment ID.
      * @param pageSize Page size.
+     * @param removeMapping Remove mapping flag.
+     * @return Next page.
+     * @throws IgniteCheckedException If failed.
      */
-    private void sendNextPage(MapNodeResults nodeRess, ClusterNode node, MapQueryResults qr, int qry, int segmentId,
-        int pageSize) {
+    private GridQueryNextPageResponse prepareNextPage(MapNodeResults nodeRess, ClusterNode node, MapQueryResults qr, int qry, int segmentId,
+        int pageSize, boolean removeMapping) throws IgniteCheckedException {
         MapQueryResult res = qr.result(qry);
 
         assert res != null;
 
         if (res.closed())
-            return;
+            return null;
 
         int page = res.page();
 
@@ -1054,20 +1312,45 @@
             }
         }
 
+        boolean loc = node.isLocal();
+
+        // In case of SELECT FOR UPDATE the last columns is _KEY,
+        // we can't retrieve them for an arbitrary row otherwise.
+        int colsCnt = !qr.isForUpdate() ? res.columnCount() : res.columnCount() - 1;
+
+        GridQueryNextPageResponse msg = new GridQueryNextPageResponse(qr.queryRequestId(), segmentId, qry, page,
+            page == 0 ? res.rowCount() : -1,
+            colsCnt,
+            loc ? null : toMessages(rows, new ArrayList<>(res.columnCount()), colsCnt),
+            loc ? rows : null,
+            last);
+
+        msg.removeMapping(removeMapping);
+
+        return msg;
+    }
+
+    /**
+     * @param nodeRess Results.
+     * @param node Node.
+     * @param qr Query results.
+     * @param qry Query.
+     * @param segmentId Index segment ID.
+     * @param pageSize Page size.
+     * @param removeMapping Remove mapping flag.
+     */
+    @SuppressWarnings("unchecked")
+    private void sendNextPage(MapNodeResults nodeRess, ClusterNode node, MapQueryResults qr, int qry, int segmentId,
+        int pageSize, boolean removeMapping) {
         try {
-            boolean loc = node.isLocal();
+            GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qr, qry, segmentId, pageSize, removeMapping);
 
-            GridQueryNextPageResponse msg = new GridQueryNextPageResponse(qr.queryRequestId(), segmentId, qry, page,
-                page == 0 ? res.rowCount() : -1,
-                res.columnCount(),
-                loc ? null : toMessages(rows, new ArrayList<Message>(res.columnCount())),
-                loc ? rows : null,
-                last);
-
-            if (loc)
-                h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), msg);
-            else
-                ctx.io().sendToGridTopic(node, GridTopic.TOPIC_QUERY, msg, QUERY_POOL);
+            if (msg != null) {
+                if (node.isLocal())
+                    h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), msg);
+                else
+                    ctx.io().sendToGridTopic(node, GridTopic.TOPIC_QUERY, msg, QUERY_POOL);
+            }
         }
         catch (IgniteCheckedException e) {
             U.error(log, "Failed to send message.", e);
@@ -1081,7 +1364,7 @@
      * @param reqId Request ID.
      * @param segmentId Index segment ID.
      */
-    private void sendRetry(ClusterNode node, long reqId, int segmentId) {
+    private void sendRetry(ClusterNode node, long reqId, int segmentId, String retryCause) {
         try {
             boolean loc = node.isLocal();
 
@@ -1092,6 +1375,7 @@
                 false);
 
             msg.retry(h2.readyTopologyVersion());
+            msg.retryCause(retryCause);
 
             if (loc)
                 h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), msg);
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexIterator.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexIterator.java
index 1c0efb3..851e1e4 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexIterator.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexIterator.java
@@ -22,11 +22,12 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.NoSuchElementException;
-
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
 import org.h2.index.Cursor;
 import org.h2.result.Row;
+import org.jetbrains.annotations.Nullable;
 
 /**
  * Iterator that transparently and sequentially traverses a bunch of {@link GridMergeIndex} objects.
@@ -59,6 +60,9 @@
     /** Whether remote resources were released. */
     private boolean released;
 
+    /** */
+    private MvccQueryTracker mvccTracker;
+
     /**
      * Constructor.
      *
@@ -69,14 +73,19 @@
      * @param distributedJoins Distributed joins.
      * @throws IgniteCheckedException if failed.
      */
-    GridMergeIndexIterator(GridReduceQueryExecutor rdcExec, Collection<ClusterNode> nodes, ReduceQueryRun run,
-        long qryReqId, boolean distributedJoins)
+    GridMergeIndexIterator(GridReduceQueryExecutor rdcExec,
+        Collection<ClusterNode> nodes,
+        ReduceQueryRun run,
+        long qryReqId,
+        boolean distributedJoins,
+        @Nullable MvccQueryTracker mvccTracker)
         throws IgniteCheckedException {
         this.rdcExec = rdcExec;
         this.nodes = nodes;
         this.run = run;
         this.qryReqId = qryReqId;
         this.distributedJoins = distributedJoins;
+        this.mvccTracker = mvccTracker;
 
         this.idxIter = run.indexes().iterator();
 
@@ -155,7 +164,7 @@
     private void releaseIfNeeded() {
         if (!released) {
             try {
-                rdcExec.releaseRemoteResources(nodes, run, qryReqId, distributedJoins);
+                rdcExec.releaseRemoteResources(nodes, run, qryReqId, distributedJoins, mvccTracker);
             }
             finally {
                 released = true;
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java
index 0dc8354..9f5547a 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridMergeIndexSorted.java
@@ -61,7 +61,9 @@
     /** */
     private final Comparator<RowStream> streamCmp = new Comparator<RowStream>() {
         @Override public int compare(RowStream o1, RowStream o2) {
-            // Nulls at the beginning.
+            if (o1 == o2) // both nulls
+                return 0;
+
             if (o1 == null)
                 return -1;
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
index 65aeae9..96c88ff 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridReduceQueryExecutor.java
@@ -37,6 +37,7 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
@@ -60,6 +61,10 @@
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxSelectForUpdateFuture;
+import org.apache.ignite.internal.processors.cache.distributed.near.TxTopologyVersionFuture;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryType;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery;
@@ -67,6 +72,7 @@
 import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator;
 import org.apache.ignite.internal.processors.query.GridQueryCancel;
 import org.apache.ignite.internal.processors.query.GridRunningQueryInfo;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.processors.query.h2.H2FieldsIterator;
 import org.apache.ignite.internal.processors.query.h2.H2Utils;
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
@@ -81,15 +87,18 @@
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse;
 import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2SelectForUpdateTxDetails;
 import org.apache.ignite.internal.util.GridIntIterator;
 import org.apache.ignite.internal.util.GridIntList;
 import org.apache.ignite.internal.util.GridSpinBusyLock;
+import org.apache.ignite.internal.util.typedef.C2;
 import org.apache.ignite.internal.util.typedef.CIX2;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiClosure;
 import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.apache.ignite.transactions.TransactionException;
 import org.h2.command.ddl.CreateTableData;
@@ -104,6 +113,7 @@
 import static java.util.Collections.singletonList;
 import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
 import static org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion.NONE;
+import static org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.*;
 import static org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery.EMPTY_PARAMS;
 import static org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode.OFF;
 import static org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryType.REDUCE;
@@ -223,8 +233,7 @@
      * @param nodeId Left node ID.
      */
     private void handleNodeLeft(ReduceQueryRun r, UUID nodeId) {
-        // Will attempt to retry. If reduce query was started it will fail on next page fetching.
-        retry(r, h2.readyTopologyVersion(), nodeId);
+        r.setStateOnNodeLeave(nodeId, h2.readyTopologyVersion());
     }
 
     /**
@@ -276,12 +285,13 @@
      */
     private void fail(ReduceQueryRun r, UUID nodeId, String msg, byte failCode) {
         if (r != null) {
-            CacheException e = new CacheException("Failed to execute map query on the node: " + nodeId + ", " + msg);
+            CacheException e = new CacheException("Failed to execute map query on remote node [nodeId=" + nodeId +
+                ", errMsg=" + msg + ']');
 
             if (failCode == GridQueryFailResponse.CANCELLED_BY_ORIGINATOR)
                 e.addSuppressed(new QueryCancelledException());
 
-            r.state(e, nodeId);
+            r.setStateOnException(nodeId, e);
         }
     }
 
@@ -289,7 +299,7 @@
      * @param node Node.
      * @param msg Message.
      */
-    private void onNextPage(final ClusterNode node, GridQueryNextPageResponse msg) {
+    private void onNextPage(final ClusterNode node, final GridQueryNextPageResponse msg) {
         final long qryReqId = msg.queryRequestId();
         final int qry = msg.query();
         final int seg = msg.segmentId();
@@ -308,20 +318,13 @@
         try {
             page = new GridResultPage(ctx, node.id(), msg) {
                 @Override public void fetchNextPage() {
-                    Object errState = r.state();
+                    if (r.hasErrorOrRetry()) {
+                        if (r.exception() != null)
+                            throw r.exception();
 
-                    if (errState != null) {
-                        CacheException err0 = errState instanceof CacheException ? (CacheException)errState : null;
+                        assert r.retryCause() != null;
 
-                        if (err0 != null && err0.getCause() instanceof IgniteClientDisconnectedException)
-                            throw err0;
-
-                        CacheException e = new CacheException("Failed to fetch data from node: " + node.id());
-
-                        if (err0 != null)
-                            e.addSuppressed(err0);
-
-                        throw e;
+                        throw new CacheException(r.retryCause());
                     }
 
                     try {
@@ -349,18 +352,16 @@
         idx.addPage(page);
 
         if (msg.retry() != null)
-            retry(r, msg.retry(), node.id());
-        else if (msg.page() == 0) // Do count down on each first page received.
+            r.setStateOnRetry(node.id(), msg.retry(), msg.retryCause());
+        else if (msg.page() == 0) {
+            // Do count down on each first page received.
             r.latch().countDown();
-    }
 
-    /**
-     * @param r Query run.
-     * @param retryVer Retry version.
-     * @param nodeId Node ID.
-     */
-    private void retry(ReduceQueryRun r, AffinityTopologyVersion retryVer, UUID nodeId) {
-        r.state(retryVer, nodeId);
+            GridNearTxSelectForUpdateFuture sfuFut = r.selectForUpdateFuture();
+
+            if (sfuFut != null)
+                sfuFut.onResult(node.id(), (long)msg.allRows(), msg.removeMapping(), null);
+        }
     }
 
     /**
@@ -369,6 +370,9 @@
      */
     private boolean isPreloadingActive(List<Integer> cacheIds) {
         for (Integer cacheId : cacheIds) {
+            if (null == cacheContext(cacheId))
+                throw new CacheException(String.format("Cache not found on local node [cacheId=%d]", cacheId));
+
             if (hasMovingPartitions(cacheContext(cacheId)))
                 return true;
         }
@@ -381,6 +385,8 @@
      * @return {@code True} If cache has partitions in {@link GridDhtPartitionState#MOVING} state.
      */
     private boolean hasMovingPartitions(GridCacheContext<?, ?> cctx) {
+        assert cctx != null;
+
         return !cctx.isLocal() && cctx.topology().hasMovingPartitions();
     }
 
@@ -550,6 +556,7 @@
      * @param params Query parameters.
      * @param parts Partitions.
      * @param lazy Lazy execution flag.
+     * @param mvccTracker Query tracker.
      * @return Rows iterator.
      */
     public Iterator<List<?>> query(
@@ -561,8 +568,10 @@
         GridQueryCancel cancel,
         Object[] params,
         final int[] parts,
-        boolean lazy
-    ) {
+        boolean lazy,
+        MvccQueryTracker mvccTracker) {
+        assert !qry.mvccEnabled() || mvccTracker != null;
+
         if (F.isEmpty(params))
             params = EMPTY_PARAMS;
 
@@ -572,9 +581,18 @@
 
         final long startTime = U.currentTimeMillis();
 
+        ReduceQueryRun lastRun = null;
+
         for (int attempt = 0;; attempt++) {
-            if (attempt > 0 && retryTimeout > 0 && (U.currentTimeMillis() - startTime > retryTimeout))
-                throw new CacheException("Failed to map SQL query to topology.");
+            if (attempt > 0 && retryTimeout > 0 && (U.currentTimeMillis() - startTime > retryTimeout)) {
+                UUID retryNodeId = lastRun.retryNodeId();
+                String retryCause = lastRun.retryCause();
+
+                assert !F.isEmpty(retryCause);
+
+                throw new CacheException("Failed to map SQL query to topology on data node [dataNodeId=" + retryNodeId +
+                    ", msg=" + retryCause + ']');
+            }
 
             if (attempt != 0) {
                 try {
@@ -589,19 +607,52 @@
 
             long qryReqId = qryIdGen.incrementAndGet();
 
-            final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName,
-                h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(),
-                U.currentTimeMillis(), cancel);
+            List<Integer> cacheIds = qry.cacheIds();
 
-            AffinityTopologyVersion topVer = h2.readyTopologyVersion();
+            boolean mvccEnabled = mvccEnabled(ctx);
 
-            // Check if topology is changed while retrying on locked topology.
-            if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
-                throw new CacheException(new TransactionException("Server topology is changed during query " +
-                    "execution inside a transaction. It's recommended to rollback and retry transaction."));
+            final GridNearTxLocal curTx = mvccEnabled ? checkActive(tx(ctx)) : null;
+
+            final GridNearTxSelectForUpdateFuture sfuFut;
+
+            final boolean clientFirst;
+
+            AffinityTopologyVersion topVer;
+
+            if (qry.forUpdate()) {
+                // Indexing should have started TX at this point for FOR UPDATE query.
+                assert mvccEnabled && curTx != null;
+
+                try {
+                    TxTopologyVersionFuture topFut = new TxTopologyVersionFuture(curTx, mvccTracker.context());
+
+                    topVer = topFut.get();
+
+                    clientFirst = topFut.clientFirst();
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IgniteSQLException("Failed to map SELECT FOR UPDATE query on topology.", e);
+                }
+
+                sfuFut = new GridNearTxSelectForUpdateFuture(mvccTracker.context(), curTx, timeoutMillis);
+            }
+            else {
+                sfuFut = null;
+
+                clientFirst = false;
+
+                topVer = h2.readyTopologyVersion();
+
+                // Check if topology has changed while retrying on locked topology.
+                if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
+                    throw new CacheException(new TransactionException("Server topology is changed during query " +
+                        "execution inside a transaction. It's recommended to rollback and retry transaction."));
+                }
             }
 
-            List<Integer> cacheIds = qry.cacheIds();
+            final ReduceQueryRun r = new ReduceQueryRun(qryReqId, qry.originalSql(), schemaName,
+                h2.connectionForSchema(schemaName), qry.mapQueries().size(), qry.pageSize(),
+                U.currentTimeMillis(), sfuFut, cancel);
 
             Collection<ClusterNode> nodes;
 
@@ -637,8 +688,12 @@
                 partsMap = nodesParts.partitionsMap();
                 qryMap = nodesParts.queryPartitionsMap();
 
-                if (nodes == null)
+                if (nodes == null) {
+                    if (sfuFut != null)
+                        sfuFut.onDone(0L, null);
+
                     continue; // Retry.
+                }
 
                 assert !nodes.isEmpty();
 
@@ -656,6 +711,9 @@
                 }
             }
 
+            if (sfuFut != null && !sfuFut.isFailed())
+                sfuFut.init(topVer, nodes);
+
             int tblIdx = 0;
 
             final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable();
@@ -774,29 +832,68 @@
                     .timeout(timeoutMillis)
                     .schemaName(schemaName);
 
-                if (send(nodes, req, parts == null ? null : new ExplicitPartitionsSpecializer(qryMap), false)) {
+                if (curTx != null && curTx.mvccSnapshot() != null)
+                    req.mvccSnapshot(curTx.mvccSnapshot());
+                else if (mvccTracker != null)
+                    req.mvccSnapshot(mvccTracker.snapshot());
+
+                final C2<ClusterNode, Message, Message> pspec =
+                    (parts == null ? null : new ExplicitPartitionsSpecializer(qryMap));
+
+                final C2<ClusterNode, Message, Message> spec;
+
+                if (qry.forUpdate()) {
+                    final AtomicInteger cnt = new AtomicInteger();
+
+                    spec = new C2<ClusterNode, Message, Message>() {
+                        @Override public Message apply(ClusterNode clusterNode, Message msg) {
+                            assert msg instanceof GridH2QueryRequest;
+
+                            GridH2QueryRequest res = pspec != null ? (GridH2QueryRequest)pspec.apply(clusterNode, msg) :
+                                new GridH2QueryRequest((GridH2QueryRequest)msg);
+
+                            GridH2SelectForUpdateTxDetails txReq = new GridH2SelectForUpdateTxDetails(
+                                curTx.threadId(),
+                                IgniteUuid.randomUuid(),
+                                cnt.incrementAndGet(),
+                                curTx.subjectId(),
+                                curTx.xidVersion(),
+                                curTx.taskNameHash(),
+                                clientFirst,
+                                curTx.remainingTime());
+
+                            res.txDetails(txReq);
+
+                            return res;
+                        }
+                    };
+                }
+                else
+                    spec = pspec;
+
+                if (send(nodes, req, spec, false)) {
                     awaitAllReplies(r, nodes, cancel);
 
-                    Object state = r.state();
+                    if (r.hasErrorOrRetry()) {
+                        CacheException err = r.exception();
 
-                    if (state != null) {
-                        if (state instanceof CacheException) {
-                            CacheException err = (CacheException)state;
-
+                        if (err != null) {
                             if (err.getCause() instanceof IgniteClientDisconnectedException)
                                 throw err;
 
                             if (wasCancelled(err))
                                 throw new QueryCancelledException(); // Throw correct exception.
 
-                            throw new CacheException("Failed to run map query remotely." + err.getMessage(), err);
+                            throw err;
                         }
-
-                        if (state instanceof AffinityTopologyVersion) {
+                        else {
                             retry = true;
 
+                            // On-the-fly topology change must not be possible in FOR UPDATE case.
+                            assert sfuFut == null;
+
                             // If remote node asks us to retry then we have outdated full partition map.
-                            h2.awaitForReadyTopologyVersion((AffinityTopologyVersion)state);
+                            h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());
                         }
                     }
                 }
@@ -807,7 +904,12 @@
 
                 if (!retry) {
                     if (skipMergeTbl) {
-                        resIter = new GridMergeIndexIterator(this, finalNodes, r, qryReqId, qry.distributedJoins());
+                        resIter = new GridMergeIndexIterator(this,
+                            finalNodes,
+                            r,
+                            qryReqId,
+                            qry.distributedJoins(),
+                            mvccTracker);
 
                         release = false;
                     }
@@ -834,21 +936,31 @@
                                 timeoutMillis,
                                 cancel);
 
-                            resIter = new H2FieldsIterator(res);
+                            resIter = new H2FieldsIterator(res, mvccTracker, false);
+
+                            mvccTracker = null; // To prevent callback inside finally block;
                         }
                         finally {
                             GridH2QueryContext.clearThreadLocal();
                         }
                     }
                 }
+                else {
+                    assert r != null;
+                    lastRun=r;
 
-                if (retry) {
                     if (Thread.currentThread().isInterrupted())
                         throw new IgniteInterruptedCheckedException("Query was interrupted.");
 
+                    if (sfuFut != null)
+                        sfuFut.onDone(0L);
+
                     continue;
                 }
 
+                if (sfuFut != null)
+                    sfuFut.get();
+
                 return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
             }
             catch (IgniteCheckedException | RuntimeException e) {
@@ -856,11 +968,21 @@
 
                 U.closeQuiet(r.connection());
 
+                CacheException resEx = null;
+
                 if (e instanceof CacheException) {
                     if (wasCancelled((CacheException)e))
-                        throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
+                        resEx = new  CacheException("Failed to run reduce query locally.",
+                            new QueryCancelledException());
+                    else
+                        resEx = (CacheException)e;
+                }
 
-                    throw (CacheException)e;
+                if (resEx != null) {
+                    if (sfuFut != null)
+                        sfuFut.onDone(resEx);
+
+                    throw resEx;
                 }
 
                 Throwable cause = e;
@@ -873,11 +995,16 @@
                         cause = disconnectedErr;
                 }
 
-                throw new CacheException("Failed to run reduce query locally.", cause);
+                resEx = new CacheException("Failed to run reduce query locally.", cause);
+
+                if (sfuFut != null)
+                    sfuFut.onDone(resEx);
+
+                throw resEx;
             }
             finally {
                 if (release) {
-                    releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins());
+                    releaseRemoteResources(finalNodes, r, qryReqId, qry.distributedJoins(), mvccTracker);
 
                     if (!skipMergeTbl) {
                         for (int i = 0, mapQrys = qry.mapQueries().size(); i < mapQrys; i++)
@@ -1072,7 +1199,10 @@
      * @param distributedJoins Distributed join flag.
      */
     public void releaseRemoteResources(Collection<ClusterNode> nodes, ReduceQueryRun r, long qryReqId,
-        boolean distributedJoins) {
+        boolean distributedJoins, MvccQueryTracker mvccTracker) {
+        if (mvccTracker != null)
+            mvccTracker.onDone();
+
         // For distributedJoins need always send cancel request to cleanup resources.
         if (distributedJoins)
             send(nodes, new GridQueryCancelRequest(qryReqId), null, false);
@@ -1499,7 +1629,7 @@
      * @param runLocParallel Run local handler in parallel thread.
      * @return {@code true} If all messages sent successfully.
      */
-    private boolean send(
+    public boolean send(
         Collection<ClusterNode> nodes,
         Message msg,
         @Nullable IgniteBiClosure<ClusterNode, Message, Message> specialize,
@@ -1767,7 +1897,7 @@
     }
 
     /** */
-    private static class ExplicitPartitionsSpecializer implements IgniteBiClosure<ClusterNode, Message, Message> {
+    private static class ExplicitPartitionsSpecializer implements C2<ClusterNode, Message, Message> {
         /** Partitions map. */
         private final Map<ClusterNode, IntArray> partsMap;
 
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridResultPage.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridResultPage.java
index 103084e..0cb986b 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridResultPage.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/GridResultPage.java
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.internal.processors.query.h2.twostep;
 
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.Iterator;
@@ -26,7 +28,9 @@
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse;
+import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.plugin.extensions.communication.Message;
 import org.h2.value.Value;
 
@@ -70,7 +74,21 @@
             if (plainRows != null) {
                 rowsInPage = plainRows.size();
 
-                rows = (Iterator<Value[]>)plainRows.iterator();
+                if (rowsInPage == 0 || ((ArrayList<Value[]>)plainRows).get(0).length == res.columns())
+                    rows = (Iterator<Value[]>)plainRows.iterator();
+                else {
+                    // If it's a result of SELECT FOR UPDATE (we can tell by difference in number
+                    // of columns checked above), we need to strip off stuff we don't need.
+                    rows = F.iterator(plainRows, new IgniteClosure<Object, Value[]>() {
+                        @Override public Value[] apply(Object o) {
+                            Value[] row = (Value[])o;
+
+                            assert row.length >= res.columns();
+
+                            return Arrays.copyOfRange(row, 0, res.columns());
+                        }
+                    }, true);
+                }
             }
             else {
                 final int cols = res.columns();
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java
index 733590c..fb928c4 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResult.java
@@ -239,7 +239,7 @@
             rows.add(res.currentRow());
         }
 
-        return false;
+        return !res.hasNext();
     }
 
     /**
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResults.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResults.java
index 45f9c1f..76527bc 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResults.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/MapQueryResults.java
@@ -51,17 +51,22 @@
     /** */
     private volatile boolean cancelled;
 
+    /** {@code SELECT FOR UPDATE} flag. */
+    private final boolean forUpdate;
+
     /**
      * Constructor.
-     *
+     * @param h2 Indexing instance.
      * @param qryReqId Query request ID.
      * @param qrys Number of queries.
      * @param cctx Cache context.
      * @param lazyWorker Lazy worker (if any).
+     * @param forUpdate {@code SELECT FOR UPDATE} flag.
      */
     @SuppressWarnings("unchecked")
     MapQueryResults(IgniteH2Indexing h2, long qryReqId, int qrys, @Nullable GridCacheContext<?, ?> cctx,
-        @Nullable MapQueryLazyWorker lazyWorker) {
+        @Nullable MapQueryLazyWorker lazyWorker, boolean forUpdate) {
+        this.forUpdate = forUpdate;
         this.h2 = h2;
         this.qryReqId = qryReqId;
         this.cctx = cctx;
@@ -101,11 +106,11 @@
 
     /**
      * Add result.
-     *
      * @param qry Query result index.
      * @param q Query object.
      * @param qrySrcNodeId Query source node.
      * @param rs Result set.
+     * @param params Query arguments.
      */
     void addResult(int qry, GridCacheSqlQuery q, UUID qrySrcNodeId, ResultSet rs, Object[] params) {
         MapQueryResult res = new MapQueryResult(h2, rs, cctx, qrySrcNodeId, q, params, lazyWorker);
@@ -172,4 +177,11 @@
     long queryRequestId() {
         return qryReqId;
     }
+
+    /**
+     * @return {@code SELECT FOR UPDATE} flag.
+     */
+    public boolean isForUpdate() {
+        return forUpdate;
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/ReduceQueryRun.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/ReduceQueryRun.java
index 73bb002..7ddd653 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/ReduceQueryRun.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/ReduceQueryRun.java
@@ -17,20 +17,21 @@
 
 package org.apache.ignite.internal.processors.query.h2.twostep;
 
-import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
-import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery;
-import org.apache.ignite.internal.processors.query.GridQueryCancel;
-import org.apache.ignite.internal.processors.query.GridRunningQueryInfo;
-import org.h2.jdbc.JdbcConnection;
-import org.jetbrains.annotations.Nullable;
-
-import javax.cache.CacheException;
 import java.sql.Connection;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.UUID;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicReference;
+import javax.cache.CacheException;
+import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxSelectForUpdateFuture;
+import org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery;
+import org.apache.ignite.internal.processors.query.GridQueryCancel;
+import org.apache.ignite.internal.processors.query.GridRunningQueryInfo;
+import org.apache.ignite.internal.util.typedef.F;
+import org.h2.jdbc.JdbcConnection;
+import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.query.GridCacheQueryType.SQL_FIELDS;
 
@@ -53,12 +54,14 @@
     /** */
     private final int pageSize;
 
-    /** Can be either CacheException in case of error or AffinityTopologyVersion to retry if needed. */
-    private final AtomicReference<Object> state = new AtomicReference<>();
+    /** */
+    private final AtomicReference<State> state = new AtomicReference<>();
+
+    /** Future controlling {@code SELECT FOR UPDATE} query execution. */
+    private final GridNearTxSelectForUpdateFuture selectForUpdateFut;
 
     /**
      * Constructor.
-     *
      * @param id Query ID.
      * @param qry Query text.
      * @param schemaName Schema name.
@@ -66,44 +69,77 @@
      * @param idxsCnt Number of indexes.
      * @param pageSize Page size.
      * @param startTime Start time.
+     * @param selectForUpdateFut Future controlling {@code SELECT FOR UPDATE} query execution.
      * @param cancel Query cancel handler.
      */
     ReduceQueryRun(Long id, String qry, String schemaName, Connection conn, int idxsCnt, int pageSize, long startTime,
-        GridQueryCancel cancel) {
-        this.qry = new GridRunningQueryInfo(id, qry, SQL_FIELDS, schemaName, startTime, cancel, false);
+        GridNearTxSelectForUpdateFuture selectForUpdateFut, GridQueryCancel cancel) {
+        this.qry = new GridRunningQueryInfo(id, qry, SQL_FIELDS, schemaName, startTime, cancel,
+            false);
 
         this.conn = (JdbcConnection)conn;
 
         this.idxs = new ArrayList<>(idxsCnt);
 
         this.pageSize = pageSize > 0 ? pageSize : GridCacheTwoStepQuery.DFLT_PAGE_SIZE;
+
+        this.selectForUpdateFut = selectForUpdateFut;
     }
 
     /**
-     * @param o Fail state object.
+     * Set state on exception.
+     *
+     * @param err error.
      * @param nodeId Node ID.
      */
-    void state(Object o, @Nullable UUID nodeId) {
-        assert o != null;
-        assert o instanceof CacheException || o instanceof AffinityTopologyVersion : o.getClass();
+    void setStateOnException(@Nullable UUID nodeId, CacheException err) {
+        setState0(new State(nodeId, err, null, null));
+    }
 
-        if (!state.compareAndSet(null, o))
+    /**
+     * Set state on map node leave.
+     *
+     * @param nodeId Node ID.
+     * @param topVer Topology version.
+     */
+    void setStateOnNodeLeave(UUID nodeId, AffinityTopologyVersion topVer) {
+        setState0(new State(nodeId, null, topVer, "Data node has left the grid during query execution [nodeId=" +
+            nodeId + ']'));
+    }
+
+    /**
+     * Set state on retry due to mapping failure.
+     *
+     * @param nodeId Node ID.
+     * @param topVer Topology version.
+     * @param retryCause Retry cause.
+     */
+    void setStateOnRetry(UUID nodeId, AffinityTopologyVersion topVer, String retryCause) {
+        assert !F.isEmpty(retryCause);
+
+        setState0(new State(nodeId, null, topVer, retryCause));
+    }
+
+    /**
+     *
+     * @param state state
+     */
+    private void setState0(State state){
+        if (!this.state.compareAndSet(null, state))
             return;
 
         while (latch.getCount() != 0) // We don't need to wait for all nodes to reply.
             latch.countDown();
 
-        CacheException e = o instanceof CacheException ? (CacheException) o : null;
-
         for (GridMergeIndex idx : idxs) // Fail all merge indexes.
-            idx.fail(nodeId, e);
+            idx.fail(state.nodeId, state.ex);
     }
 
     /**
      * @param e Error.
      */
     void disconnected(CacheException e) {
-        state(e, null);
+        setStateOnException(null, e);
     }
 
     /**
@@ -127,11 +163,45 @@
         return conn;
     }
 
+    /** */
+    boolean hasErrorOrRetry(){
+        return state.get() != null;
+    }
+
     /**
-     * @return State.
+     * @return Exception.
      */
-    Object state() {
-        return state.get();
+    CacheException exception() {
+        State st = state.get();
+
+        return st != null ? st.ex : null;
+    }
+
+    /**
+     * @return Retry topology version.
+     */
+    AffinityTopologyVersion retryTopologyVersion(){
+        State st = state.get();
+
+        return st != null ? st.retryTopVer : null;
+    }
+
+    /**
+     * @return Retry bode ID.
+     */
+    UUID retryNodeId() {
+        State st = state.get();
+
+        return st != null ? st.nodeId : null;
+    }
+
+    /**
+     * @return Retry cause.
+     */
+    String retryCause(){
+        State st = state.get();
+
+        return st != null ? st.retryCause : null;
     }
 
     /**
@@ -154,4 +224,36 @@
     void latch(CountDownLatch latch) {
         this.latch = latch;
     }
+
+    /**
+     * @return {@code SELECT FOR UPDATE} future, if any.
+     */
+    @Nullable public GridNearTxSelectForUpdateFuture selectForUpdateFuture() {
+        return selectForUpdateFut;
+    }
+
+    /**
+     * Error state.
+     */
+    private static class State {
+        /** Affected node (may be null in case of local node failure). */
+        private final UUID nodeId;
+
+        /** Error. */
+        private final CacheException ex;
+
+        /** Retry topology version. */
+        private final AffinityTopologyVersion retryTopVer;
+
+        /** Retry cause. */
+        private final String retryCause;
+
+        /** */
+        private State(UUID nodeId, CacheException ex, AffinityTopologyVersion retryTopVer, String retryCause){
+            this.nodeId = nodeId;
+            this.ex = ex;
+            this.retryTopVer = retryTopVer;
+            this.retryCause = retryCause;
+        }
+    }
 }
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2QueryRequest.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2QueryRequest.java
index 4e1fadb..0bec66e 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2QueryRequest.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2QueryRequest.java
@@ -31,6 +31,7 @@
 import org.apache.ignite.internal.GridKernalContext;
 import org.apache.ignite.internal.binary.BinaryMarshaller;
 import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryMarshallable;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery;
 import org.apache.ignite.internal.processors.cache.query.QueryTable;
@@ -42,6 +43,7 @@
 import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType;
 import org.apache.ignite.plugin.extensions.communication.MessageReader;
 import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+import org.jetbrains.annotations.Nullable;
 
 import static org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery.EMPTY_PARAMS;
 
@@ -133,6 +135,12 @@
     /** Schema name. */
     private String schemaName;
 
+    /** */
+    private MvccSnapshot mvccSnapshot;
+
+    /** TX details holder for {@code SELECT FOR UPDATE}, or {@code null} if not applicable. */
+    private GridH2SelectForUpdateTxDetails txReq;
+
     /**
      * Required by {@link Externalizable}
      */
@@ -157,6 +165,25 @@
         params = req.params;
         paramsBytes = req.paramsBytes;
         schemaName = req.schemaName;
+        mvccSnapshot = req.mvccSnapshot;
+        txReq = req.txReq;
+    }
+
+    /**
+     * @return MVCC snapshot.
+     */
+    @Nullable public MvccSnapshot mvccSnapshot() {
+        return mvccSnapshot;
+    }
+
+    /**
+     * @param mvccSnapshot MVCC snapshot version.
+     * @return {@code this}.
+     */
+    public GridH2QueryRequest mvccSnapshot(MvccSnapshot mvccSnapshot) {
+        this.mvccSnapshot = mvccSnapshot;
+
+        return this;
     }
 
     /**
@@ -373,6 +400,20 @@
         return this;
     }
 
+    /**
+     * @return TX details holder for {@code SELECT FOR UPDATE}, or {@code null} if not applicable.
+     */
+    public GridH2SelectForUpdateTxDetails txDetails() {
+        return txReq;
+    }
+
+    /**
+     * @param txDetails TX details holder for {@code SELECT FOR UPDATE}, or {@code null} if not applicable.
+     */
+    public void txDetails(GridH2SelectForUpdateTxDetails txDetails) {
+        this.txReq = txDetails;
+    }
+
     /** {@inheritDoc} */
     @Override public void marshall(Marshaller m) {
         if (paramsBytes != null)
@@ -482,7 +523,6 @@
 
                 writer.incrementState();
 
-
             case 10:
                 if (!writer.writeIntArray("qryParts", qryParts))
                     return false;
@@ -494,6 +534,19 @@
                     return false;
 
                 writer.incrementState();
+
+            case 12:
+                if (!writer.writeMessage("mvccSnapshot", mvccSnapshot))
+                    return false;
+
+                writer.incrementState();
+
+            case 13:
+                if (!writer.writeMessage("txReq", txReq))
+                    return false;
+
+                writer.incrementState();
+
         }
 
         return true;
@@ -587,7 +640,6 @@
 
                 reader.incrementState();
 
-
             case 10:
                 qryParts = reader.readIntArray("qryParts");
 
@@ -603,6 +655,23 @@
                     return false;
 
                 reader.incrementState();
+
+            case 12:
+                mvccSnapshot = reader.readMessage("mvccSnapshot");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 13:
+                txReq = reader.readMessage("txReq");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
         }
 
         return reader.afterMessageRead(GridH2QueryRequest.class);
@@ -615,7 +684,7 @@
 
     /** {@inheritDoc} */
     @Override public byte fieldsCount() {
-        return 12;
+        return 14;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2SelectForUpdateTxDetails.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2SelectForUpdateTxDetails.java
new file mode 100644
index 0000000..a13a9c1
--- /dev/null
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2SelectForUpdateTxDetails.java
@@ -0,0 +1,299 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep.msg;
+
+import java.nio.ByteBuffer;
+import java.util.UUID;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.lang.IgniteUuid;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.plugin.extensions.communication.MessageReader;
+import org.apache.ignite.plugin.extensions.communication.MessageWriter;
+
+/**
+ * TX details holder for {@link GridH2QueryRequest}.
+ */
+public class GridH2SelectForUpdateTxDetails implements Message {
+    /** */
+    private static final long serialVersionUID = 8166491041528984454L;
+    /** */
+    private long threadId;
+
+    /** */
+    private IgniteUuid futId;
+
+    /** */
+    private int miniId;
+
+    /** */
+    private UUID subjId;
+
+    /** */
+    private GridCacheVersion lockVer;
+
+    /** */
+    private int taskNameHash;
+
+    /** */
+    private boolean clientFirst;
+
+    /** */
+    private long timeout;
+
+    /**
+     * Default constructor.
+     */
+    GridH2SelectForUpdateTxDetails() {
+        // No-op.
+    }
+
+    /**
+     * @param threadId Thread id.
+     * @param futId Future id.
+     * @param miniId Mini fture id.
+     * @param subjId Subject id.
+     * @param lockVer Lock version.
+     * @param taskNameHash Task name hash.
+     * @param clientFirst {@code True} if this is the first client request.
+     * @param timeout Tx timeout.
+     */
+    public GridH2SelectForUpdateTxDetails(long threadId, IgniteUuid futId, int miniId, UUID subjId,
+        GridCacheVersion lockVer, int taskNameHash, boolean clientFirst, long timeout) {
+        this.threadId = threadId;
+        this.futId = futId;
+        this.miniId = miniId;
+        this.subjId = subjId;
+        this.lockVer = lockVer;
+        this.taskNameHash = taskNameHash;
+        this.clientFirst = clientFirst;
+        this.timeout = timeout;
+    }
+
+    /**
+     * @return Thread id.
+     */
+    public long threadId() {
+        return threadId;
+    }
+
+    /**
+     * @return Future id.
+     */
+    public IgniteUuid futureId() {
+        return futId;
+    }
+
+    /**
+     * @return Mini fture id.
+     */
+    public int miniId() {
+        return miniId;
+    }
+
+    /**
+     * @return Subject id.
+     */
+    public UUID subjectId() {
+        return subjId;
+    }
+
+    /**
+     * @return Lock version.
+     */
+    public GridCacheVersion version() {
+        return lockVer;
+    }
+
+    /**
+     * @return Task name hash.
+     */
+    public int taskNameHash() {
+        return taskNameHash;
+    }
+
+    /**
+     * @return {@code True} if this is the first client request in transaction.
+     */
+    public boolean firstClientRequest() {
+        return clientFirst;
+    }
+
+    /**
+     * @return Tx timeout.
+     */
+    public long timeout() {
+        return timeout;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) {
+        writer.setBuffer(buf);
+
+        if (!writer.isHeaderWritten()) {
+            if (!writer.writeHeader(directType(), fieldsCount()))
+                return false;
+
+            writer.onHeaderWritten();
+        }
+
+        switch (writer.state()) {
+            case 0:
+                if (!writer.writeBoolean("clientFirst", clientFirst))
+                    return false;
+
+                writer.incrementState();
+
+            case 1:
+                if (!writer.writeIgniteUuid("futId", futId))
+                    return false;
+
+                writer.incrementState();
+
+            case 2:
+                if (!writer.writeMessage("lockVer", lockVer))
+                    return false;
+
+                writer.incrementState();
+
+            case 3:
+                if (!writer.writeInt("miniId", miniId))
+                    return false;
+
+                writer.incrementState();
+
+            case 4:
+                if (!writer.writeUuid("subjId", subjId))
+                    return false;
+
+                writer.incrementState();
+
+            case 5:
+                if (!writer.writeInt("taskNameHash", taskNameHash))
+                    return false;
+
+                writer.incrementState();
+
+            case 6:
+                if (!writer.writeLong("threadId", threadId))
+                    return false;
+
+                writer.incrementState();
+
+            case 7:
+                if (!writer.writeLong("timeout", timeout))
+                    return false;
+
+                writer.incrementState();
+
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) {
+        reader.setBuffer(buf);
+
+        if (!reader.beforeMessageRead())
+            return false;
+
+        switch (reader.state()) {
+            case 0:
+                clientFirst = reader.readBoolean("clientFirst");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 1:
+                futId = reader.readIgniteUuid("futId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 2:
+                lockVer = reader.readMessage("lockVer");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 3:
+                miniId = reader.readInt("miniId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 4:
+                subjId = reader.readUuid("subjId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 5:
+                taskNameHash = reader.readInt("taskNameHash");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 6:
+                threadId = reader.readLong("threadId");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+            case 7:
+                timeout = reader.readLong("timeout");
+
+                if (!reader.isLastRead())
+                    return false;
+
+                reader.incrementState();
+
+        }
+
+        return reader.afterMessageRead(GridH2SelectForUpdateTxDetails.class);
+    }
+
+    /** {@inheritDoc} */
+    @Override public short directType() {
+        return -57;
+    }
+
+    /** {@inheritDoc} */
+    @Override public byte fieldsCount() {
+        return 8;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void onAckReceived() {
+        // No-op.
+    }
+}
diff --git a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java
index 3c13392..c399d76 100644
--- a/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java
+++ b/modules/indexing/src/main/java/org/apache/ignite/internal/processors/query/h2/twostep/msg/GridH2ValueMessageFactory.java
@@ -118,6 +118,9 @@
 
             case -56:
                 return new GridH2DmlResponse();
+
+            case -57:
+                return new GridH2SelectForUpdateTxDetails();
         }
 
         return null;
@@ -126,14 +129,17 @@
     /**
      * @param src Source values.
      * @param dst Destination collection.
+     * @param cnt Number of columns to actually send.
      * @return Destination collection.
      * @throws IgniteCheckedException If failed.
      */
-    public static Collection<Message> toMessages(Collection<Value[]> src, Collection<Message> dst)
+    public static Collection<Message> toMessages(Collection<Value[]> src, Collection<Message> dst, int cnt)
         throws IgniteCheckedException {
         for (Value[] row : src) {
-            for (Value val : row)
-                dst.add(toMessage(val));
+            assert row.length >= cnt;
+
+            for (int i = 0; i < cnt; i++)
+                dst.add(toMessage(row[i]));
         }
 
         return dst;
diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java
index 3fb243c..623a19e 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/client/ClientTestSuite.java
@@ -33,7 +33,8 @@
     ReliabilityTest.class,
     SecurityTest.class,
     FunctionalQueryTest.class,
-    IgniteBinaryQueryTest.class
+    IgniteBinaryQueryTest.class,
+    SslParametersTest.class
 })
 public class ClientTestSuite {
     // No-op.
diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java
index eeec30f..d10ed1a 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/client/FunctionalQueryTest.java
@@ -36,7 +36,10 @@
 import org.apache.ignite.configuration.BinaryConfiguration;
 import org.apache.ignite.configuration.ClientConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.*;
 
@@ -44,6 +47,10 @@
  * Thin client functional tests.
  */
 public class FunctionalQueryTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Tested API:
      * <ul>
diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/IgniteBinaryQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/IgniteBinaryQueryTest.java
index 4b3eebc..a524394 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/client/IgniteBinaryQueryTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/client/IgniteBinaryQueryTest.java
@@ -38,7 +38,10 @@
 import org.apache.ignite.client.Config;
 import org.apache.ignite.client.IgniteClient;
 import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertEquals;
 
@@ -46,6 +49,10 @@
  * Ignite {@link BinaryObject} API system tests.
  */
 public class IgniteBinaryQueryTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /**
      * Test queries in Ignite binary.
      */
diff --git a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java
index e2b11db..0be13db 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/client/SecurityTest.java
@@ -31,8 +31,11 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.ssl.SslContextFactory;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
@@ -41,6 +44,10 @@
  * Thin client security test.
  */
 public class SecurityTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** Ignite home. */
     private static final String IGNITE_HOME = U.getIgniteHome();
 
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheScanPartitionQueryFallbackSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheScanPartitionQueryFallbackSelfTest.java
index 999b1ad..3afcad8 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheScanPartitionQueryFallbackSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/CacheScanPartitionQueryFallbackSelfTest.java
@@ -30,6 +30,7 @@
 import javax.cache.Cache;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheMode;
@@ -57,6 +58,7 @@
 import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 
 /**
@@ -149,6 +151,66 @@
     }
 
     /**
+     * Scan (with explicit {@code setLocal(true)}) should perform on the local node.
+     *
+     * @throws Exception If failed.
+     */
+    public void testScanLocalExplicit() throws Exception {
+        cacheMode = CacheMode.PARTITIONED;
+        backups = 0;
+        commSpiFactory = new TestLocalCommunicationSpiFactory();
+
+        try {
+            Ignite ignite = startGrids(GRID_CNT);
+
+            IgniteCacheProxy<Integer, Integer> cache = fillCache(ignite);
+
+            int part = anyLocalPartition(cache.context());
+
+            QueryCursor<Cache.Entry<Integer, Integer>> qry =
+                cache.query(new ScanQuery<Integer, Integer>().setPartition(part).setLocal(true));
+
+            doTestScanQuery(qry, part);
+
+            GridTestUtils.assertThrows(log, (Callable<Void>)() -> {
+                int remPart = remotePartition(cache.context()).getKey();
+
+                cache.query(new ScanQuery<Integer, Integer>().setPartition(remPart).setLocal(true));
+
+                return null;
+            }, IgniteCheckedException.class, null);
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * Scan (with explicit {@code setLocal(true)}, no partition specified) should perform on the local node.
+     *
+     * @throws Exception If failed.
+     */
+    public void testScanLocalExplicitNoPart() throws Exception {
+        cacheMode = CacheMode.PARTITIONED;
+        backups = 0;
+        commSpiFactory = new TestLocalCommunicationSpiFactory();
+
+        try {
+            Ignite ignite = startGrids(GRID_CNT);
+
+            IgniteCacheProxy<Integer, Integer> cache = fillCache(ignite);
+
+            QueryCursor<Cache.Entry<Integer, Integer>> qry =
+                cache.query(new ScanQuery<Integer, Integer>().setLocal(true));
+
+            assertFalse(qry.getAll().isEmpty());
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
      * Scan should perform on the remote node.
      *
      * @throws Exception If failed.
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/DdlTransactionSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/DdlTransactionSelfTest.java
new file mode 100644
index 0000000..6652559
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/DdlTransactionSelfTest.java
@@ -0,0 +1,335 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import java.util.List;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.TransactionConfiguration;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.apache.ignite.transactions.TransactionState;
+
+/**
+ *
+ */
+public class DdlTransactionSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** */
+    private boolean client;
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        cfg.setTransactionConfiguration(new TransactionConfiguration()
+            .setDefaultTxIsolation(TransactionIsolation.REPEATABLE_READ)
+            .setDefaultTxConcurrency(TransactionConcurrency.PESSIMISTIC)
+            .setDefaultTxTimeout(5000));
+
+        TcpDiscoverySpi discoSpi = new TcpDiscoverySpi();
+
+        discoSpi.setIpFinder(IP_FINDER);
+
+        cfg.setDiscoverySpi(discoSpi);
+        cfg.setCacheConfiguration(getCacheConfiguration());
+        cfg.setClientMode(client);
+
+        return cfg;
+    }
+
+    /**
+     * @return Cache configuration.
+     */
+    private CacheConfiguration getCacheConfiguration() {
+        CacheConfiguration<?, ?> ccfg = defaultCacheConfiguration();
+
+        ccfg.setNearConfiguration(null);
+
+        return ccfg;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxIsCommittedOnDdlRequestMultinodeClient() throws Exception {
+        startGridsMultiThreaded(4, false);
+
+        client = true;
+
+        Ignite node = startGrid(4);
+
+        awaitPartitionMapExchange();
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = node.transactions().txStart()) {
+            cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+            try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+                "CREATE TABLE " +
+                    "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                    "WITH " +
+                    "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+                assertNotNull(cur);
+
+                List<List<?>> rows = cur.getAll();
+
+                assertEquals(1, rows.size());
+
+                assertEquals(0L, rows.get(0).get(0));
+            }
+
+            assertTrue(tx.state() == TransactionState.COMMITTED);
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxIsCommittedOnDdlRequestMultinode() throws Exception {
+        Ignite node = startGridsMultiThreaded(4);
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = node.transactions().txStart()) {
+            cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+            try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+                "CREATE TABLE " +
+                    "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                    "WITH " +
+                    "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+                assertNotNull(cur);
+
+                List<List<?>> rows = cur.getAll();
+
+                assertEquals(1, rows.size());
+
+                assertEquals(0L, rows.get(0).get(0));
+            }
+
+            assertTrue(tx.state() == TransactionState.COMMITTED);
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxIsCommittedOnDdlRequest() throws Exception {
+        Ignite node = startGrid();
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = node.transactions().txStart()) {
+            cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+            try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+                "CREATE TABLE " +
+                    "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                    "WITH " +
+                    "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+                assertNotNull(cur);
+
+                List<List<?>> rows = cur.getAll();
+
+                assertEquals(1, rows.size());
+
+                assertEquals(0L, rows.get(0).get(0));
+            }
+
+            assertTrue(tx.state() == TransactionState.COMMITTED);
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDdlRequestWithoutTxMultinodeClient() throws Exception {
+        startGridsMultiThreaded(4, false);
+
+        client = true;
+
+        Ignite node = startGrid(4);
+
+        awaitPartitionMapExchange();
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+            "CREATE TABLE " +
+                "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                "WITH " +
+                "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(1, rows.size());
+
+            assertEquals(0L, rows.get(0).get(0));
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDdlRequestWithoutTxMultinode() throws Exception {
+        Ignite node = startGridsMultiThreaded(4);
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+            "CREATE TABLE " +
+                "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                "WITH " +
+                "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(1, rows.size());
+
+            assertEquals(0L, rows.get(0).get(0));
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDdlRequestWithoutTx() throws Exception {
+        Ignite node = startGrid();
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery(
+            "CREATE TABLE " +
+                "    person (id int, name varchar, age int, company varchar, city varchar, primary key (id, name, city))" +
+                "WITH " +
+                "    \"template=PARTITIONED,atomicity=TRANSACTIONAL,affinity_key=city\"").setSchema("PUBLIC"))) {
+
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(1, rows.size());
+
+            assertEquals(0L, rows.get(0).get(0));
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(new SqlFieldsQuery("SELECT * FROM person").setSchema("PUBLIC"))) {
+            assertNotNull(cur);
+
+            List<List<?>> rows = cur.getAll();
+
+            assertEquals(0, rows.size());
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
index 361f8b4..ce5c95e 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractFieldsQuerySelfTest.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.internal.processors.cache;
 
+import com.google.common.collect.ImmutableMap;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -35,8 +36,8 @@
 import org.apache.ignite.cache.CacheMode;
 import org.apache.ignite.cache.CacheRebalanceMode;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.affinity.AffinityKey;
-import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.cache.query.annotations.QuerySqlField;
@@ -44,14 +45,13 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteKernal;
 import org.apache.ignite.internal.binary.BinaryMarshaller;
-import org.apache.ignite.internal.processors.cache.index.AbstractSchemaSelfTest;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlIndexMetadata;
 import org.apache.ignite.internal.processors.cache.query.GridCacheSqlMetadata;
 import org.apache.ignite.internal.processors.cache.persistence.tree.BPlusTree;
-import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
 import org.apache.ignite.internal.processors.datastructures.GridCacheAtomicLongValue;
 import org.apache.ignite.internal.processors.datastructures.GridCacheInternalKeyImpl;
 import org.apache.ignite.internal.processors.query.GridQueryFieldMetadata;
+import org.apache.ignite.internal.processors.query.GridQueryProcessor;
 import org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.X;
@@ -352,7 +352,7 @@
                 }
                 else if (DEFAULT_CACHE_NAME.equals(meta.cacheName()) || noOpCache.getName().equals(meta.cacheName()))
                     assertTrue("Invalid types size", types.isEmpty());
-                else
+                else if (!"cacheWithCustomKeyPrecision".equalsIgnoreCase(meta.cacheName()))
                     fail("Unknown cache: " + meta.cacheName());
             }
         }
@@ -383,6 +383,102 @@
     }
 
     /** @throws Exception If failed. */
+    @SuppressWarnings("unchecked")
+    public void testExecuteWithMetaDataAndPrecision() throws Exception {
+        QueryEntity qeWithPrecision = new QueryEntity()
+            .setKeyType("java.lang.Long")
+            .setValueType("TestType")
+            .addQueryField("strField", "java.lang.String", "strField")
+            .setFieldsPrecision(ImmutableMap.of("strField", 999));
+
+        grid(0).getOrCreateCache(cacheConfiguration()
+            .setName("cacheWithPrecision")
+            .setQueryEntities(Collections.singleton(qeWithPrecision)));
+
+        GridQueryProcessor qryProc = grid(0).context().query();
+
+        qryProc.querySqlFields(
+            new SqlFieldsQuery("INSERT INTO TestType(_KEY, strField) VALUES(?, ?)")
+                .setSchema("cacheWithPrecision")
+                .setArgs(1, "ABC"), true);
+
+        qryProc.querySqlFields(
+            new SqlFieldsQuery("INSERT INTO TestType(_KEY, strField) VALUES(?, ?)")
+                .setSchema("cacheWithPrecision")
+                .setArgs(2, "DEF"), true);
+
+
+        QueryCursorImpl<List<?>> cursor = (QueryCursorImpl<List<?>>)qryProc.querySqlFields(
+            new SqlFieldsQuery("SELECT _KEY, strField FROM TestType")
+                .setSchema("cacheWithPrecision"), true);
+
+        List<GridQueryFieldMetadata> fieldsMeta = cursor.fieldsMeta();
+
+        for (GridQueryFieldMetadata meta : fieldsMeta) {
+            if (!meta.fieldName().equalsIgnoreCase("strField"))
+                continue;
+
+            assertEquals(999, meta.precision());
+        }
+    }
+
+    public void testExecuteWithMetaDataAndCustomKeyPrecision() throws Exception {
+        QueryEntity qeWithPrecision = new QueryEntity()
+            .setKeyType("java.lang.String")
+            .setKeyFieldName("my_key")
+            .setValueType("CustomKeyType")
+            .addQueryField("my_key", "java.lang.String", "my_key")
+            .addQueryField("strField", "java.lang.String", "strField")
+            .setFieldsPrecision(ImmutableMap.of("strField", 999, "my_key", 777));
+
+        grid(0).getOrCreateCache(cacheConfiguration()
+            .setName("cacheWithCustomKeyPrecision")
+            .setQueryEntities(Collections.singleton(qeWithPrecision)));
+
+        GridQueryProcessor qryProc = grid(0).context().query();
+
+        qryProc.querySqlFields(
+            new SqlFieldsQuery("INSERT INTO CustomKeyType(my_key, strField) VALUES(?, ?)")
+                .setSchema("cacheWithCustomKeyPrecision")
+                .setArgs("1", "ABC"), true);
+
+        qryProc.querySqlFields(
+            new SqlFieldsQuery("INSERT INTO CustomKeyType(my_key, strField) VALUES(?, ?)")
+                .setSchema("cacheWithCustomKeyPrecision")
+                .setArgs("2", "DEF"), true);
+
+        QueryCursorImpl<List<?>> cursor = (QueryCursorImpl<List<?>>)qryProc.querySqlFields(
+            new SqlFieldsQuery("SELECT my_key, strField FROM CustomKeyType")
+                .setSchema("cacheWithCustomKeyPrecision"), true);
+
+        List<GridQueryFieldMetadata> fieldsMeta = cursor.fieldsMeta();
+
+        int fldCnt = 0;
+
+        for (GridQueryFieldMetadata meta : fieldsMeta) {
+            switch (meta.fieldName()) {
+                case "STRFIELD":
+                    assertEquals(999, meta.precision());
+
+                    fldCnt++;
+
+                    break;
+
+                case "MY_KEY":
+                    assertEquals(777, meta.precision());
+
+                    fldCnt++;
+
+                    break;
+                default:
+                    fail("Unknown field - " + meta.fieldName());
+            }
+        }
+
+        assertEquals("Metadata for all fields should be returned.", 2, fldCnt);
+    }
+
+    /** @throws Exception If failed. */
     public void testExecuteWithMetaData() throws Exception {
         QueryCursorImpl<List<?>> cursor = (QueryCursorImpl<List<?>>)personCache.query(sqlFieldsQuery(
             String.format("select p._KEY, p.name, p.age, o.name " +
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java
index a845aaa..ac9de6f 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheAbstractQuerySelfTest.java
@@ -64,6 +64,7 @@
 import org.apache.ignite.cache.store.CacheStore;
 import org.apache.ignite.cache.store.CacheStoreAdapter;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.NearCacheConfiguration;
 import org.apache.ignite.events.CacheQueryExecutedEvent;
@@ -96,6 +97,7 @@
 import static org.apache.ignite.events.EventType.EVT_CACHE_QUERY_OBJECT_READ;
 import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.FULL_TEXT;
 import static org.apache.ignite.internal.processors.cache.query.CacheQueryType.SCAN;
+import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause;
 import static org.junit.Assert.assertArrayEquals;
 
 /**
@@ -142,9 +144,12 @@
 
         c.setDiscoverySpi(new TcpDiscoverySpi().setForceServerMode(true).setIpFinder(ipFinder));
 
-        if (igniteInstanceName.startsWith("client"))
+        if (igniteInstanceName.startsWith("client")) {
             c.setClientMode(true);
 
+            c.setDataStorageConfiguration(new DataStorageConfiguration());
+        }
+
         return c;
     }
 
@@ -1774,6 +1779,53 @@
     }
 
     /**
+     * @throws Exception If failed.
+     */
+    public void testLocalSqlQueryFromClient() throws Exception {
+        try {
+            Ignite g = startGrid("client");
+
+            IgniteCache<Integer, Integer> c = jcache(g, Integer.class, Integer.class);
+
+            for (int i = 0; i < 10; i++)
+                c.put(i, i);
+
+            SqlQuery<Integer, Integer> qry = new SqlQuery<>(Integer.class, "_key >= 5 order by _key");
+
+            qry.setLocal(true);
+
+            assertThrowsWithCause(() -> c.query(qry), CacheException.class);
+        }
+        finally {
+            stopGrid("client");
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testLocalSqlFieldsQueryFromClient() throws Exception {
+        try {
+            Ignite g = startGrid("client");
+
+            IgniteCache<UUID, Person> c = jcache(g, UUID.class, Person.class);
+
+            Person p = new Person("Jon", 1500);
+
+            c.put(p.id(), p);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("select count(*) from Person");
+
+            qry.setLocal(true);
+
+            assertThrowsWithCause(() -> c.query(qry), CacheException.class);
+        }
+        finally {
+            stopGrid("client");
+        }
+    }
+
+    /**
      *
      */
     private static class ArrayObject implements Serializable {
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueryH2IndexingLeakTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueryH2IndexingLeakTest.java
index 7212bf8..59be138 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueryH2IndexingLeakTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IgniteCacheQueryH2IndexingLeakTest.java
@@ -27,6 +27,7 @@
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.internal.processors.query.h2.H2ConnectionWrapper;
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.internal.util.typedef.CAX;
@@ -120,9 +121,14 @@
     private static int getStatementCacheSize(GridQueryProcessor qryProcessor) {
         IgniteH2Indexing h2Idx = GridTestUtils.getFieldValue(qryProcessor, GridQueryProcessor.class, "idx");
 
-        ConcurrentMap stmtCache = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "stmtCache");
+        ConcurrentMap<Thread, H2ConnectionWrapper> conns = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "conns");
 
-        return stmtCache.size();
+        int cntr = 0;
+
+        for (H2ConnectionWrapper w : conns.values())
+            cntr += w.statementCacheSize();
+
+        return cntr;
     }
 
     /**
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IndexingCachePartitionLossPolicySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IndexingCachePartitionLossPolicySelfTest.java
new file mode 100644
index 0000000..f208599
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/IndexingCachePartitionLossPolicySelfTest.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.cache.distributed.IgniteCachePartitionLossPolicySelfTest;
+
+import java.util.Collection;
+
+/**
+ * Partition loss policy test with enabled indexing.
+ */
+public class IndexingCachePartitionLossPolicySelfTest extends IgniteCachePartitionLossPolicySelfTest {
+    /** {@inheritDoc} */
+    @Override protected CacheConfiguration<Integer, Integer> cacheConfiguration() {
+        CacheConfiguration<Integer, Integer> ccfg = super.cacheConfiguration();
+
+        ccfg.setIndexedTypes(Integer.class, Integer.class);
+
+        return ccfg;
+    }
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override protected void validateQuery(boolean safe, int part, Ignite node) {
+        // Get node lost and remaining partitions.
+        IgniteCache cache = node.cache(CACHE_NAME);
+
+        Collection<Integer> lostParts = cache.lostPartitions();
+
+        Integer remainingPart = null;
+
+        for (int i = 0; i < node.affinity(CACHE_NAME).partitions(); i++) {
+            if (lostParts.contains(i))
+                continue;
+
+            remainingPart = i;
+
+            break;
+        }
+
+        // Determine whether local query should be executed on that node.
+        boolean execLocQry = false;
+
+        for (int nodePrimaryPart : node.affinity(CACHE_NAME).primaryPartitions(node.cluster().localNode())) {
+            if (part == nodePrimaryPart) {
+                execLocQry = true;
+
+                break;
+            }
+        }
+
+        // 1. Check query against all partitions.
+        validateQuery0(safe, node, false);
+
+        // TODO: https://issues.apache.org/jira/browse/IGNITE-7039
+//        if (execLocQry)
+//            validateQuery0(safe, node, true);
+
+        // 2. Check query against LOST partition.
+        validateQuery0(safe, node, false, part);
+
+        // TODO: https://issues.apache.org/jira/browse/IGNITE-7039
+//        if (execLocQry)
+//            validateQuery0(safe, node, true, part);
+
+        // 3. Check query on remaining partition.
+        if (remainingPart != null) {
+            executeQuery(node, false, remainingPart);
+
+            // 4. Check query over two partitions - normal and LOST.
+            validateQuery0(safe, node, false, part, remainingPart);
+        }
+    }
+
+    /**
+     * Query validation routine.
+     *
+     * @param safe Safe flag.
+     * @param node Node.
+     * @param loc Local flag.
+     * @param parts Partitions.
+     */
+    private void validateQuery0(boolean safe, Ignite node, boolean loc, int... parts) {
+        if (safe) {
+            try {
+                executeQuery(node, loc, parts);
+
+                fail("Exception is not thrown.");
+            }
+            catch (Exception e) {
+                assertTrue(e.getMessage(), e.getMessage() != null &&
+                    e.getMessage().contains("Failed to execute query because cache partition has been lost"));
+            }
+        }
+        else {
+            executeQuery(node, loc, parts);
+        }
+    }
+
+    /**
+     * Execute SQL query on a given node.
+     *
+     * @param parts Partitions.
+     * @param node Node.
+     * @param loc Local flag.
+     */
+    private static void executeQuery(Ignite node, boolean loc, int... parts) {
+        IgniteCache cache = node.cache(CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("SELECT * FROM Integer");
+
+        if (parts != null && parts.length != 0)
+            qry.setPartitions(parts);
+
+        if (loc)
+            qry.setLocal(true);
+
+        cache.query(qry).getAll();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedQueryCancelSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedQueryCancelSelfTest.java
index e26b211..d5ee0e9 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedQueryCancelSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/near/IgniteCacheDistributedQueryCancelSelfTest.java
@@ -158,7 +158,7 @@
                 fail();
             }
             catch (Exception e) {
-                assertTrue(e.getCause() instanceof CacheException);
+                assertTrue(e instanceof CacheException);
             }
         }
     }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
index bd3dffd..13942c2 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/distributed/replicated/IgniteCacheReplicatedQuerySelfTest.java
@@ -28,7 +28,6 @@
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CountDownLatch;
 import javax.cache.Cache;
-import javax.cache.CacheException;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteException;
@@ -53,7 +52,6 @@
 import static org.apache.ignite.cache.CacheMode.REPLICATED;
 import static org.apache.ignite.cache.CachePeekMode.ALL;
 import static org.apache.ignite.events.EventType.EVT_NODE_LEFT;
-import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause;
 
 /**
  * Tests replicated query.
@@ -163,31 +161,6 @@
     }
 
     /**
-     * @throws Exception If failed.
-     */
-    public void testClientsLocalQuery() throws Exception {
-        try {
-            Ignite g = startGrid("client");
-
-            IgniteCache<Integer, Integer> c = jcache(g, Integer.class, Integer.class);
-
-            for (int i = 0; i < 10; i++)
-                c.put(i, i);
-
-            assertEquals(0, c.localSize());
-
-            SqlQuery<Integer, Integer> qry = new SqlQuery<>(Integer.class, "_key >= 5 order by _key");
-
-            qry.setLocal(true);
-
-            assertThrowsWithCause(() -> c.query(qry), CacheException.class);
-        }
-        finally {
-            stopGrid("client");
-        }
-    }
-
-    /**
      * JUnit.
      *
      * @throws Exception If failed.
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java
index 0a0efc7..7f1e2e7 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/AbstractSchemaSelfTest.java
@@ -21,12 +21,15 @@
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
+import java.sql.Statement;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
+import javax.cache.CacheException;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteCheckedException;
@@ -36,12 +39,17 @@
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.binary.BinaryMarshaller;
 import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor;
 import org.apache.ignite.internal.processors.port.GridPortRecord;
 import org.apache.ignite.internal.processors.query.GridQueryProcessor;
 import org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.processors.query.QueryIndexDescriptorImpl;
 import org.apache.ignite.internal.processors.query.QueryTypeDescriptorImpl;
 import org.apache.ignite.internal.processors.query.QueryUtils;
@@ -51,6 +59,9 @@
 import org.apache.ignite.internal.util.typedef.internal.SB;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.jetbrains.annotations.Nullable;
 
@@ -59,6 +70,9 @@
  */
 @SuppressWarnings("unchecked")
 public abstract class AbstractSchemaSelfTest extends GridCommonAbstractTest {
+    /** IP finder. */
+    private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
     /** Cache. */
     protected static final String CACHE_NAME = "cache";
 
@@ -102,6 +116,59 @@
     protected static final String FIELD_NAME_2_ESCAPED = "field2";
 
     /**
+     * Create common node configuration.
+     *
+     * @param idx Index.
+     * @return Configuration.
+     * @throws Exception If failed.
+     */
+    protected IgniteConfiguration commonConfiguration(int idx) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(getTestIgniteInstanceName(idx));
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
+
+        cfg.setMarshaller(new BinaryMarshaller());
+
+        DataStorageConfiguration memCfg = new DataStorageConfiguration().setDefaultDataRegionConfiguration(
+            new DataRegionConfiguration().setMaxSize(128 * 1024 * 1024));
+
+        cfg.setDataStorageConfiguration(memCfg);
+
+        return optimize(cfg);
+    }
+
+    /**
+     * Ensure that SQL exception is thrown.
+     *
+     * @param r Runnable.
+     * @param expCode Error code.
+     */
+    static void assertSqlException(DynamicIndexAbstractBasicSelfTest.RunnableX r, int expCode) {
+        try {
+            try {
+                r.run();
+            }
+            catch (CacheException e) {
+                if (e.getCause() != null)
+                    throw (Exception)e.getCause();
+                else
+                    throw e;
+            }
+        }
+        catch (IgniteSQLException e) {
+            assertEquals("Unexpected error code [expected=" + expCode + ", actual=" + e.statusCode() + ']',
+                expCode, e.statusCode());
+
+            return;
+        }
+        catch (Exception e) {
+            fail("Unexpected exception: " + e);
+        }
+
+        fail(IgniteSQLException.class.getSimpleName() +  " is not thrown.");
+    }
+
+    /**
      * Get type on the given node for the given cache and table name. Type must exist.
      *
      * @param node Node.
@@ -230,7 +297,7 @@
      * @param node Node to connect to.
      * @return Thin JDBC connection to specified node.
      */
-    static Connection connect(IgniteEx node) {
+    public static Connection connect(IgniteEx node) {
         Collection<GridPortRecord> recs = node.context().ports().records();
 
         GridPortRecord cliLsnrRec = null;
@@ -254,6 +321,17 @@
     }
 
     /**
+     * @param conn Connection.
+     * @param sql Statement.
+     * @throws SQLException if failed.
+     */
+    public static void execute(Connection conn, String sql) throws SQLException {
+        try (Statement s = conn.createStatement()) {
+            s.execute(sql);
+        }
+    }
+
+    /**
      * Assert index doesn't exist on all nodes.
      *
      * @param cacheName Cache name.
@@ -348,13 +426,33 @@
     }
 
     /**
+     * Execute SQL statement on given node.
+     *
+     * @param node Node.
+     * @param sql Statement.
+     */
+    protected List<List<?>> execute(Ignite node, String sql) {
+        return queryProcessor(node).querySqlFields(new SqlFieldsQuery(sql).setSchema("PUBLIC"), true).getAll();
+    }
+
+    /**
      * Get query processor.
      *
      * @param node Node.
      * @return Query processor.
      */
     static GridQueryProcessor queryProcessor(Ignite node) {
-        return ((IgniteEx)node).context().query();
+        return queryProcessor((IgniteEx)node);
+    }
+
+    /**
+     * Get query processor.
+     *
+     * @param node Node.
+     * @return Query processor.
+     */
+    protected static GridQueryProcessor queryProcessor(IgniteEx node) {
+        return node.context().query();
     }
 
     /**
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java
index 3f09062..072f1ab 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicColumnsAbstractConcurrentSelfTest.java
@@ -380,9 +380,9 @@
                     IgniteCache<Object, BinaryObject> cache = node.cache(CACHE_NAME);
 
                     if (ThreadLocalRandom.current().nextBoolean())
-                        cache.put(key(node, key), val(node, val));
+                        cache.put(key(key), val(node, val));
                     else
-                        cache.remove(key(node, key));
+                        cache.remove(key(key));
                 }
 
                 return null;
@@ -416,7 +416,7 @@
         IgniteCache<Object, BinaryObject> cache = srv1.cache(CACHE_NAME).withKeepBinary();
 
         for (int i = 0; i < LARGE_CACHE_SIZE; i++) {
-            Object key = key(srv1, i);
+            Object key = key(i);
 
             BinaryObject val = cache.get(key);
 
@@ -430,7 +430,7 @@
             }
         }
 
-        String valTypeName = ((IgniteEx)srv1).context().query().types(CACHE_NAME).iterator().next().valueTypeName();
+        String valTypeName = (srv1).context().query().types(CACHE_NAME).iterator().next().valueTypeName();
 
         // Validate query result.
         for (Ignite node : Ignition.allGrids()) {
@@ -469,11 +469,10 @@
     }
 
     /**
-     * @param node Node.
      * @param id Key.
      * @return PERSON cache key (int or {@link BinaryObject}).
      */
-    private Object key(Ignite node, int id) {
+    private Object key(int id) {
         return id;
     }
 
@@ -550,7 +549,7 @@
      */
     private void put(Ignite node, int startIdx, int endIdx) {
         for (int i = startIdx; i < endIdx; i++)
-            node.cache(CACHE_NAME).put(key(node, i), val(node, i));
+            node.cache(CACHE_NAME).put(key(i), val(node, i));
     }
 
     /**
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java
index 2b5dcb7..b00d750 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/DynamicIndexAbstractSelfTest.java
@@ -40,6 +40,7 @@
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.failure.StopNodeFailureHandler;
 import org.apache.ignite.internal.IgniteEx;
 import org.apache.ignite.internal.binary.BinaryMarshaller;
 import org.apache.ignite.internal.util.typedef.T2;
@@ -82,6 +83,13 @@
     /** Argument for simple SQL (2). */
     protected static final int SQL_ARG_2 = 80;
 
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
     /**
      * Create server configuration.
      *
@@ -131,6 +139,8 @@
     protected IgniteConfiguration commonConfiguration(int idx) throws Exception {
         IgniteConfiguration cfg = super.getConfiguration(getTestIgniteInstanceName(idx));
 
+        cfg.setFailureHandler(new StopNodeFailureHandler());
+
         cfg.setDiscoverySpi(new TcpDiscoverySpi().setIpFinder(IP_FINDER));
 
         cfg.setMarshaller(new BinaryMarshaller());
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2ConnectionLeaksSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2ConnectionLeaksSelfTest.java
index 99661e4..7713004 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2ConnectionLeaksSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2ConnectionLeaksSelfTest.java
@@ -17,7 +17,6 @@
 
 package org.apache.ignite.internal.processors.cache.index;
 
-import java.sql.Connection;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import org.apache.ignite.Ignite;
@@ -25,7 +24,6 @@
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
-import org.apache.ignite.internal.IgniteInterruptedCheckedException;
 import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
 import org.apache.ignite.internal.util.lang.GridAbsPredicate;
 import org.apache.ignite.testframework.GridTestUtils;
@@ -145,7 +143,7 @@
         boolean notLeak = GridTestUtils.waitForCondition(new GridAbsPredicate() {
             @Override public boolean apply() {
                 for (int i = 0; i < NODE_CNT; i++) {
-                    Map<Thread, Connection> conns = perThreadConnections(i);
+                    Map<Thread, ?> conns = perThreadConnections(i);
 
                     for(Thread t : conns.keySet()) {
                         if (!t.isAlive())
@@ -159,7 +157,7 @@
 
         if (!notLeak) {
             for (int i = 0; i < NODE_CNT; i++) {
-                Map<Thread, Connection> conns = perThreadConnections(i);
+                Map<Thread, ?> conns = perThreadConnections(i);
 
                 for(Thread t : conns.keySet())
                     log.error("+++ Connection is not closed for thread: " + t.getName());
@@ -173,7 +171,7 @@
      * @param nodeIdx Node index.
      * @return Per-thread connections.
      */
-    private Map<Thread, Connection> perThreadConnections(int nodeIdx) {
+    private Map<Thread, ?> perThreadConnections(int nodeIdx) {
         return ((IgniteH2Indexing)grid(nodeIdx).context().query().getIndexing()).perThreadConnections();
     }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java
index 650da7d..e74e9cd 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicColumnsAbstractBasicSelfTest.java
@@ -21,6 +21,8 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
+import java.util.Random;
+import java.util.UUID;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
 import org.apache.ignite.binary.BinaryObject;
@@ -282,6 +284,76 @@
     }
 
     /**
+     * Tests that we can add dynamically UUID column to tables.
+     *
+     * @throws SQLException If failed.
+     */
+    @SuppressWarnings("unchecked")
+    public void testAddColumnUUID() throws SQLException {
+        CacheConfiguration<Integer, Object> ccfg = defaultCacheConfiguration().setName("GuidTest")
+                .setIndexedTypes(Integer.class, GuidTest.class);
+
+        Random rnd = new Random();
+
+        IgniteCache<Integer, Object> cache = ignite(nodeIndex()).getOrCreateCache(ccfg);
+
+        run(cache, "ALTER TABLE \"GuidTest\".GuidTest ADD COLUMN GUID UUID");
+        run(cache, "ALTER TABLE \"GuidTest\".GuidTest ADD COLUMN DATA BINARY(128)");
+
+        doSleep(500);
+
+        QueryField c1 = c("GUID", Object.class.getName());
+        QueryField c2 = c("DATA", byte[].class.getName());
+
+        checkTableState("GuidTest", "GUIDTEST", c1, c2);
+
+        UUID guid1 = UUID.randomUUID();
+        UUID guid2 = UUID.randomUUID();
+
+        // Populate random data for BINARY field.
+        byte[] data1 = new byte[128];
+        rnd.nextBytes(data1);
+        byte[] data2 = new byte[128];
+        rnd.nextBytes(data2);
+
+        run(cache, "INSERT INTO \"GuidTest\".GuidTest (_key, id, guid, data) values " +
+                "(1, 1, ?, ?)", guid1.toString(), data1);
+
+        cache.put(2, new GuidTest(2, guid2, data2));
+
+        List<List<?>> res = run(cache, "select _key, id, guid from \"GuidTest\".GuidTest order by id");
+
+        assertEquals(Arrays.asList(Arrays.asList(1, 1, guid1), Arrays.asList(2, 2, guid2)), res);
+
+        // Additional check for BINARY field content.
+        res = run(cache, "select data from \"GuidTest\".GuidTest order by id");
+
+        assertTrue(Arrays.equals(data1, (byte[])res.get(0).get(0)));
+        assertTrue(Arrays.equals(data2, (byte[])res.get(1).get(0)));
+
+        if (!Boolean.valueOf(GridTestProperties.getProperty(BINARY_MARSHALLER_USE_SIMPLE_NAME_MAPPER))) {
+            GuidTest val1 = (GuidTest)cache.get(1);
+            GuidTest val2 = (GuidTest)cache.get(2);
+
+            assertEquals(guid1, val1.guid());
+            assertEquals(guid2, val2.guid());
+            assertTrue(Arrays.equals(data1, val1.data()));
+            assertTrue(Arrays.equals(data2, val2.data()));
+        }
+        else {
+            BinaryObject val1 = (BinaryObject)cache.withKeepBinary().get(1);
+            BinaryObject val2 = (BinaryObject)cache.withKeepBinary().get(2);
+
+            assertEquals(guid1, val1.field("guid"));
+            assertEquals(guid2, val2.field("guid"));
+            assertTrue(Arrays.equals(data1, val1.field("data")));
+            assertTrue(Arrays.equals(data2, val2.field("data")));
+        }
+
+        cache.destroy();
+    }
+
+    /**
      * Test addition of column with not null constraint.
      */
     public void testAddNotNullColumn() throws SQLException {
@@ -770,4 +842,49 @@
             this.state = state;
         }
     }
+
+    /**  */
+    private final static class GuidTest {
+        /** */
+        @QuerySqlField
+        private int id;
+
+        /** */
+        private UUID guid;
+
+        /** */
+        private byte[] data;
+
+        /**
+         * @param id   Id.
+         * @param guid Guid.
+         * @param data Data.
+         */
+        public GuidTest(int id, UUID guid, byte[] data) {
+            this.id = id;
+            this.guid = guid;
+            this.data = data;
+        }
+
+        /**
+         * @return Id.
+         */
+        public int id() {
+            return id;
+        }
+
+        /**
+         * @return Guid.
+         */
+        public UUID guid() {
+            return guid;
+        }
+
+        /**
+         * @return Data.
+         */
+        public byte[] data() {
+            return data;
+        }
+    }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexAbstractSelfTest.java
index cf47774..10ef56f 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexAbstractSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicIndexAbstractSelfTest.java
@@ -21,7 +21,6 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
-import javax.cache.CacheException;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.Ignition;
@@ -33,9 +32,7 @@
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.configuration.NearCacheConfiguration;
-import org.apache.ignite.internal.binary.BinaryMarshaller;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
-import org.apache.ignite.internal.processors.query.IgniteSQLException;
 import org.apache.ignite.internal.util.typedef.F;
 
 /**
@@ -88,6 +85,9 @@
 
         // Test that local queries on all nodes use new index.
         for (int i = 0 ; i < 4; i++) {
+            if (ignite(i).configuration().isClientMode())
+                continue;
+
             List<List<?>> locRes = ignite(i).cache("cache").query(new SqlFieldsQuery("explain select \"id\" from " +
                 "\"cache\".\"ValueClass\" where \"field1\" = 'A'").setLocal(true)).getAll();
 
@@ -158,6 +158,9 @@
 
         // Test that no local queries on all nodes use new index.
         for (int i = 0 ; i < 4; i++) {
+            if (ignite(i).configuration().isClientMode())
+                continue;
+
             List<List<?>> locRes = ignite(i).cache("cache").query(new SqlFieldsQuery("explain select \"id\" from " +
                 "\"cache\".\"ValueClass\" where \"field1\" = 'A'").setLocal(true)).getAll();
 
@@ -298,21 +301,6 @@
     }
 
     /**
-     * Create common node configuration.
-     *
-     * @param idx Index.
-     * @return Configuration.
-     * @throws Exception If failed.
-     */
-    private IgniteConfiguration commonConfiguration(int idx) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(getTestIgniteInstanceName(idx));
-
-        cfg.setMarshaller(new BinaryMarshaller());
-
-        return optimize(cfg);
-    }
-
-    /**
      * @return Default cache configuration.
      */
     private CacheConfiguration cacheConfiguration() {
@@ -359,35 +347,4 @@
      * @return Whether to use near cache.
      */
     protected abstract boolean nearCache();
-
-    /**
-     * Ensure that SQL exception is thrown.
-     *
-     * @param r Runnable.
-     * @param expCode Error code.
-     */
-    private static void assertSqlException(DynamicIndexAbstractBasicSelfTest.RunnableX r, int expCode) {
-        try {
-            try {
-                r.run();
-            }
-            catch (CacheException e) {
-                if (e.getCause() != null)
-                    throw (Exception)e.getCause();
-                else
-                    throw e;
-            }
-        }
-        catch (IgniteSQLException e) {
-            assertEquals("Unexpected error code [expected=" + expCode + ", actual=" + e.statusCode() + ']',
-                expCode, e.statusCode());
-
-            return;
-        }
-        catch (Exception e) {
-            fail("Unexpected exception: " + e);
-        }
-
-        fail(IgniteSQLException.class.getSimpleName() +  " is not thrown.");
-    }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
index d132ebb..6ed914c 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/H2DynamicTableSelfTest.java
@@ -31,6 +31,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.UUID;
 import java.util.concurrent.Callable;
 import javax.cache.CacheException;
 import org.apache.ignite.Ignite;
@@ -43,12 +44,12 @@
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.QueryIndex;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.DataRegionConfiguration;
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.binary.BinaryMarshaller;
 import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor;
 import org.apache.ignite.internal.processors.cache.GridCacheContext;
 import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
@@ -1259,7 +1260,7 @@
      * @throws SQLException if failed.
      */
     public void testNoWrap() throws SQLException {
-        doTestKeyValueWrap(false, false);
+        doTestKeyValueWrap(false, false, false);
     }
 
     /**
@@ -1267,7 +1268,7 @@
      * @throws SQLException if failed.
      */
     public void testKeyWrap() throws SQLException {
-        doTestKeyValueWrap(true, false);
+        doTestKeyValueWrap(true, false, false);
     }
 
     /**
@@ -1275,7 +1276,7 @@
      * @throws SQLException if failed.
      */
     public void testValueWrap() throws SQLException {
-        doTestKeyValueWrap(false, true);
+        doTestKeyValueWrap(false, true, false);
     }
 
     /**
@@ -1283,29 +1284,73 @@
      * @throws SQLException if failed.
      */
     public void testKeyAndValueWrap() throws SQLException {
-        doTestKeyValueWrap(true, true);
+        doTestKeyValueWrap(true, true, false);
+    }
+
+    /**
+     * Test behavior when neither key nor value should be wrapped.
+     * Key and value are UUID.
+     * @throws SQLException if failed.
+     */
+    public void testUuidNoWrap() throws SQLException {
+        doTestKeyValueWrap(false, false, true);
+    }
+
+    /**
+     * Test behavior when only key is wrapped.
+     * Key and value are UUID.
+     * @throws SQLException if failed.
+     */
+    public void testUuidKeyWrap() throws SQLException {
+        doTestKeyValueWrap(true, false, true);
+    }
+
+    /**
+     * Test behavior when only value is wrapped.
+     * Key and value are UUID.
+     * @throws SQLException if failed.
+     */
+    public void testUuidValueWrap() throws SQLException {
+        doTestKeyValueWrap(false, true, true);
+    }
+
+    /**
+     * Test behavior when both key and value is wrapped.
+     * Key and value are UUID.
+     * @throws SQLException if failed.
+     */
+    public void testUuidKeyAndValueWrap() throws SQLException {
+        doTestKeyValueWrap(true, true, true);
     }
 
     /**
      * Test behavior for given combination of wrap flags.
      * @param wrapKey Whether key wrap should be enforced.
      * @param wrapVal Whether value wrap should be enforced.
+     * @param testUuid Whether should test with UUID as key and value.
      * @throws SQLException if failed.
      */
-    private void doTestKeyValueWrap(boolean wrapKey, boolean wrapVal) throws SQLException {
+    private void doTestKeyValueWrap(boolean wrapKey, boolean wrapVal, boolean testUuid) throws SQLException {
         try {
-            String sql = String.format("CREATE TABLE T (\"id\" int primary key, \"x\" varchar) WITH " +
-                "\"wrap_key=%b,wrap_value=%b\"", wrapKey, wrapVal);
+            String sql = testUuid ? String.format("CREATE TABLE T (\"id\" UUID primary key, \"x\" UUID) WITH " +
+                            "\"wrap_key=%b,wrap_value=%b\"", wrapKey, wrapVal) :
+                    String.format("CREATE TABLE T (\"id\" int primary key, \"x\" varchar) WITH " +
+                            "\"wrap_key=%b,wrap_value=%b\"", wrapKey, wrapVal);
+
+            UUID guid = UUID.randomUUID();
 
             if (wrapKey)
-                sql += ",\"key_type=tkey\"";
+                sql += ",\"key_type=" + (testUuid ? "tkey_guid" : "tkey") + "\"";
 
             if (wrapVal)
-                sql += ",\"value_type=tval\"";
+                sql += ",\"value_type=" + (testUuid ? "tval_guid" : "tval") + "\"";
 
             execute(sql);
 
-            execute("INSERT INTO T(\"id\", \"x\") values(1, 'a')");
+            if(testUuid)
+                execute("INSERT INTO T(\"id\", \"x\") values('" + guid.toString() + "', '" + guid.toString() + "')");
+            else
+                execute("INSERT INTO T(\"id\", \"x\") values(1, 'a')");
 
             LinkedHashMap<String, String> resCols = new LinkedHashMap<>();
 
@@ -1331,20 +1376,27 @@
 
             LinkedHashMap<String, String> expCols = new LinkedHashMap<>();
 
-            expCols.put("id", Integer.class.getName());
-            expCols.put("x", String.class.getName());
+            if (testUuid) {
+                expCols.put("id", Object.class.getName());
+                expCols.put("x", Object.class.getName());
+            }
+            else {
+                expCols.put("id", Integer.class.getName());
+                expCols.put("x", String.class.getName());
+            }
 
             assertEquals(expCols, resCols);
 
-            assertEqualsCollections(Arrays.asList(1, "a"), resData);
+            assertEqualsCollections(testUuid ? Arrays.asList(guid, guid) : Arrays.asList(1, "a")
+                    , resData);
 
-            Object key = createKeyForWrapTest(1, wrapKey);
+            Object key = createKeyForWrapTest(testUuid ? guid : 1, wrapKey);
 
             Object val = client().cache(cacheName("T")).withKeepBinary().get(key);
 
             assertNotNull(val);
 
-            assertEquals(createValueForWrapTest("a", wrapVal), val);
+            assertEquals(createValueForWrapTest(testUuid ? guid : "a", wrapVal), val);
         }
         finally {
             execute("DROP TABLE IF EXISTS T");
@@ -1356,11 +1408,11 @@
      * @param wrap Whether key should be wrapped.
      * @return (optionally wrapped) key.
      */
-    private Object createKeyForWrapTest(int key, boolean wrap) {
+    private Object createKeyForWrapTest(Object key, boolean wrap) {
         if (!wrap)
             return key;
 
-        return client().binary().builder("tkey").setField("id", key).build();
+        return client().binary().builder(key instanceof UUID ? "tkey_guid" : "tkey").setField("id", key).build();
     }
 
     /**
@@ -1368,11 +1420,11 @@
      * @param wrap Whether value should be wrapped.
      * @return (optionally wrapped) value.
      */
-    private Object createValueForWrapTest(String val, boolean wrap) {
+    private Object createValueForWrapTest(Object val, boolean wrap) {
         if (!wrap)
             return val;
 
-        return client().binary().builder("tval").setField("x", val).build();
+        return client().binary().builder(val instanceof UUID ? "tval_guid" : "tval").setField("x", val).build();
     }
 
     /**
@@ -1423,24 +1475,6 @@
     }
 
     /**
-     * Test that {@code CREATE TABLE} in non-public schema causes an exception.
-     *
-     * @throws Exception if failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testCreateTableInNonPublicSchema() throws Exception {
-        GridTestUtils.assertThrows(null, new Callable<Object>() {
-            @Override public Object call() throws Exception {
-                execute("CREATE TABLE \"cache_idx\".\"Person\" (\"id\" int, \"city\" varchar," +
-                    " \"name\" varchar, \"surname\" varchar, \"age\" int, PRIMARY KEY (\"id\", \"city\")) WITH " +
-                    "\"template=cache\"");
-
-                return null;
-            }
-        }, IgniteSQLException.class, "CREATE TABLE can only be executed on PUBLIC schema.");
-    }
-
-    /**
      * Execute {@code CREATE TABLE} w/given params expecting a particular error.
      * @param params Engine parameters.
      * @param expErrMsg Expected error message.
@@ -1473,17 +1507,6 @@
     }
 
     /**
-     * Test that {@code DROP TABLE} on non-public schema causes an exception.
-     *
-     * @throws Exception if failed.
-     */
-    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
-    public void testDropTableNotPublicSchema() throws Exception {
-       assertDdlCommandThrows("DROP TABLE \"cache_idx\".\"Person\"",
-           "DROP TABLE can only be executed on PUBLIC schema.");
-    }
-
-    /**
      * Test that {@link IgniteH2Indexing#tables(String)} method
      * only returns tables belonging to given cache.
      *
@@ -1585,12 +1608,11 @@
      * @return Configuration.
      * @throws Exception If failed.
      */
-    private IgniteConfiguration commonConfiguration(int idx) throws Exception {
-        IgniteConfiguration cfg = super.getConfiguration(getTestIgniteInstanceName(idx));
+    protected IgniteConfiguration commonConfiguration(int idx) throws Exception {
+        IgniteConfiguration cfg = super.commonConfiguration(idx);
 
         DataRegionConfiguration dataRegionCfg = new DataRegionConfiguration().setName(DATA_REGION_NAME);
 
-        cfg.setMarshaller(new BinaryMarshaller());
         cfg.setDataStorageConfiguration(new DataStorageConfiguration().setDataRegionConfigurations(dataRegionCfg));
 
         return optimize(cfg);
@@ -1599,16 +1621,6 @@
     /**
      * Execute DDL statement on given node.
      *
-     * @param node Node.
-     * @param sql Statement.
-     */
-    private List<List<?>> execute(Ignite node, String sql) {
-        return queryProcessor(node).querySqlFields(new SqlFieldsQuery(sql).setSchema("PUBLIC"), true).getAll();
-    }
-
-    /**
-     * Execute DDL statement on given node.
-     *
      * @param sql Statement.
      */
     private List<List<?>> executeLocal(GridCacheContext cctx, String sql) {
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/IgniteDecimalSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/IgniteDecimalSelfTest.java
index 9e65276..96926ea 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/IgniteDecimalSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/IgniteDecimalSelfTest.java
@@ -32,8 +32,6 @@
 import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.internal.IgniteEx;
-import org.apache.ignite.internal.util.typedef.F;
-import org.apache.ignite.lang.IgniteBiTuple;
 import org.jetbrains.annotations.NotNull;
 
 import static java.math.RoundingMode.HALF_UP;
@@ -108,11 +106,14 @@
         queryEntity.addQueryField("id", Integer.class.getName(), null);
         queryEntity.addQueryField("amount", BigDecimal.class.getName(), null);
 
-        Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = new HashMap<>();
+        Map<String, Integer> precision = new HashMap<>();
+        Map<String, Integer> scale = new HashMap<>();
 
-        decimalInfo.put("amount", F.t(PRECISION, SCALE));
+        precision.put("amount",PRECISION);
+        scale.put("amount", SCALE);
 
-        queryEntity.setDecimalInfo(decimalInfo);
+        queryEntity.setFieldsPrecision(precision);
+        queryEntity.setFieldsScale(scale);
 
         ccfg.setQueryEntities(Collections.singletonList(queryEntity));
 
@@ -123,14 +124,14 @@
      * @throws Exception If failed.
      */
     public void testConfiguredFromDdl() throws Exception {
-        checkDecimalInfo(DEC_TAB_NAME, VALUE, PRECISION, SCALE);
+        checkPrecisionAndScale(DEC_TAB_NAME, VALUE, PRECISION, SCALE);
     }
 
     /**
      * @throws Exception If failed.
      */
     public void testConfiguredFromQueryEntity() throws Exception {
-        checkDecimalInfo(SALARY_TAB_NAME, "amount", PRECISION, SCALE);
+        checkPrecisionAndScale(SALARY_TAB_NAME, "amount", PRECISION, SCALE);
     }
 
     /**
@@ -145,7 +146,7 @@
 
         IgniteCache<Integer, Salary> cache = grid.createCache(ccfg);
 
-        checkDecimalInfo(tabName, "amount", PRECISION, SCALE);
+        checkPrecisionAndScale(tabName, "amount", PRECISION, SCALE);
     }
 
     /**
@@ -160,7 +161,7 @@
 
         grid.createCache(ccfg);
 
-        checkDecimalInfo(SalaryWithAnnotations.class.getSimpleName().toUpperCase(), "amount", PRECISION, SCALE);
+        checkPrecisionAndScale(SalaryWithAnnotations.class.getSimpleName().toUpperCase(), "amount", PRECISION, SCALE);
     }
 
     /** */
@@ -177,21 +178,22 @@
     }
 
     /** */
-    private void checkDecimalInfo(String tabName, String colName, Integer precision, Integer scale) {
+    private void checkPrecisionAndScale(String tabName, String colName, Integer precision, Integer scale) {
         QueryEntity queryEntity = findTableInfo(tabName);
 
         assertNotNull(queryEntity);
 
-        Map<String, IgniteBiTuple<Integer, Integer>> decimalInfo = queryEntity.getDecimalInfo();
+        Map<String, Integer> fieldsPrecision = queryEntity.getFieldsPrecision();
 
-        assertNotNull(decimalInfo);
+        assertNotNull(precision);
 
-        IgniteBiTuple<Integer, Integer> columnInfo = decimalInfo.get(colName);
+        assertEquals(fieldsPrecision.get(colName), precision);
 
-        assertNotNull(columnInfo);
+        Map<String, Integer> fieldsScale = queryEntity.getFieldsScale();
 
-        assertEquals(columnInfo.get1(), precision);
-        assertEquals(columnInfo.get2(), scale);
+        assertEquals(fieldsScale.get(colName), scale);
+
+        assertNotNull(scale);
     }
 
     /**
@@ -202,7 +204,6 @@
         IgniteEx ignite = grid(0);
 
         Collection<String> cacheNames = ignite.cacheNames();
-
         for (String cacheName : cacheNames) {
             CacheConfiguration ccfg = ignite.cache(cacheName).getConfiguration(CacheConfiguration.class);
 
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SchemaExchangeSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SchemaExchangeSelfTest.java
index b92c792..c7709f2 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SchemaExchangeSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SchemaExchangeSelfTest.java
@@ -32,6 +32,7 @@
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.lang.IgniteFuture;
 import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
 import org.apache.ignite.testframework.GridTestUtils;
 
 import java.util.Collections;
@@ -47,6 +48,9 @@
     /** Node on which filter should be applied (if any). */
     private static String filterNodeName;
 
+    /** */
+    private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
     /** {@inheritDoc} */
     @Override protected void afterTest() throws Exception {
         stopAllGrids();
@@ -541,7 +545,7 @@
 
         cfg.setClientMode(client);
         cfg.setLocalHost("127.0.0.1");
-        cfg.setDiscoverySpi(new TestTcpDiscoverySpi());
+        cfg.setDiscoverySpi(new TestTcpDiscoverySpi().setIpFinder(IP_FINDER));
 
         if (filterNodeName != null && F.eq(name, filterNodeName))
             cfg.setUserAttributes(Collections.singletonMap("AFF_NODE", true));
@@ -591,7 +595,7 @@
         cfg.setClientMode(client);
         cfg.setLocalHost("127.0.0.1");
 
-        cfg.setDiscoverySpi(new TestTcpDiscoverySpi());
+        cfg.setDiscoverySpi(new TestTcpDiscoverySpi().setIpFinder(IP_FINDER));
 
         return (IgniteEx)Ignition.start(cfg);
     }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsSelfTest.java
new file mode 100644
index 0000000..8b3fbe3
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsSelfTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.index;
+
+import java.util.concurrent.Callable;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ *
+ */
+public class SqlTransactionsComandsSelfTest extends AbstractSchemaSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid(commonConfiguration(0));
+
+        super.execute(grid(0), "CREATE TABLE INTS(k int primary key, v int) WITH \"wrap_value=false,cache_name=ints," +
+            "atomicity=transactional\"");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testBeginWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("BEGIN");
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testCommitWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("COMMIT");
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testRollbackWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("rollback");
+    }
+
+    /**
+     * @param sql Operation to test.
+     * @throws Exception if failed.
+     */
+    private void checkMvccDisabledBehavior(String sql) throws Exception {
+        try (IgniteEx node = startGrid(commonConfiguration(1))) {
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    execute(node, sql);
+
+                    return null;
+                }
+            }, IgniteSQLException.class, "MVCC must be enabled in order to invoke transactional operation: " + sql);
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsWithMvccDisabledSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsWithMvccDisabledSelfTest.java
new file mode 100644
index 0000000..d2931ba
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsComandsWithMvccDisabledSelfTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.index;
+
+import java.util.concurrent.Callable;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.testframework.GridTestUtils;
+
+/**
+ *
+ */
+public class SqlTransactionsComandsWithMvccDisabledSelfTest extends AbstractSchemaSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid(commonConfiguration(0));
+
+        super.execute(grid(0), "CREATE TABLE INTS(k int primary key, v int) WITH \"wrap_value=false,cache_name=ints," +
+            "atomicity=transactional\"");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testBeginWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("BEGIN");
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testCommitWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("COMMIT");
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testRollbackWithMvccDisabledThrows() throws Exception {
+        checkMvccDisabledBehavior("rollback");
+    }
+
+    /**
+     * @param sql Operation to test.
+     * @throws Exception if failed.
+     */
+    private void checkMvccDisabledBehavior(String sql) throws Exception {
+        try (IgniteEx node = startGrid(commonConfiguration(1))) {
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    execute(node, sql);
+
+                    return null;
+                }
+            }, IgniteSQLException.class, "MVCC must be enabled in order to invoke transactional operation: " + sql);
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsCommandsWithMvccEnabledSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsCommandsWithMvccEnabledSelfTest.java
new file mode 100644
index 0000000..8c6f407
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsCommandsWithMvccEnabledSelfTest.java
@@ -0,0 +1,420 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.index;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheEntryProcessor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.GatewayProtectedCacheProxy;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.query.QueryUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionState;
+
+/**
+ * Tests to check behavior regarding transactions started via SQL.
+ */
+public class SqlTransactionsCommandsWithMvccEnabledSelfTest extends AbstractSchemaSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid(commonConfiguration(0));
+
+        super.execute(node(), "CREATE TABLE INTS(k int primary key, v int) WITH \"wrap_value=false,cache_name=ints," +
+            "atomicity=transactional_snapshot\"");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /**
+     * Test that BEGIN opens a transaction.
+     */
+    public void testBegin() {
+        execute(node(), "BEGIN");
+
+        assertTxPresent();
+
+        assertTxState(tx(), TransactionState.ACTIVE);
+    }
+
+    /**
+     * Test that COMMIT commits a transaction.
+     */
+    public void testCommit() {
+        execute(node(), "BEGIN WORK");
+
+        assertTxPresent();
+
+        Transaction tx = tx();
+
+        assertTxState(tx, TransactionState.ACTIVE);
+
+        execute(node(), "COMMIT TRANSACTION");
+
+        assertTxState(tx, TransactionState.COMMITTED);
+
+        assertSqlTxNotPresent();
+    }
+
+    /**
+     * Test that COMMIT without a transaction yields nothing.
+     */
+    public void testCommitNoTransaction() {
+        execute(node(), "COMMIT");
+    }
+
+    /**
+     * Test that ROLLBACK without a transaction yields nothing.
+     */
+    public void testRollbackNoTransaction() {
+        execute(node(), "ROLLBACK");
+    }
+
+    /**
+     * Test that ROLLBACK rolls back a transaction.
+     */
+    public void testRollback() {
+        execute(node(), "BEGIN TRANSACTION");
+
+        assertTxPresent();
+
+        Transaction tx = tx();
+
+        assertTxState(tx, TransactionState.ACTIVE);
+
+        execute(node(), "ROLLBACK TRANSACTION");
+
+        assertTxState(tx, TransactionState.ROLLED_BACK);
+
+        assertSqlTxNotPresent();
+    }
+
+    /**
+     * Test that attempting to perform various SQL operations within non SQL transaction yields an exception.
+     */
+    public void testSqlOperationsWithinNonSqlTransaction() {
+        assertSqlOperationWithinNonSqlTransactionThrows("COMMIT");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("ROLLBACK");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("SELECT * from ints");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("DELETE from ints");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("INSERT INTO ints(k, v) values(10, 15)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("MERGE INTO ints(k, v) values(10, 15)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("UPDATE ints SET v = 100 WHERE k = 5");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("create index idx on ints(v)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("CREATE TABLE T(k int primary key, v int)");
+    }
+
+    /**
+     * Check that trying to run given SQL statement both locally and in distributed mode yields an exception
+     * if transaction already has been marked as being of SQL type.
+     * @param sql SQL statement.
+     */
+    private void assertSqlOperationWithinNonSqlTransactionThrows(final String sql) {
+        try (Transaction ignored = node().transactions().txStart()) {
+            node().cache("ints").put(1, 1);
+
+            assertSqlException(new RunnableX() {
+                @Override public void run() throws Exception {
+                    execute(node(), sql);
+                }
+            }, IgniteQueryErrorCode.TRANSACTION_TYPE_MISMATCH);
+        }
+
+        try (Transaction ignored = node().transactions().txStart()) {
+            node().cache("ints").put(1, 1);
+
+            assertSqlException(new RunnableX() {
+                @Override public void run() throws Exception {
+                    node().cache("ints").query(new SqlFieldsQuery(sql).setLocal(true)).getAll();
+                }
+            }, IgniteQueryErrorCode.TRANSACTION_TYPE_MISMATCH);
+        }
+    }
+
+    /**
+     * Test that attempting to perform a cache API operation from within an SQL transaction fails.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void checkCacheOperationThrows(final String opName, final Object... args) {
+        execute(node(), "BEGIN");
+
+        try {
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    try {
+                        // We need to detect types based on arguments due to multiple overloads.
+                        Class[] types;
+
+                        if (F.isEmpty(args))
+                            types = (Class[]) X.EMPTY_OBJECT_ARRAY;
+                        else {
+                            types = new Class[args.length];
+
+                            for (int i = 0; i < args.length; i++)
+                                types[i] = argTypeForObject(args[i]);
+                        }
+
+                        Object res = U.invoke(GatewayProtectedCacheProxy.class, node().cache("ints"),
+                            opName, types, args);
+
+                        if (opName.endsWith("Async"))
+                            ((IgniteFuture)res).get();
+                    }
+                    catch (IgniteCheckedException e) {
+                        if (e.getCause() != null) {
+                            try {
+                                if (e.getCause().getCause() != null)
+                                    throw (Exception)e.getCause().getCause();
+                                else
+                                    fail();
+                            }
+                            catch (IgniteException e1) {
+                                // Some public API methods don't have IgniteCheckedException on their signature
+                                // and thus may wrap it into an IgniteException.
+                                if (e1.getCause() != null)
+                                    throw (Exception)e1.getCause();
+                                else
+                                    fail();
+                            }
+                        }
+                        else
+                            fail();
+                    }
+
+                    return null;
+                }
+            }, IgniteCheckedException.class,
+                "SQL queries and cache operations may not be used in the same transaction.");
+        }
+        finally {
+            try {
+                execute(node(), "ROLLBACK");
+            }
+            catch (Throwable e) {
+                // No-op.
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private static Class<?> argTypeForObject(Object arg) {
+        if (arg instanceof Set)
+            return Set.class;
+        else if (arg instanceof Map)
+            return Map.class;
+        else if (arg.getClass().getName().startsWith("java.lang."))
+            return Object.class;
+        else if (arg instanceof CacheEntryProcessor)
+            return CacheEntryProcessor.class;
+        else if (arg instanceof EntryProcessor)
+            return EntryProcessor.class;
+        else
+            return arg.getClass();
+    }
+
+    /**
+     * Test that attempting to perform a cache PUT operation from within an SQL transaction fails.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCacheOperationsFromSqlTransaction() {
+        checkCacheOperationThrows("get", 1);
+
+        checkCacheOperationThrows("getAsync", 1);
+
+        checkCacheOperationThrows("getEntry", 1);
+
+        checkCacheOperationThrows("getEntryAsync", 1);
+
+        checkCacheOperationThrows("getAndPut", 1, 1);
+
+        checkCacheOperationThrows("getAndPutAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndPutIfAbsent", 1, 1);
+
+        checkCacheOperationThrows("getAndPutIfAbsentAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndReplace", 1, 1);
+
+        checkCacheOperationThrows("getAndReplaceAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndRemove", 1);
+
+        checkCacheOperationThrows("getAndRemoveAsync", 1);
+
+        checkCacheOperationThrows("containsKey", 1);
+
+        checkCacheOperationThrows("containsKeyAsync", 1);
+
+        checkCacheOperationThrows("put", 1, 1);
+
+        checkCacheOperationThrows("putAsync", 1, 1);
+
+        checkCacheOperationThrows("putIfAbsent", 1, 1);
+
+        checkCacheOperationThrows("putIfAbsentAsync", 1, 1);
+
+        checkCacheOperationThrows("remove", 1);
+
+        checkCacheOperationThrows("removeAsync", 1);
+
+        checkCacheOperationThrows("remove", 1, 1);
+
+        checkCacheOperationThrows("removeAsync", 1, 1);
+
+        checkCacheOperationThrows("replace", 1, 1);
+
+        checkCacheOperationThrows("replaceAsync", 1, 1);
+
+        checkCacheOperationThrows("replace", 1, 1, 1);
+
+        checkCacheOperationThrows("replaceAsync", 1, 1, 1);
+
+        checkCacheOperationThrows("getAll", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("containsKeys", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getEntries", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("putAll", Collections.singletonMap(1, 1));
+
+        checkCacheOperationThrows("removeAll", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getAllAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("containsKeysAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getEntriesAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("putAllAsync", Collections.singletonMap(1, 1));
+
+        checkCacheOperationThrows("removeAllAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("invoke", 1, ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invoke", 1, CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAsync", 1, ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAsync", 1, CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singletonMap(1, CACHE_ENTRY_PROC), X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singleton(1), CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singleton(1), ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singletonMap(1, CACHE_ENTRY_PROC),
+            X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singleton(1), CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singleton(1), ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+    }
+
+    /** */
+    private final static EntryProcessor<Integer, Integer, Object> ENTRY_PROC =
+        new EntryProcessor<Integer, Integer, Object>() {
+        @Override public Object process(MutableEntry<Integer, Integer> entry, Object... arguments)
+        throws EntryProcessorException {
+            return null;
+        }
+    };
+
+    /** */
+    private final static CacheEntryProcessor<Integer, Integer, Object> CACHE_ENTRY_PROC =
+        new CacheEntryProcessor<Integer, Integer, Object>() {
+            @Override public Object process(MutableEntry<Integer, Integer> entry, Object... arguments)
+                throws EntryProcessorException {
+                return null;
+            }
+        };
+
+    /**
+     * @return Node.
+     */
+    private IgniteEx node() {
+        return grid(0);
+    }
+
+    /**
+     * @return Currently open transaction.
+     */
+    private Transaction tx() {
+        return node().transactions().tx();
+    }
+
+    /**
+     * Check that there's an open transaction with SQL flag.
+     */
+    private void assertTxPresent() {
+        assertNotNull(tx());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected List<List<?>> execute(Ignite node, String sql) {
+        return node.cache("ints").query(new SqlFieldsQuery(sql).setSchema(QueryUtils.DFLT_SCHEMA)).getAll();
+    }
+
+    /**
+     * Check that there's no open transaction.
+     */
+    private void assertSqlTxNotPresent() {
+        assertNull(tx());
+    }
+
+    /**
+     * Check transaction state.
+     */
+    private static void assertTxState(Transaction tx, TransactionState state) {
+        assertEquals(state, tx.state());
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsSelfTest.java
new file mode 100644
index 0000000..d93bdab
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/index/SqlTransactionsSelfTest.java
@@ -0,0 +1,420 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.index;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheEntryProcessor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.GatewayProtectedCacheProxy;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.query.QueryUtils;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionState;
+
+/**
+ * Tests to check behavior regarding transactions started via SQL.
+ */
+public class SqlTransactionsSelfTest extends AbstractSchemaSelfTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        super.beforeTestsStarted();
+
+        startGrid(commonConfiguration(0));
+
+        super.execute(node(), "CREATE TABLE INTS(k int primary key, v int) WITH \"wrap_value=false,cache_name=ints," +
+            "atomicity=transactional_snapshot\"");
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+
+        super.afterTestsStopped();
+    }
+
+    /**
+     * Test that BEGIN opens a transaction.
+     */
+    public void testBegin() {
+        execute(node(), "BEGIN");
+
+        assertTxPresent();
+
+        assertTxState(tx(), TransactionState.ACTIVE);
+    }
+
+    /**
+     * Test that COMMIT commits a transaction.
+     */
+    public void testCommit() {
+        execute(node(), "BEGIN WORK");
+
+        assertTxPresent();
+
+        Transaction tx = tx();
+
+        assertTxState(tx, TransactionState.ACTIVE);
+
+        execute(node(), "COMMIT TRANSACTION");
+
+        assertTxState(tx, TransactionState.COMMITTED);
+
+        assertSqlTxNotPresent();
+    }
+
+    /**
+     * Test that COMMIT without a transaction yields nothing.
+     */
+    public void testCommitNoTransaction() {
+        execute(node(), "COMMIT");
+    }
+
+    /**
+     * Test that ROLLBACK without a transaction yields nothing.
+     */
+    public void testRollbackNoTransaction() {
+        execute(node(), "ROLLBACK");
+    }
+
+    /**
+     * Test that ROLLBACK rolls back a transaction.
+     */
+    public void testRollback() {
+        execute(node(), "BEGIN TRANSACTION");
+
+        assertTxPresent();
+
+        Transaction tx = tx();
+
+        assertTxState(tx, TransactionState.ACTIVE);
+
+        execute(node(), "ROLLBACK TRANSACTION");
+
+        assertTxState(tx, TransactionState.ROLLED_BACK);
+
+        assertSqlTxNotPresent();
+    }
+
+    /**
+     * Test that attempting to perform various SQL operations within non SQL transaction yields an exception.
+     */
+    public void testSqlOperationsWithinNonSqlTransaction() {
+        assertSqlOperationWithinNonSqlTransactionThrows("COMMIT");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("ROLLBACK");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("SELECT * from ints");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("DELETE from ints");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("INSERT INTO ints(k, v) values(10, 15)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("MERGE INTO ints(k, v) values(10, 15)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("UPDATE ints SET v = 100 WHERE k = 5");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("create index idx on ints(v)");
+
+        assertSqlOperationWithinNonSqlTransactionThrows("CREATE TABLE T(k int primary key, v int)");
+    }
+
+    /**
+     * Check that trying to run given SQL statement both locally and in distributed mode yields an exception
+     * if transaction already has been marked as being of SQL type.
+     * @param sql SQL statement.
+     */
+    private void assertSqlOperationWithinNonSqlTransactionThrows(final String sql) {
+        try (Transaction ignored = node().transactions().txStart()) {
+            node().cache("ints").put(1, 1);
+
+            assertSqlException(new RunnableX() {
+                @Override public void run() throws Exception {
+                    execute(node(), sql);
+                }
+            }, IgniteQueryErrorCode.TRANSACTION_TYPE_MISMATCH);
+        }
+
+        try (Transaction ignored = node().transactions().txStart()) {
+            node().cache("ints").put(1, 1);
+
+            assertSqlException(new RunnableX() {
+                @Override public void run() throws Exception {
+                    node().cache("ints").query(new SqlFieldsQuery(sql).setLocal(true)).getAll();
+                }
+            }, IgniteQueryErrorCode.TRANSACTION_TYPE_MISMATCH);
+        }
+    }
+
+    /**
+     * Test that attempting to perform a cache API operation from within an SQL transaction fails.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    private void checkCacheOperationThrows(final String opName, final Object... args) {
+        execute(node(), "BEGIN");
+
+        try {
+            GridTestUtils.assertThrows(null, new Callable<Object>() {
+                @Override public Object call() throws Exception {
+                    try {
+                        // We need to detect types based on arguments due to multiple overloads.
+                        Class[] types;
+
+                        if (F.isEmpty(args))
+                            types = (Class[]) X.EMPTY_OBJECT_ARRAY;
+                        else {
+                            types = new Class[args.length];
+
+                            for (int i = 0; i < args.length; i++)
+                                types[i] = argTypeForObject(args[i]);
+                        }
+
+                        Object res = U.invoke(GatewayProtectedCacheProxy.class, node().cache("ints"),
+                            opName, types, args);
+
+                        if (opName.endsWith("Async"))
+                            ((IgniteFuture)res).get();
+                    }
+                    catch (IgniteCheckedException e) {
+                        if (e.getCause() != null) {
+                            try {
+                                if (e.getCause().getCause() != null)
+                                    throw (Exception)e.getCause().getCause();
+                                else
+                                    fail();
+                            }
+                            catch (IgniteException e1) {
+                                // Some public API methods don't have IgniteCheckedException on their signature
+                                // and thus may wrap it into an IgniteException.
+                                if (e1.getCause() != null)
+                                    throw (Exception)e1.getCause();
+                                else
+                                    fail();
+                            }
+                        }
+                        else
+                            fail();
+                    }
+
+                    return null;
+                }
+            }, IgniteCheckedException.class,
+                "SQL queries and cache operations may not be used in the same transaction.");
+        }
+        finally {
+            try {
+                execute(node(), "ROLLBACK");
+            }
+            catch (Throwable e) {
+                // No-op.
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private static Class<?> argTypeForObject(Object arg) {
+        if (arg instanceof Set)
+            return Set.class;
+        else if (arg instanceof Map)
+            return Map.class;
+        else if (arg.getClass().getName().startsWith("java.lang."))
+            return Object.class;
+        else if (arg instanceof CacheEntryProcessor)
+            return CacheEntryProcessor.class;
+        else if (arg instanceof EntryProcessor)
+            return EntryProcessor.class;
+        else
+            return arg.getClass();
+    }
+
+    /**
+     * Test that attempting to perform a cache PUT operation from within an SQL transaction fails.
+     */
+    @SuppressWarnings("ThrowableResultOfMethodCallIgnored")
+    public void testCacheOperationsFromSqlTransaction() {
+        checkCacheOperationThrows("get", 1);
+
+        checkCacheOperationThrows("getAsync", 1);
+
+        checkCacheOperationThrows("getEntry", 1);
+
+        checkCacheOperationThrows("getEntryAsync", 1);
+
+        checkCacheOperationThrows("getAndPut", 1, 1);
+
+        checkCacheOperationThrows("getAndPutAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndPutIfAbsent", 1, 1);
+
+        checkCacheOperationThrows("getAndPutIfAbsentAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndReplace", 1, 1);
+
+        checkCacheOperationThrows("getAndReplaceAsync", 1, 1);
+
+        checkCacheOperationThrows("getAndRemove", 1);
+
+        checkCacheOperationThrows("getAndRemoveAsync", 1);
+
+        checkCacheOperationThrows("containsKey", 1);
+
+        checkCacheOperationThrows("containsKeyAsync", 1);
+
+        checkCacheOperationThrows("put", 1, 1);
+
+        checkCacheOperationThrows("putAsync", 1, 1);
+
+        checkCacheOperationThrows("putIfAbsent", 1, 1);
+
+        checkCacheOperationThrows("putIfAbsentAsync", 1, 1);
+
+        checkCacheOperationThrows("remove", 1);
+
+        checkCacheOperationThrows("removeAsync", 1);
+
+        checkCacheOperationThrows("remove", 1, 1);
+
+        checkCacheOperationThrows("removeAsync", 1, 1);
+
+        checkCacheOperationThrows("replace", 1, 1);
+
+        checkCacheOperationThrows("replaceAsync", 1, 1);
+
+        checkCacheOperationThrows("replace", 1, 1, 1);
+
+        checkCacheOperationThrows("replaceAsync", 1, 1, 1);
+
+        checkCacheOperationThrows("getAll", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("containsKeys", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getEntries", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("putAll", Collections.singletonMap(1, 1));
+
+        checkCacheOperationThrows("removeAll", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getAllAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("containsKeysAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("getEntriesAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("putAllAsync", Collections.singletonMap(1, 1));
+
+        checkCacheOperationThrows("removeAllAsync", new HashSet<>(Arrays.asList(1, 2)));
+
+        checkCacheOperationThrows("invoke", 1, ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invoke", 1, CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAsync", 1, ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAsync", 1, CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singletonMap(1, CACHE_ENTRY_PROC), X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singleton(1), CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAll", Collections.singleton(1), ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singletonMap(1, CACHE_ENTRY_PROC),
+            X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singleton(1), CACHE_ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+
+        checkCacheOperationThrows("invokeAllAsync", Collections.singleton(1), ENTRY_PROC, X.EMPTY_OBJECT_ARRAY);
+    }
+
+    /** */
+    private final static EntryProcessor<Integer, Integer, Object> ENTRY_PROC =
+        new EntryProcessor<Integer, Integer, Object>() {
+        @Override public Object process(MutableEntry<Integer, Integer> entry, Object... arguments)
+        throws EntryProcessorException {
+            return null;
+        }
+    };
+
+    /** */
+    private final static CacheEntryProcessor<Integer, Integer, Object> CACHE_ENTRY_PROC =
+        new CacheEntryProcessor<Integer, Integer, Object>() {
+            @Override public Object process(MutableEntry<Integer, Integer> entry, Object... arguments)
+                throws EntryProcessorException {
+                return null;
+            }
+        };
+
+    /**
+     * @return Node.
+     */
+    private IgniteEx node() {
+        return grid(0);
+    }
+
+    /**
+     * @return Currently open transaction.
+     */
+    private Transaction tx() {
+        return node().transactions().tx();
+    }
+
+    /**
+     * Check that there's an open transaction with SQL flag.
+     */
+    private void assertTxPresent() {
+        assertNotNull(tx());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected List<List<?>> execute(Ignite node, String sql) {
+        return node.cache("ints").query(new SqlFieldsQuery(sql).setSchema(QueryUtils.DFLT_SCHEMA)).getAll();
+    }
+
+    /**
+     * Check that there's no open transaction.
+     */
+    private void assertSqlTxNotPresent() {
+        assertNull(tx());
+    }
+
+    /**
+     * Check transaction state.
+     */
+    private static void assertTxState(Transaction tx, TransactionState state) {
+        assertEquals(state, tx.state());
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java
index 2570bc8..2272f27 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/local/IgniteCacheLocalQuerySelfTest.java
@@ -19,9 +19,12 @@
 
 import java.util.Iterator;
 import java.util.List;
+import java.util.UUID;
 import javax.cache.Cache;
+import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
 import org.apache.ignite.cache.query.QueryCursor;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.cache.query.SqlQuery;
@@ -93,4 +96,63 @@
             cache.destroy();
         }
     }
-}
\ No newline at end of file
+
+    /** {@inheritDoc} */
+    @Override public void testLocalSqlQueryFromClient() throws Exception {
+        try {
+            Ignite g = startGrid("client");
+
+            IgniteCache<Integer, Integer> c = jcache(g, Integer.class, Integer.class);
+
+            for (int i = 0; i < 10; i++)
+                c.put(i, i);
+
+            SqlQuery<Integer, Integer> qry = new SqlQuery<>(Integer.class, "_key >= 5 order by _key");
+
+            qry.setLocal(true);
+
+            try(QueryCursor<Cache.Entry<Integer, Integer>> qryCursor = c.query(qry)) {
+                assertNotNull(qryCursor);
+
+                List<Cache.Entry<Integer, Integer>> res = qryCursor.getAll();
+
+                assertNotNull(res);
+
+                assertEquals(5, res.size());
+            }
+        }
+        finally {
+            stopGrid("client");
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public void testLocalSqlFieldsQueryFromClient() throws Exception {
+        try {
+            Ignite g = startGrid("client");
+
+            IgniteCache<UUID, Person> c = jcache(g, UUID.class, Person.class);
+
+            Person p = new Person("Jon", 1500);
+
+            c.put(p.id(), p);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("select * from Person");
+
+            qry.setLocal(true);
+
+            try(FieldsQueryCursor<List<?>> qryCursor = c.query(qry)) {
+                assertNotNull(qryCursor);
+
+                List<List<?>> res = qryCursor.getAll();
+
+                assertNotNull(res);
+
+                assertEquals(1, res.size());
+            }
+        }
+        finally {
+            stopGrid("client");
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java
new file mode 100644
index 0000000..c449ee2
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccAbstractSqlCoordinatorFailoverTest.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.DML;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Mvcc SQL API coordinator failover test.
+ */
+@SuppressWarnings("unchecked")
+public abstract class CacheMvccAbstractSqlCoordinatorFailoverTest extends CacheMvccAbstractBasicCoordinatorFailoverTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_Server_Backups0_CoordinatorFails() throws Exception {
+        accountsTxReadAll(2, 1, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_SingleNode_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(1, 0, 0, 1,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_RestartCoordinator_ScanDml() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD  , 2, 1, 0, 64,
+            new InitIndexing(Integer.class, Integer.class), SCAN, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_RestartCoordinator_ScanDml_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD  , 1, 0, 0, 1,
+            new InitIndexing(Integer.class, Integer.class), SCAN, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups0_RestartCoordinator_SqlDml() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD, 2, 1, 0, DFLT_PARTITION_COUNT,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_SingleNode_RestartCoordinator_SqlDml_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD, 1, 0, 0, 1,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups0_Sql_Persistence() throws Exception {
+        persistence = true;
+
+        updateNObjectsTest(5, 2, 0, 0, 64, DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_SingleNode_Sql_Persistence() throws Exception {
+        updateNObjectsTest(3, 1, 0, 0, 1, DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorFailureSimplePessimisticTxSql() throws Exception {
+        coordinatorFailureSimple(PESSIMISTIC, REPEATABLE_READ, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxInProgressCoordinatorChangeSimple_Readonly() throws Exception {
+        txInProgressCoordinatorChangeSimple(PESSIMISTIC, REPEATABLE_READ,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReadInProgressCoordinatorFailsSimple_FromClient() throws Exception {
+        readInProgressCoordinatorFailsSimple(true, new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorChangeActiveQueryClientFails_Simple() throws Exception {
+        checkCoordinatorChangeActiveQueryClientFails_Simple(new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCoordinatorChangeActiveQueryClientFails_SimpleScan() throws Exception {
+        checkCoordinatorChangeActiveQueryClientFails_Simple(new InitIndexing(Integer.class, Integer.class), SCAN, DML);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBackupsAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBackupsAbstractTest.java
new file mode 100644
index 0000000..998cb76
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBackupsAbstractTest.java
@@ -0,0 +1,808 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.TestRecordingCommunicationSpi;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxQueryEnlistResponse;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysRequest;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtForceKeysResponse;
+import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessage;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.tree.mvcc.data.MvccDataRow;
+import org.apache.ignite.internal.util.lang.GridCursor;
+import org.apache.ignite.lang.IgniteBiInClosure;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.DML;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+import static org.junit.Assert.assertArrayEquals;
+
+/**
+ * Backups tests.
+ */
+@SuppressWarnings("unchecked")
+public abstract class CacheMvccBackupsAbstractTest extends CacheMvccAbstractTest {
+
+    /** Test timeout. */
+    private final long txLongTimeout = getTestTimeout() / 4;
+
+    /**
+     * Tests backup consistency.
+     *
+     * @throws Exception If fails.
+     */
+    public void testBackupsCoherenceSimple() throws Exception {
+        disableScheduledVacuum = true;
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, 10)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        final int KEYS_CNT = 5_000;
+        assert KEYS_CNT % 2 == 0;
+
+        startGrids(3);
+
+        Ignite node0 = grid(0);
+        Ignite node1 = grid(1);
+        Ignite node2 = grid(2);
+
+        client = true;
+
+        Ignite client = startGrid();
+
+        awaitPartitionMapExchange();
+
+        IgniteCache clientCache = client.cache(DEFAULT_CACHE_NAME);
+        IgniteCache cache0 = node0.cache(DEFAULT_CACHE_NAME);
+        IgniteCache cache1 = node1.cache(DEFAULT_CACHE_NAME);
+        IgniteCache cache2 = node2.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            for (int i = 0; i < KEYS_CNT / 2; i += 2) {
+                SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values ("
+                    + i + ',' + i * 2 + "), (" + (i + 1) + ',' + (i + 1) * 2 + ')');
+
+                clientCache.query(qry).getAll();
+            }
+
+            tx.commit();
+        }
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            for (int i = 0; i < 10; i++) {
+                SqlFieldsQuery qry = new SqlFieldsQuery("DELETE from Integer WHERE _key = " + i);
+
+                clientCache.query(qry).getAll();
+            }
+
+            for (int i = 10; i < KEYS_CNT + 1; i++) {
+                SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val=" + i * 10 + " WHERE _key = " + i);
+
+                clientCache.query(qry).getAll();
+            }
+
+            tx.commit();
+        }
+
+        Map<KeyCacheObject, List<CacheDataRow>> vers0 = allVersions(cache0);
+
+        List res0 = getAll(cache0, "Integer");
+
+        stopGrid(0);
+
+        awaitPartitionMapExchange();
+
+        Map<KeyCacheObject, List<CacheDataRow>> vers1 = allVersions(cache1);
+
+        assertVersionsEquals(vers0, vers1);
+
+        List res1 = getAll(cache1, "Integer");
+
+        assertEqualsCollections(res0, res1);
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            for (int i = 10; i < 20; i++) {
+                SqlFieldsQuery qry = new SqlFieldsQuery("DELETE from Integer WHERE _key = " + i);
+
+                clientCache.query(qry).getAll();
+            }
+
+            for (int i = 20; i < KEYS_CNT + 1; i++) {
+                SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val=" + i * 100 + " WHERE _key = " + i);
+
+                clientCache.query(qry).getAll();
+            }
+
+            tx.commit();
+        }
+
+        vers1 = allVersions(cache1);
+
+        res1 = getAll(cache2, "Integer");
+
+        stopGrid(1);
+
+        awaitPartitionMapExchange();
+
+        Map<KeyCacheObject, List<CacheDataRow>> vers2 = allVersions(cache2);
+
+        assertVersionsEquals(vers1, vers2);
+
+        List res2 = getAll(cache2, "Integer");
+
+        assertEqualsCollections(res1, res2);
+    }
+
+    /**
+     * Checks cache backups consistency with large queries.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithLargeOperations() throws Exception {
+        disableScheduledVacuum = true;
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 1, 10)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        final int KEYS_CNT = 5_000;
+        assert KEYS_CNT % 2 == 0;
+
+        startGrids(2);
+
+        Ignite node1 = grid(0);
+        Ignite node2 = grid(1);
+
+        client = true;
+
+        Ignite client = startGrid();
+
+        awaitPartitionMapExchange();
+
+        IgniteCache clientCache = client.cache(DEFAULT_CACHE_NAME);
+        IgniteCache cache1 = node1.cache(DEFAULT_CACHE_NAME);
+        IgniteCache cache2 = node2.cache(DEFAULT_CACHE_NAME);
+
+        StringBuilder insert = new StringBuilder("INSERT INTO Integer (_key, _val) values ");
+
+        boolean first = true;
+
+        for (int key = 0; key < KEYS_CNT; key++) {
+            if (!first)
+                insert.append(',');
+            else
+                first = false;
+
+            insert.append('(').append(key).append(',').append(key * 10).append(')');
+        }
+
+        String qryStr = insert.toString();
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);
+
+            clientCache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        qryStr = "SELECT * FROM Integer WHERE _key >= " + KEYS_CNT / 2 + " FOR UPDATE";
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);
+
+            clientCache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+
+        qryStr = "DELETE FROM Integer WHERE _key >= " + KEYS_CNT / 2;
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);
+
+            clientCache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        Map<KeyCacheObject, List<CacheDataRow>> cache1Vers = allVersions(cache1);
+
+        List res1 = getAll(cache1, "Integer");
+
+        stopGrid(0);
+
+        awaitPartitionMapExchange();
+
+        Map<KeyCacheObject, List<CacheDataRow>> cache2Vers = allVersions(cache2);
+
+        assertVersionsEquals(cache1Vers, cache2Vers);
+
+        List res2 = getAll(cache2, "Integer");
+
+        assertEqualsCollections(res1, res2);
+    }
+
+    /**
+     * Checks cache backups consistency with in-flight batches overflow.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithInFlightBatchesOverflow() throws Exception {
+        testSpi = true;
+
+        disableScheduledVacuum = true;
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 1, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        final int KEYS_CNT = 30_000;
+        assert KEYS_CNT % 2 == 0;
+
+        startGrids(2);
+
+        Ignite node1 = grid(0);
+        Ignite node2 = grid(1);
+
+        client = true;
+
+        Ignite client = startGrid();
+
+        awaitPartitionMapExchange();
+
+        IgniteCache<?,?> clientCache = client.cache(DEFAULT_CACHE_NAME);
+        IgniteCache<?,?> cache1 = node1.cache(DEFAULT_CACHE_NAME);
+        IgniteCache<?,?> cache2 = node2.cache(DEFAULT_CACHE_NAME);
+
+        AtomicInteger keyGen = new AtomicInteger();
+        Affinity affinity = affinity(clientCache);
+
+        ClusterNode cNode1 = ((IgniteEx)node1).localNode();
+        ClusterNode cNode2 = ((IgniteEx)node2).localNode();
+
+        StringBuilder insert = new StringBuilder("INSERT INTO Integer (_key, _val) values ");
+
+        for (int i = 0; i < KEYS_CNT; i++) {
+            if (i > 0)
+                insert.append(',');
+
+            // To make big batches in near results future.
+            Integer key = i < KEYS_CNT / 2 ? keyForNode(affinity, keyGen, cNode1) : keyForNode(affinity, keyGen, cNode2);
+
+            assert key != null;
+
+            insert.append('(').append(key).append(',').append(key * 10).append(')');
+        }
+
+        String qryStr = insert.toString();
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);
+
+            clientCache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        // Add a delay to simulate batches overflow.
+        TestRecordingCommunicationSpi spi1 = TestRecordingCommunicationSpi.spi(node1);
+        TestRecordingCommunicationSpi spi2 = TestRecordingCommunicationSpi.spi(node2);
+
+        spi1.closure(new IgniteBiInClosure<ClusterNode, Message>() {
+            @Override public void apply(ClusterNode node, Message msg) {
+                if (msg instanceof GridDhtTxQueryEnlistResponse)
+                    doSleep(100);
+            }
+        });
+
+        spi2.closure(new IgniteBiInClosure<ClusterNode, Message>() {
+            @Override public void apply(ClusterNode node, Message msg) {
+                if (msg instanceof GridDhtTxQueryEnlistResponse)
+                    doSleep(100);
+            }
+        });
+
+        qryStr = "DELETE FROM Integer WHERE _key >= " + 10;
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(txLongTimeout);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(qryStr);
+
+            clientCache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        Map<KeyCacheObject, List<CacheDataRow>> cache1Vers = allVersions(cache1);
+
+        List res1 = getAll(cache1, "Integer");
+
+        stopGrid(0);
+
+        awaitPartitionMapExchange();
+
+        Map<KeyCacheObject, List<CacheDataRow>> cache2Vers = allVersions(cache2);
+
+        assertVersionsEquals(cache1Vers, cache2Vers);
+
+        List res2 = getAll(cache2, "Integer");
+
+        assertEqualsCollections(res1, res2);
+    }
+
+    /**
+     * Tests concurrent updates backups coherence.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithConcurrentUpdates2ServersNoClients() throws Exception {
+        checkBackupsCoherenceWithConcurrentUpdates(2, 0);
+    }
+
+    /**
+     * Tests concurrent updates backups coherence.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithConcurrentUpdates4ServersNoClients() throws Exception {
+        checkBackupsCoherenceWithConcurrentUpdates(4, 0);
+    }
+
+    /**
+     * Tests concurrent updates backups coherence.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithConcurrentUpdates3Servers1Client() throws Exception {
+        checkBackupsCoherenceWithConcurrentUpdates(3, 1);
+    }
+
+    /**
+     * Tests concurrent updates backups coherence.
+     *
+     * @throws Exception If failed.
+     */
+    public void testBackupsCoherenceWithConcurrentUpdates5Servers2Clients() throws Exception {
+        checkBackupsCoherenceWithConcurrentUpdates(5, 2);
+    }
+
+    /**
+     * Tests concurrent updates backups coherence.
+     *
+     * @throws Exception If failed.
+     */
+    private void checkBackupsCoherenceWithConcurrentUpdates(int srvs, int clients) throws Exception {
+        assert srvs > 1;
+
+        disableScheduledVacuum = true;
+
+        accountsTxReadAll(srvs, clients, srvs - 1, DFLT_PARTITION_COUNT,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML, 5_000, null);
+
+        for (int i = 0; i < srvs - 1; i++) {
+            Ignite node1 = grid(i);
+
+            IgniteCache cache1 = node1.cache(DEFAULT_CACHE_NAME);
+
+            Map<KeyCacheObject, List<CacheDataRow>> vers1 = allVersions(cache1);
+
+            List res1 = getAll(cache1, "MvccTestAccount");
+
+            stopGrid(i);
+
+            awaitPartitionMapExchange();
+
+            Ignite node2 = grid(i + 1);
+
+            IgniteCache cache2 = node2.cache(DEFAULT_CACHE_NAME);
+
+            Map<KeyCacheObject, List<CacheDataRow>> vers2 = allVersions(cache2);
+
+            assertVersionsEquals(vers1, vers2);
+
+            List res2 = getAll(cache2, "MvccTestAccount");
+
+            assertEqualsCollections(res1, res2);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoForceKeyRequestDelayedRebalanceNoVacuum() throws Exception {
+        disableScheduledVacuum = true;
+
+        doTestRebalanceNodeAdd(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoForceKeyRequestDelayedRebalance() throws Exception {
+        doTestRebalanceNodeAdd(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoForceKeyRequestNoVacuum() throws Exception {
+        disableScheduledVacuum = true;
+
+        doTestRebalanceNodeAdd(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testNoForceKeyRequest() throws Exception {
+        doTestRebalanceNodeAdd(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void doTestRebalanceNodeAdd(boolean delayRebalance) throws Exception {
+        testSpi = true;
+
+        final Ignite node1 = startGrid(0);
+
+        final IgniteCache<Object, Object> cache = node1.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 1, 16)
+                .setIndexedTypes(Integer.class, Integer.class));
+
+        try (Transaction tx = node1.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values " +
+                "(1,1),(2,2),(3,3),(4,4),(5,5)");
+
+            cache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        TestRecordingCommunicationSpi spi = TestRecordingCommunicationSpi.spi(node1);
+
+        // Check for a force key request.
+        spi.closure(new IgniteBiInClosure<ClusterNode, Message>() {
+            @Override public void apply(ClusterNode node, Message msg) {
+                if (delayRebalance && msg instanceof GridDhtPartitionSupplyMessage)
+                    doSleep(500);
+
+                if (msg instanceof GridDhtForceKeysResponse)
+                    fail("Force key request");
+            }
+        });
+
+        final Ignite node2 = startGrid(1);
+
+        TestRecordingCommunicationSpi.spi(node2).closure(
+            new IgniteBiInClosure<ClusterNode, Message>() {
+                @Override public void apply(ClusterNode node, Message msg) {
+                    if (msg instanceof GridDhtForceKeysRequest)
+                        fail("Force key request");
+                }
+            }
+        );
+
+        IgniteCache<Object, Object> cache2 = node2.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = node2.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key IN " +
+                "(1,2,3,4,5)");
+
+            cache2.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        awaitPartitionMapExchange();
+
+        doSleep(2000);
+
+        stopGrid(1);
+
+        try (Transaction tx = node1.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values " +
+                "(1,1),(2,2),(3,3),(4,4),(5,5)");
+
+            cache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        doSleep(1000);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebalanceNodeLeaveClient() throws Exception {
+        doTestRebalanceNodeLeave(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebalanceNodeLeaveServer() throws Exception {
+        doTestRebalanceNodeLeave(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void doTestRebalanceNodeLeave(boolean startClient) throws Exception {
+        testSpi = true;
+        disableScheduledVacuum = true;
+
+        startGridsMultiThreaded(4);
+
+        client = true;
+
+        final Ignite node = startClient ? startGrid(4) : grid(0);
+
+        final IgniteCache<Object, Object> cache = node.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, 16)
+                .setIndexedTypes(Integer.class, Integer.class));
+
+        List<Integer> keys = new ArrayList<>();
+
+        for (int i = 0; i < 4; i++)
+            keys.addAll(primaryKeys(grid(i).cache(DEFAULT_CACHE_NAME), 2));
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            StringBuilder sb = new StringBuilder("INSERT INTO Integer (_key, _val) values ");
+
+            for (int i = 0; i < keys.size(); i++) {
+                if (i > 0)
+                    sb.append(", ");
+
+                sb.append("(" + keys.get(i) + ", " + keys.get(i) + ")");
+            }
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sb.toString());
+
+            cache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        stopGrid(3);
+
+        awaitPartitionMapExchange();
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = 10*_key");
+
+            cache.query(qry).getAll();
+
+            tx.commit();
+        }
+
+        awaitPartitionMapExchange();
+
+        for (Integer key : keys) {
+            List<CacheDataRow> vers = null;
+
+            for (int i = 0; i < 3; i++) {
+                ClusterNode n = grid(i).cluster().localNode();
+
+                if (node.affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(n, key)) {
+
+                    List<CacheDataRow> vers0 = allKeyVersions(grid(i).cache(DEFAULT_CACHE_NAME), key);
+
+                    if (vers != null)
+                        assertKeyVersionsEquals(vers, vers0);
+
+                    vers = vers0;
+                }
+            }
+        }
+    }
+
+    /**
+     * Retrieves all versions of all keys from cache.
+     *
+     * @param cache Cache.
+     * @return {@link Map} of keys to its versions.
+     * @throws IgniteCheckedException If failed.
+     */
+    private Map<KeyCacheObject, List<CacheDataRow>> allVersions(IgniteCache cache) throws IgniteCheckedException {
+        IgniteCacheProxy cache0 = (IgniteCacheProxy)cache;
+        GridCacheContext cctx = cache0.context();
+
+        assert cctx.mvccEnabled();
+
+        Map<KeyCacheObject, List<CacheDataRow>> vers = new HashMap<>();
+
+        for (Object e : cache) {
+            IgniteBiTuple entry = (IgniteBiTuple)e;
+
+            KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());
+
+            GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, null);
+
+            List<CacheDataRow> rows = new ArrayList<>();
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                rows.add(row);
+            }
+
+            vers.put(key, rows);
+        }
+
+        return vers;
+    }
+
+    /**
+     * @param cache Cache.
+     * @param key Key.
+     * @return Collection of versioned rows.
+     * @throws IgniteCheckedException if failed.
+     */
+    private List<CacheDataRow> allKeyVersions(IgniteCache cache, Object key) throws IgniteCheckedException {
+        IgniteCacheProxy cache0 = (IgniteCacheProxy)cache;
+        GridCacheContext cctx = cache0.context();
+
+        KeyCacheObject key0 = cctx.toCacheKeyObject(key);
+
+        GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key0, null);
+
+        List<CacheDataRow> rows = new ArrayList<>();
+
+        while (cur.next()) {
+            CacheDataRow row = cur.get();
+
+            rows.add(row);
+        }
+
+        return rows;
+    }
+
+    /**
+     * Checks stored versions equality.
+     *
+     * @param left Keys versions to compare.
+     * @param right Keys versions to compare.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void assertVersionsEquals(Map<KeyCacheObject, List<CacheDataRow>> left,
+        Map<KeyCacheObject, List<CacheDataRow>> right) throws IgniteCheckedException {
+        assertNotNull(left);
+        assertNotNull(right);
+
+        assertTrue(!left.isEmpty());
+        assertTrue(!right.isEmpty());
+
+        assertEqualsCollections(left.keySet(), right.keySet());
+
+        for (KeyCacheObject key : right.keySet()) {
+            List<CacheDataRow> leftRows = left.get(key);
+            List<CacheDataRow> rightRows = right.get(key);
+
+            assertKeyVersionsEquals(leftRows, rightRows);
+        }
+    }
+
+    /**
+     *
+     * @param leftRows Left rows.
+     * @param rightRows Right rows.
+     * @throws IgniteCheckedException If failed.
+     */
+    private void assertKeyVersionsEquals(List<CacheDataRow> leftRows, List<CacheDataRow> rightRows)
+        throws IgniteCheckedException {
+
+        assertNotNull(leftRows);
+        assertNotNull(rightRows);
+
+        assertEquals("leftRows=" + leftRows + ", rightRows=" + rightRows, leftRows.size(), rightRows.size());
+
+        for (int i = 0; i < leftRows.size(); i++) {
+            CacheDataRow leftRow = leftRows.get(i);
+            CacheDataRow rightRow = rightRows.get(i);
+
+            assertNotNull(leftRow);
+            assertNotNull(rightRow);
+
+            assertTrue(leftRow instanceof MvccDataRow);
+            assertTrue(rightRow instanceof MvccDataRow);
+
+            leftRow.key().valueBytes(null);
+
+            assertEquals(leftRow.expireTime(), rightRow.expireTime());
+            assertEquals(leftRow.partition(), rightRow.partition());
+            assertArrayEquals(leftRow.value().valueBytes(null), rightRow.value().valueBytes(null));
+            assertEquals(leftRow.version(), rightRow.version());
+            assertEquals(leftRow.cacheId(), rightRow.cacheId());
+            assertEquals(leftRow.hash(), rightRow.hash());
+            assertEquals(leftRow.key(), rightRow.key());
+            assertTrue(MvccUtils.compare(leftRow, rightRow.mvccVersion()) == 0);
+            assertTrue(MvccUtils.compareNewVersion(leftRow, rightRow.newMvccVersion()) == 0);
+            assertEquals(leftRow.newMvccCoordinatorVersion(), rightRow.newMvccCoordinatorVersion());
+            assertEquals(leftRow.newMvccCounter(), rightRow.newMvccCounter());
+            assertEquals(leftRow.newMvccOperationCounter(), rightRow.newMvccOperationCounter());
+        }
+    }
+
+    /**
+     * Retrieves all table rows from local node.
+     * @param cache Cache.
+     * @param tblName Table name.
+     * @return All table rows.
+     */
+    private List getAll(IgniteCache cache, String tblName) {
+        List res = cache.query(new SqlFieldsQuery("SELECT * FROM " + tblName)).getAll();
+
+        Collections.sort(res, new Comparator<Object>() {
+            @Override public int compare(Object o1, Object o2) {
+                List<?> l1 = (List<?>)o1;
+                List<?> l2 = (List<?>)o2;
+
+                int res =  ((Comparable)l1.get(0)).compareTo((Comparable)l2.get(0));
+
+                if (res == 0 && l1.size() > 1)
+                    return ((Comparable)l1.get(1)).compareTo((Comparable)l2.get(1));
+                else
+                    return res;
+
+            }
+        });
+
+        return res;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBulkLoadTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBulkLoadTest.java
new file mode 100644
index 0000000..98bbdfc
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccBulkLoadTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.File;
+import java.io.Serializable;
+import java.sql.DriverManager;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.List;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+
+/**
+ *
+ */
+public class CacheMvccBulkLoadTest extends CacheMvccAbstractTest {
+    /** */
+    private IgniteCache<Object, Object> sqlNexus;
+
+    /** */
+    private Statement stmt;
+
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        Ignite ignite = startGrid(0);
+        sqlNexus = ignite.getOrCreateCache(new CacheConfiguration<>("sqlNexus").setSqlSchema("PUBLIC"));
+        sqlNexus.query(q("" +
+            "create table person(" +
+            "  id int not null primary key," +
+            "  name varchar not null" +
+            ") with \"atomicity=transactional_snapshot\""
+        ));
+        stmt = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1").createStatement();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCopyStoresData() throws Exception {
+        String csvFilePath = new File(getClass().getResource("mvcc_person.csv").toURI()).getAbsolutePath();
+        stmt.executeUpdate("copy from '" + csvFilePath + "' into person (id, name) format csv");
+
+        List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
+
+        List<List<? extends Serializable>> exp = asList(
+            asList(1, "John"),
+            asList(2, "Jack")
+        );
+        assertEquals(exp, rows);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCopyDoesNotOverwrite() throws Exception {
+        sqlNexus.query(q("insert into person values(1, 'Old')"));
+        String csvFilePath = new File(getClass().getResource("mvcc_person.csv").toURI()).getAbsolutePath();
+        stmt.executeUpdate("copy from '" + csvFilePath + "' into person (id, name) format csv");
+
+        List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
+
+        List<List<? extends Serializable>> exp = asList(
+            asList(1, "Old"),
+            asList(2, "Jack")
+        );
+        assertEquals(exp, rows);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCopyLeavesPartialResultsInCaseOfFailure() throws Exception {
+        String csvFilePath = new File(getClass().getResource("mvcc_person_broken.csv").toURI()).getAbsolutePath();
+        try {
+            stmt.executeUpdate("copy from '" + csvFilePath + "' into person (id, name) format csv");
+            fail();
+        }
+        catch (SQLException ignored) {
+            // assert exception is thrown
+        }
+
+        List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
+
+        List<List<? extends Serializable>> exp = singletonList(
+            asList(1, "John")
+        );
+        assertEquals(exp, rows);
+    }
+
+    /** */
+    private static SqlFieldsQuery q(String sql) {
+        return new SqlFieldsQuery(sql);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccDmlSimpleTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccDmlSimpleTest.java
new file mode 100644
index 0000000..7f141ca
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccDmlSimpleTest.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import javax.cache.CacheException;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+
+import static java.util.Arrays.asList;
+
+/**
+ *
+ */
+public class CacheMvccDmlSimpleTest extends CacheMvccAbstractTest {
+    /** */
+    private IgniteCache<?, ?> cache;
+
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cache = startGrid(0).getOrCreateCache(
+            new CacheConfiguration<>("test")
+                .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT)
+                .setSqlSchema("PUBLIC")
+                .setIndexedTypes(Integer.class, Integer.class)
+        );
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testInsert() throws Exception {
+        int cnt = update("insert into Integer(_key, _val) values(1, 1),(2, 2)");
+
+        assertEquals(2, cnt);
+
+        assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+
+        try {
+            update("insert into Integer(_key, _val) values(3, 3),(1, 1)");
+        } catch (CacheException e) {
+            assertTrue(e.getCause() instanceof IgniteSQLException);
+            assertEquals(IgniteQueryErrorCode.DUPLICATE_KEY, ((IgniteSQLException)e.getCause()).statusCode());
+        }
+
+        assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testMerge() throws Exception {
+        {
+            int cnt = update("merge into Integer(_key, _val) values(1, 1),(2, 2)");
+
+            assertEquals(2, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("merge into Integer(_key, _val) values(3, 3),(1, 1)");
+
+            assertEquals(2, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2), asList(3, 3)), query("select * from Integer"));
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testUpdate() throws Exception {
+        {
+            int cnt = update("update Integer set _val = 42 where _key = 42");
+
+            assertEquals(0, cnt);
+            assertTrue(query("select * from Integer").isEmpty());
+        }
+
+        update("insert into Integer(_key, _val) values(1, 1),(2, 2)");
+
+        {
+            int cnt = update("update Integer set _val = 42 where _key = 42");
+
+            assertEquals(0, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("update Integer set _val = 42 where _key >= 42");
+
+            assertEquals(0, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("update Integer set _val = 11 where _key = 1");
+
+            assertEquals(1, cnt);
+            assertEquals(asSet(asList(1, 11), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("update Integer set _val = 12 where _key <= 2");
+
+            assertEquals(asSet(asList(1, 12), asList(2, 12)), query("select * from Integer"));
+            assertEquals(2, cnt);
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testDelete() throws Exception {
+        {
+            int cnt = update("delete from Integer where _key = 42");
+
+            assertEquals(0, cnt);
+        }
+
+        update("insert into Integer(_key, _val) values(1, 1),(2, 2)");
+
+        {
+            int cnt = update("delete from Integer where _key = 42");
+
+            assertEquals(0, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("delete from Integer where _key >= 42");
+
+            assertEquals(0, cnt);
+            assertEquals(asSet(asList(1, 1), asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("delete from Integer where _key = 1");
+
+            assertEquals(1, cnt);
+            assertEquals(asSet(asList(2, 2)), query("select * from Integer"));
+        }
+
+        {
+            int cnt = update("delete from Integer where _key <= 2");
+
+            assertTrue(query("select * from Integer").isEmpty());
+            assertEquals(1, cnt);
+        }
+    }
+
+    /**
+     * @param q Query.
+     * @return Row set.
+     */
+    private Set<List<?>> query(String q) {
+        return new HashSet<>(cache.query(new SqlFieldsQuery(q)).getAll());
+    }
+
+    /**
+     * @param q Query.
+     * @return Updated rows count.
+     */
+    private int update(String q) {
+        return Integer.parseInt(cache.query(new SqlFieldsQuery(q)).getAll().get(0).get(0).toString());
+    }
+
+    /** */
+    private Set<List<?>> asSet(List<?>... ls) {
+        return new HashSet<>(asList(ls));
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentJdbcTransactionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentJdbcTransactionTest.java
new file mode 100644
index 0000000..235d87f
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccIteratorWithConcurrentJdbcTransactionTest.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class CacheMvccIteratorWithConcurrentJdbcTransactionTest extends CacheMvccIteratorWithConcurrentTransactionTest {
+    /** {@inheritDoc} */
+    @Override boolean jdbcTx() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest.java
new file mode 100644
index 0000000..97c062f2
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest extends
+    CacheMvccLocalEntriesWithConcurrentTransactionTest {
+    /** {@inheritDoc} */
+    @Override boolean jdbcTx() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedBackupsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedBackupsTest.java
new file mode 100644
index 0000000..71d832c1
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedBackupsTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class CacheMvccPartitionedBackupsTest extends CacheMvccBackupsAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSelectForUpdateQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSelectForUpdateQueryTest.java
new file mode 100644
index 0000000..12209ab
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSelectForUpdateQueryTest.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class CacheMvccPartitionedSelectForUpdateQueryTest extends CacheMvccSelectForUpdateQueryAbstractTest {
+    /** {@inheritDoc} */
+    public CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateDistributedSegmented() throws Exception {
+        doTestSelectForUpdateDistributed("PersonSeg", false);
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateLocalSegmented() throws Exception {
+        doTestSelectForUpdateLocal("PersonSeg", false);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlCoordinatorFailoverTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlCoordinatorFailoverTest.java
new file mode 100644
index 0000000..1362b4a
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlCoordinatorFailoverTest.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SCAN;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.DML;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * SQL Mvcc coordinator failover test for partitioned caches.
+ */
+public class CacheMvccPartitionedSqlCoordinatorFailoverTest extends CacheMvccAbstractSqlCoordinatorFailoverTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_ClientServer_Backups2_CoordinatorFails() throws Exception {
+        accountsTxReadAll(4, 2, 2, DFLT_PARTITION_COUNT,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_Server_Backups1_CoordinatorFails_Persistence() throws Exception {
+        persistence = true;
+
+        accountsTxReadAll(2, 0, 1, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML, DFLT_TEST_TIME, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups3_RestartCoordinator_ScanDml() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD  , 5, 2, 3, DFLT_PARTITION_COUNT,
+            new InitIndexing(Integer.class, Integer.class), SCAN, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_RestartCoordinator_ScanDml_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD  , 2, 1, 2, DFLT_PARTITION_COUNT,
+            new InitIndexing(Integer.class, Integer.class), SCAN, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups2_RestartCoordinator_SqlDml_Persistence() throws Exception {
+        persistence = true;
+
+        putAllGetAll(RestartMode.RESTART_CRD, 4, 2, 2, 64,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutAllGetAll_ClientServer_Backups1_RestartCoordinator_SqlDml() throws Exception {
+        putAllGetAll(RestartMode.RESTART_CRD, 2, 1, 1, 64,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups2_Sql() throws Exception {
+        updateNObjectsTest(7, 3, 2, 2, DFLT_PARTITION_COUNT, DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdate_N_Objects_ClientServer_Backups1_Sql_Persistence() throws Exception {
+        persistence = true;
+
+        updateNObjectsTest(10, 2, 1, 1, DFLT_PARTITION_COUNT, DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, Integer.class), SQL, DML, RestartMode.RESTART_CRD);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlReadInProgressCoordinatorFails() throws Exception {
+        readInProgressCoordinatorFails(false, false, PESSIMISTIC, REPEATABLE_READ, SQL, DML, new InitIndexing(Integer.class, Integer.class));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlReadInsideTxInProgressCoordinatorFails() throws Exception {
+        readInProgressCoordinatorFails(false, true, PESSIMISTIC, REPEATABLE_READ, SQL, DML, new InitIndexing(Integer.class, Integer.class));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlReadInProgressCoordinatorFails_ReadDelay() throws Exception {
+        readInProgressCoordinatorFails(true, false, PESSIMISTIC, REPEATABLE_READ, SQL, DML, new InitIndexing(Integer.class, Integer.class));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlReadInsideTxInProgressCoordinatorFails_ReadDelay() throws Exception {
+        readInProgressCoordinatorFails(true, true, PESSIMISTIC, REPEATABLE_READ, SQL, DML, new InitIndexing(Integer.class, Integer.class));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReadInProgressCoordinatorFailsSimple_FromServer() throws Exception {
+        readInProgressCoordinatorFailsSimple(false, new InitIndexing(Integer.class, Integer.class), SQL, DML);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlQueriesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlQueriesTest.java
new file mode 100644
index 0000000..e0b4a24
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlQueriesTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class CacheMvccPartitionedSqlQueriesTest extends CacheMvccSqlQueriesAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesTest.java
new file mode 100644
index 0000000..199cfad
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class CacheMvccPartitionedSqlTxQueriesTest extends CacheMvccSqlTxQueriesAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesWithReducerTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesWithReducerTest.java
new file mode 100644
index 0000000..03de543
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccPartitionedSqlTxQueriesWithReducerTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class CacheMvccPartitionedSqlTxQueriesWithReducerTest extends CacheMvccSqlTxQueriesWithReducerAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedBackupsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedBackupsTest.java
new file mode 100644
index 0000000..02de0a3
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedBackupsTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/** */
+public class CacheMvccReplicatedBackupsTest extends CacheMvccBackupsAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return REPLICATED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSelectForUpdateQueryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSelectForUpdateQueryTest.java
new file mode 100644
index 0000000..a458319
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSelectForUpdateQueryTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/** */
+public class CacheMvccReplicatedSelectForUpdateQueryTest extends CacheMvccSelectForUpdateQueryAbstractTest {
+    /** {@inheritDoc} */
+    public CacheMode cacheMode() {
+        return REPLICATED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlCoordinatorFailoverTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlCoordinatorFailoverTest.java
new file mode 100644
index 0000000..2f72bce
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlCoordinatorFailoverTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+/**
+ * SQL Mvcc coordinator failover test for replicated caches.
+ */
+public class CacheMvccReplicatedSqlCoordinatorFailoverTest extends CacheMvccAbstractSqlCoordinatorFailoverTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.REPLICATED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlQueriesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlQueriesTest.java
new file mode 100644
index 0000000..ba8a5c3
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlQueriesTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/** */
+public class CacheMvccReplicatedSqlQueriesTest extends CacheMvccSqlQueriesAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return REPLICATED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesTest.java
new file mode 100644
index 0000000..bde2c5d
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesTest.java
@@ -0,0 +1,242 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.ThreadLocalRandom;
+import javax.cache.Cache;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/** */
+public class CacheMvccReplicatedSqlTxQueriesTest extends CacheMvccSqlTxQueriesAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return REPLICATED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        ccfgs = null;
+        ccfg = null;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReplicatedJoinPartitionedClient() throws Exception {
+        checkReplicatedJoinPartitioned(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testReplicatedJoinPartitionedServer() throws Exception {
+        checkReplicatedJoinPartitioned(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void checkReplicatedJoinPartitioned(boolean client) throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(REPLICATED, FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+                .setName("int")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(PARTITIONED, FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class,
+                CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class),
+            cacheConfiguration(REPLICATED, FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+                .setName("target")
+                .setIndexedTypes(Integer.class, Integer.class)
+        };
+
+        startGridsMultiThreaded(3);
+
+        this.client = true;
+
+        startGrid(3);
+
+        Ignite node = client ? grid(3) : grid(0);
+
+        List<List<?>> r;
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            r = runSql(node, "INSERT INTO \"int\".Integer(_key, _val) VALUES (1,1), (2,2), (3,3)");
+
+            assertEquals(3L, r.get(0).get(0));
+
+            tx.commit();
+        }
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            r = runSql(node, "INSERT INTO \"default\".MvccTestSqlIndexValue(_key, idxVal1) " +
+                "VALUES (1,10), (2, 20), (3, 30)");
+
+            assertEquals(3L, r.get(0).get(0));
+
+            tx.commit();
+        }
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            r = runSql(node, "INSERT INTO \"target\".Integer(_key, _val) " +
+                "SELECT a._key, a.idxVal1*b._val FROM \"default\".MvccTestSqlIndexValue a " +
+                "JOIN \"int\".Integer b ON a._key = b._key");
+
+            assertEquals(3L, r.get(0).get(0));
+
+            tx.commit();
+        }
+
+        for (int n = 0; n < 3; ++n) {
+            node = grid(n);
+
+            r = runSqlLocal(node, "SELECT _key, _val FROM \"target\".Integer ORDER BY _key");
+
+            assertEquals(3L, r.size());
+
+            assertEquals(1, r.get(0).get(0));
+            assertEquals(2, r.get(1).get(0));
+            assertEquals(3, r.get(2).get(0));
+
+            assertEquals(10, r.get(0).get(1));
+            assertEquals(40, r.get(1).get(1));
+            assertEquals(90, r.get(2).get(1));
+        }
+    }
+
+    /**
+     *
+     * @throws Exception If failed.
+     */
+    public void testReplicatedAndPartitionedUpdateSingleTransaction() throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(REPLICATED, FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+                .setName("rep")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(PARTITIONED, FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class)
+                .setName("part"),
+        };
+
+        startGridsMultiThreaded(3);
+
+        client = true;
+
+        startGrid(3);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite node = grid(rnd.nextInt(4));
+
+        List<List<?>> r;
+
+        Cache<Integer, Integer> repCache = node.cache("rep");
+
+        repCache.put(1, 1);
+        repCache.put(2, 2);
+        repCache.put(3, 3);
+
+        Cache<Integer, MvccTestSqlIndexValue> partCache = node.cache("part");
+
+        partCache.put(1, new MvccTestSqlIndexValue(1));
+        partCache.put(2, new MvccTestSqlIndexValue(2));
+        partCache.put(3, new MvccTestSqlIndexValue(3));
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            r = runSql(node, "UPDATE \"rep\".Integer SET _val = _key * 10");
+
+            assertEquals(3L, r.get(0).get(0));
+
+            r = runSql(node, "UPDATE  \"part\".MvccTestSqlIndexValue SET idxVal1 = _key * 10");
+
+            assertEquals(3L, r.get(0).get(0));
+
+            tx.commit();
+        }
+
+        r = runSql(node, "SELECT COUNT(1) FROM \"rep\".Integer r JOIN \"part\".MvccTestSqlIndexValue p" +
+            " ON r._key = p._key WHERE r._val = p.idxVal1");
+
+        assertEquals(3L, r.get(0).get(0));
+
+        for (int n = 0; n < 3; ++n) {
+            node = grid(n);
+
+            r = runSqlLocal(node, "SELECT _key, _val FROM \"rep\".Integer ORDER BY _key");
+
+            assertEquals(3L, r.size());
+
+            assertEquals(1, r.get(0).get(0));
+            assertEquals(2, r.get(1).get(0));
+            assertEquals(3, r.get(2).get(0));
+
+            assertEquals(10, r.get(0).get(1));
+            assertEquals(20, r.get(1).get(1));
+            assertEquals(30, r.get(2).get(1));
+        }
+    }
+
+    /**
+     * Run query.
+     *
+     * @param node Node.
+     * @param sqlText Query.
+     * @return Results.
+     */
+    private List<List<?>> runSql(Ignite node, String sqlText) {
+        GridQueryProcessor qryProc = ((IgniteEx)node).context().query();
+
+        return qryProc.querySqlFields(new SqlFieldsQuery(sqlText), false).getAll();
+    }
+
+    /**
+     * Run query locally.
+     *
+     * @param node Node.
+     * @param sqlText Query.
+     * @return Results.
+     */
+    private List<List<?>> runSqlLocal(Ignite node, String sqlText) {
+        GridQueryProcessor qryProc = ((IgniteEx)node).context().query();
+
+        return qryProc.querySqlFields(new SqlFieldsQuery(sqlText).setLocal(true), false).getAll();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesWithReducerTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesWithReducerTest.java
new file mode 100644
index 0000000..173c43f
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccReplicatedSqlTxQueriesWithReducerTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import org.apache.ignite.cache.CacheMode;
+
+import static org.apache.ignite.cache.CacheMode.REPLICATED;
+
+/** */
+public class CacheMvccReplicatedSqlTxQueriesWithReducerTest extends CacheMvccSqlTxQueriesWithReducerAbstractTest {
+    /** {@inheritDoc} */
+    protected CacheMode cacheMode() {
+        return REPLICATED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentJdbcTransactionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentJdbcTransactionTest.java
new file mode 100644
index 0000000..7272def
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccScanQueryWithConcurrentJdbcTransactionTest.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class CacheMvccScanQueryWithConcurrentJdbcTransactionTest extends
+    CacheMvccScanQueryWithConcurrentTransactionTest {
+    /** {@inheritDoc} */
+    @Override boolean jdbcTx() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSelectForUpdateQueryAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSelectForUpdateQueryAbstractTest.java
new file mode 100644
index 0000000..739aaf8
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSelectForUpdateQueryAbstractTest.java
@@ -0,0 +1,357 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.sql.Connection;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.Callable;
+import java.util.concurrent.TimeUnit;
+import javax.cache.CacheException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.QueryEntity;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.internal.processors.cache.index.AbstractSchemaSelfTest.connect;
+import static org.apache.ignite.internal.processors.cache.index.AbstractSchemaSelfTest.execute;
+
+/**
+ * Test for {@code SELECT FOR UPDATE} queries.
+ */
+public abstract class CacheMvccSelectForUpdateQueryAbstractTest extends CacheMvccAbstractTest {
+    /** */
+    private static final int CACHE_SIZE = 50;
+
+    /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        disableScheduledVacuum = getName().equals("testSelectForUpdateAfterAbortedTx");
+
+        startGrids(3);
+
+        CacheConfiguration seg = new CacheConfiguration("segmented*");
+
+        seg.setCacheMode(cacheMode());
+
+        if (seg.getCacheMode() == PARTITIONED)
+            seg.setQueryParallelism(4);
+
+        grid(0).addCacheConfiguration(seg);
+
+        Thread.sleep(1000L);
+
+        try (Connection c = connect(grid(0))) {
+            execute(c, "create table person (id int primary key, firstName varchar, lastName varchar) " +
+                "with \"atomicity=transactional_snapshot,cache_name=Person\"");
+
+            execute(c, "create table person_seg (id int primary key, firstName varchar, lastName varchar) " +
+                "with \"atomicity=transactional_snapshot,cache_name=PersonSeg,template=segmented\"");
+
+            try (Transaction tx = grid(0).transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+                TransactionIsolation.REPEATABLE_READ)) {
+
+                for (int i = 1; i <= CACHE_SIZE; i++) {
+                    execute(c, "insert into person(id, firstName, lastName) values(" + i + ",'" + i + "','" + i + "')");
+
+                    execute(c, "insert into person_seg(id, firstName, lastName) " +
+                        "values(" + i + ",'" + i + "','" + i + "')");
+                }
+
+                tx.commit();
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateDistributed() throws Exception {
+        doTestSelectForUpdateDistributed("Person", false);
+    }
+
+
+    /**
+     *
+     */
+    public void testSelectForUpdateLocal() throws Exception {
+        doTestSelectForUpdateLocal("Person", false);
+    }
+
+    /**
+     *
+     * @throws Exception If failed.
+     */
+    public void testSelectForUpdateOutsideTx() throws Exception {
+        doTestSelectForUpdateDistributed("Person", true);
+    }
+
+    /**
+     *
+     * @throws Exception If failed.
+     */
+    public void testSelectForUpdateOutsideTxLocal() throws Exception {
+        doTestSelectForUpdateLocal("Person", true);
+    }
+
+    /**
+     * @param cacheName Cache name.
+     * @param outsideTx Whether select is executed outside transaction
+     * @throws Exception If failed.
+     */
+    void doTestSelectForUpdateLocal(String cacheName, boolean outsideTx) throws Exception {
+        Ignite node = grid(0);
+
+        IgniteCache<Integer, ?> cache = node.cache(cacheName);
+
+        Transaction ignored = outsideTx ? null : node.transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+            TransactionIsolation.REPEATABLE_READ);
+
+        try {
+            SqlFieldsQuery qry = new SqlFieldsQuery("select id, * from " + tableName(cache) + " order by id for update")
+                .setLocal(true);
+
+            FieldsQueryCursor<List<?>> query = cache.query(qry);
+
+            List<List<?>> res = query.getAll();
+
+            List<Integer> keys = new ArrayList<>();
+
+            for (List<?> r : res)
+                keys.add((Integer)r.get(0));
+
+            checkLocks(cacheName, keys, !outsideTx);
+        }
+        finally {
+            U.close(ignored, log);
+        }
+    }
+
+    /**
+     * @param cacheName Cache name.
+     * @param outsideTx Whether select is executed outside transaction
+     * @throws Exception If failed.
+     */
+    void doTestSelectForUpdateDistributed(String cacheName, boolean outsideTx) throws Exception {
+        Ignite node = grid(0);
+
+        IgniteCache<Integer, ?> cache = node.cache(cacheName);
+
+        Transaction ignored = outsideTx ? null : node.transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+            TransactionIsolation.REPEATABLE_READ);
+
+        try {
+            SqlFieldsQuery qry = new SqlFieldsQuery("select id, * from " + tableName(cache) + " order by id for update")
+                .setPageSize(10);
+
+            FieldsQueryCursor<List<?>> query = cache.query(qry);
+
+            List<List<?>> res = query.getAll();
+
+            List<Integer> keys = new ArrayList<>();
+
+            for (List<?> r : res)
+                keys.add((Integer)r.get(0));
+
+            checkLocks(cacheName, keys, !outsideTx);
+        }
+        finally {
+            U.close(ignored, log);
+        }
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateWithUnion() {
+        assertQueryThrows("select id from person union select 1 for update",
+            "SELECT UNION FOR UPDATE is not supported.");
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateWithJoin() {
+        assertQueryThrows("select p1.id from person p1 join person p2 on p1.id = p2.id for update",
+            "SELECT FOR UPDATE with joins is not supported.");
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateWithLimit() {
+        assertQueryThrows("select id from person limit 0,5 for update",
+            "LIMIT/OFFSET clauses are not supported for SELECT FOR UPDATE.");
+    }
+
+    /**
+     *
+     */
+    public void testSelectForUpdateWithGroupings() {
+        assertQueryThrows("select count(*) from person for update",
+            "SELECT FOR UPDATE with aggregates and/or GROUP BY is not supported.");
+
+        assertQueryThrows("select lastName, count(*) from person group by lastName for update",
+            "SELECT FOR UPDATE with aggregates and/or GROUP BY is not supported.");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSelectForUpdateAfterAbortedTx() throws Exception {
+        assert disableScheduledVacuum;
+
+        Ignite node = grid(0);
+
+        IgniteCache<Integer, ?> cache = node.cache("Person");
+
+        List<List<?>> res;
+
+        try (Transaction tx = node.transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+            TransactionIsolation.REPEATABLE_READ)) {
+
+            res = cache.query(new SqlFieldsQuery("update person set lastName=UPPER(lastName)")).getAll();
+
+            assertEquals((long)CACHE_SIZE, res.get(0).get(0));
+
+            tx.rollback();
+        }
+
+        try (Transaction tx = node.transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+            TransactionIsolation.REPEATABLE_READ)) {
+
+            res = cache.query(new SqlFieldsQuery("select id, * from person order by id for update")).getAll();
+
+            assertEquals(CACHE_SIZE, res.size());
+
+            List<Integer> keys = new ArrayList<>();
+
+            for (List<?> r : res)
+                keys.add((Integer)r.get(0));
+
+            checkLocks("Person", keys, true);
+
+            tx.rollback();
+        }
+    }
+
+    /**
+     * Check that an attempt to get a lock on any key from given list fails by timeout.
+     *
+     * @param cacheName Cache name to check.
+     * @param keys Keys to check.
+     * @param locked Whether the key is locked
+     * @throws Exception if failed.
+     */
+    @SuppressWarnings({"ThrowableNotThrown", "unchecked"})
+    private void checkLocks(String cacheName, List<Integer> keys, boolean locked) throws Exception {
+        Ignite node = ignite(2);
+        IgniteCache cache = node.cache(cacheName);
+
+        List<IgniteInternalFuture<Integer>> calls = new ArrayList<>();
+
+        for (int key : keys) {
+            calls.add(GridTestUtils.runAsync(new Callable<Integer>() {
+                /** {@inheritDoc} */
+                @Override public Integer call() {
+                    try (Transaction ignored = node.transactions().txStart(TransactionConcurrency.PESSIMISTIC,
+                        TransactionIsolation.REPEATABLE_READ)) {
+                        List<List<?>> res = cache
+                            .query(
+                                new SqlFieldsQuery("select * from " + tableName(cache) +
+                                    " where id = " + key + " for update").setTimeout(1, TimeUnit.SECONDS)
+                            )
+                            .getAll();
+
+                        return (Integer)res.get(0).get(0);
+                    }
+                }
+            }));
+        }
+
+        for (IgniteInternalFuture fut : calls) {
+            if (!locked)
+                fut.get(TX_TIMEOUT);
+            else {
+                try {
+                    fut.get();
+                }
+                catch (Exception e) {
+                    CacheException e0 = X.cause(e, CacheException.class);
+
+                    assert e0 != null;
+
+                    assert e0.getMessage() != null &&
+                        e0.getMessage().contains("Failed to acquire lock within provided timeout");
+                }
+            }
+        }
+    }
+
+    /**
+     * @param cache Cache.
+     * @return Name of the table contained by this cache.
+     */
+    @SuppressWarnings("unchecked")
+    private static String tableName(IgniteCache<?, ?> cache) {
+        return ((Collection<QueryEntity>)cache.getConfiguration(CacheConfiguration.class).getQueryEntities())
+            .iterator().next().getTableName();
+    }
+
+    /**
+     * Test that query throws exception with expected message.
+     * @param qry SQL.
+     * @param exMsg Expected message.
+     */
+    private void assertQueryThrows(String qry, String exMsg) {
+        assertQueryThrows(qry, exMsg, false);
+
+        assertQueryThrows(qry, exMsg, true);
+    }
+
+    /**
+     * Test that query throws exception with expected message.
+     * @param qry SQL.
+     * @param exMsg Expected message.
+     * @param loc Local query flag.
+     */
+    @SuppressWarnings("ThrowableNotThrown")
+    private void assertQueryThrows(String qry, String exMsg, boolean loc) {
+        Ignite node = grid(0);
+
+        GridTestUtils.assertThrows(null, new Callable<Object>() {
+            @Override public Object call() {
+                return node.cache("Person").query(new SqlFieldsQuery(qry).setLocal(loc)).getAll();
+            }
+        }, IgniteSQLException.class, exMsg);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeTest.java
new file mode 100644
index 0000000..fe1304a
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeTest.java
@@ -0,0 +1,488 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ForkJoinPool;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.function.Consumer;
+import java.util.stream.IntStream;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.affinity.Affinity;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+
+import static org.apache.ignite.cache.CachePeekMode.BACKUP;
+
+/**
+ *
+ */
+public class CacheMvccSizeTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /** */
+    private void checkSizeModificationByOperation(String sql, boolean commit, int expSizeDelta) throws Exception {
+        checkSizeModificationByOperation(c -> {}, cache -> cache.query(q(sql)).getAll(), commit, expSizeDelta);
+    }
+
+    /** */
+    private void checkSizeModificationByOperation(String initSql, String sql, boolean commit,
+        int expSizeDelta) throws Exception {
+        checkSizeModificationByOperation(
+            cache -> cache.query(q(initSql)).getAll(),
+            cache -> cache.query(q(sql)).getAll(),
+            commit,
+            expSizeDelta);
+    }
+
+    /** */
+    private void checkSizeModificationByOperation(Consumer<IgniteCache<?, ?>> inTx, boolean commit,
+        int expSizeDelta) throws Exception {
+        checkSizeModificationByOperation(c -> {}, inTx, commit, expSizeDelta);
+    }
+
+    /** */
+    private void checkSizeModificationByOperation(Consumer<IgniteCache<?, ?>> beforeTx,
+        Consumer<IgniteCache<?, ?>> inTx, boolean commit, int expSizeDelta) throws Exception {
+        IgniteCache<Object, Object> tbl0 = grid(0).cache("person");
+
+        tbl0.query(q("delete from person"));
+
+        beforeTx.accept(tbl0);
+
+        int initSize = tbl0.size();
+
+        tbl0.query(q("begin"));
+
+        inTx.accept(tbl0);
+
+        // size is not changed before commit
+        assertEquals(0, tbl0.size() - initSize);
+
+        if (commit)
+            tbl0.query(q("commit"));
+        else
+            tbl0.query(q("rollback"));
+
+        assertEquals(expSizeDelta, tbl0.size() - initSize);
+        assertEquals(tbl0.size(), table(grid(1)).size());
+
+        assertEquals(tbl0.size(), tbl0.size(BACKUP));
+        assertEquals(tbl0.size(), table(grid(1)).size(BACKUP));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testSql() throws Exception {
+        startGridsMultiThreaded(2);
+
+        createTable(grid(0));
+
+        checkSizeModificationByOperation("insert into person values(1, 'a')", true, 1);
+
+        checkSizeModificationByOperation("insert into person values(1, 'a')", false, 0);
+
+        checkSizeModificationByOperation(
+            personTbl -> personTbl.query(q("insert into person values(1, 'a')")),
+            personTbl -> {
+                try {
+                    personTbl.query(q("insert into person values(1, 'a')"));
+                }
+                catch (Exception e) {
+                    if (e.getCause() instanceof IgniteSQLException) {
+                        assertEquals(IgniteQueryErrorCode.DUPLICATE_KEY,
+                            ((IgniteSQLException)e.getCause()).statusCode());
+                    }
+                    else {
+                        e.printStackTrace();
+
+                        fail("Unexpected exceptions");
+                    }
+                }
+            },
+            true, 0);
+
+        checkSizeModificationByOperation("merge into person(id, name) values(1, 'a')", true, 1);
+
+        checkSizeModificationByOperation("merge into person(id, name) values(1, 'a')", false, 0);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "merge into person(id, name) values(1, 'b')", true, 0);
+
+        checkSizeModificationByOperation("update person set name = 'b' where id = 1", true, 0);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "update person set name = 'b' where id = 1", true, 0);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "delete from person where id = 1", true, -1);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "delete from person where id = 1", false, 0);
+
+        checkSizeModificationByOperation("delete from person where id = 1", true, 0);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "select * from person", true, 0);
+
+        checkSizeModificationByOperation("select * from person", true, 0);
+
+        checkSizeModificationByOperation(
+            "insert into person values(1, 'a')", "select * from person where id = 1 for update", true, 0);
+
+        checkSizeModificationByOperation("select * from person where id = 1 for update", true, 0);
+
+        checkSizeModificationByOperation(personTbl -> {
+            personTbl.query(q("insert into person values(1, 'a')"));
+
+            personTbl.query(q("insert into person values(%d, 'b')", keyInSamePartition(grid(0), "person", 1)));
+
+            personTbl.query(q("insert into person values(%d, 'c')", keyInDifferentPartition(grid(0), "person", 1)));
+        }, true, 3);
+
+        checkSizeModificationByOperation(personTbl -> {
+            personTbl.query(q("insert into person values(1, 'a')"));
+
+            personTbl.query(q("delete from person where id = 1"));
+        }, true, 0);
+
+        checkSizeModificationByOperation(personTbl -> {
+            personTbl.query(q("insert into person values(1, 'a')"));
+
+            personTbl.query(q("delete from person where id = 1"));
+
+            personTbl.query(q("insert into person values(1, 'a')"));
+        }, true, 1);
+
+        checkSizeModificationByOperation(
+            personTbl -> personTbl.query(q("insert into person values(1, 'a')")),
+            personTbl -> {
+                personTbl.query(q("delete from person where id = 1"));
+
+                personTbl.query(q("insert into person values(1, 'a')"));
+            }, true, 0);
+
+        checkSizeModificationByOperation(personTbl -> {
+            personTbl.query(q("merge into person(id, name) values(1, 'a')"));
+
+            personTbl.query(q("delete from person where id = 1"));
+        }, true, 0);
+
+        checkSizeModificationByOperation(personTbl -> {
+            personTbl.query(q("merge into person(id, name) values(1, 'a')"));
+
+            personTbl.query(q("delete from person where id = 1"));
+
+            personTbl.query(q("merge into person(id, name) values(1, 'a')"));
+        }, true, 1);
+
+        checkSizeModificationByOperation(
+            personTbl -> personTbl.query(q("merge into person(id, name) values(1, 'a')")),
+            personTbl -> {
+                personTbl.query(q("delete from person where id = 1"));
+
+                personTbl.query(q("merge into person(id, name) values(1, 'a')"));
+            }, true, 0);
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testInsertDeleteConcurrent() throws Exception {
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> tbl0 = createTable(grid(0));
+
+        SqlFieldsQuery insert = new SqlFieldsQuery("insert into person(id, name) values(?, 'a')");
+
+        SqlFieldsQuery delete = new SqlFieldsQuery("delete from person where id = ?");
+
+        CompletableFuture<Integer> insertFut = CompletableFuture.supplyAsync(() -> {
+            int cnt = 0;
+
+            for (int i = 0; i < 1000; i++)
+                cnt += update(insert.setArgs(ThreadLocalRandom.current().nextInt(10)), tbl0);
+
+            return cnt;
+        });
+
+        CompletableFuture<Integer> deleteFut = CompletableFuture.supplyAsync(() -> {
+            int cnt = 0;
+
+            for (int i = 0; i < 1000; i++)
+                cnt += update(delete.setArgs(ThreadLocalRandom.current().nextInt(10)), tbl0);
+
+            return cnt;
+        });
+
+        int expSize = insertFut.join() - deleteFut.join();
+
+        assertEquals(expSize, tbl0.size());
+        assertEquals(expSize, table(grid(1)).size());
+
+        assertEquals(expSize, tbl0.size(BACKUP));
+        assertEquals(expSize, table(grid(1)).size(BACKUP));
+    }
+
+    /** */
+    private int update(SqlFieldsQuery qry, IgniteCache<?, ?> cache) {
+        try {
+            return Integer.parseInt(cache.query(qry).getAll().get(0).get(0).toString());
+        } catch (Exception e) {
+            return 0;
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWriteConflictDoesNotChangeSize() throws Exception {
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> tbl0 = createTable(grid(0));
+
+        tbl0.query(q("insert into person values(1, 'a')"));
+
+        tbl0.query(q("begin"));
+
+        tbl0.query(q("delete from person where id = 1"));
+
+        CompletableFuture<Void> conflictingStarted = new CompletableFuture<>();
+
+        CompletableFuture<Void> fut = CompletableFuture.runAsync(() -> {
+            tbl0.query(q("begin"));
+
+            try {
+                tbl0.query(q("select * from person")).getAll();
+                conflictingStarted.complete(null);
+
+                tbl0.query(q("merge into person(id, name) values(1, 'b')"));
+            }
+            finally {
+                tbl0.query(q("commit"));
+            }
+        });
+
+        conflictingStarted.join();
+        tbl0.query(q("commit"));
+
+        try {
+            fut.join();
+        }
+        catch (Exception e) {
+            if (e.getCause().getCause() instanceof IgniteSQLException)
+                assertTrue(e.getMessage().toLowerCase().contains("version mismatch"));
+            else {
+                e.printStackTrace();
+
+                fail("Unexpected exception");
+            }
+        }
+
+        assertEquals(0, tbl0.size());
+        assertEquals(0, table(grid(1)).size());
+
+        assertEquals(0, tbl0.size(BACKUP));
+        assertEquals(0, table(grid(1)).size(BACKUP));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testDeleteChangesSizeAfterUnlock() throws Exception {
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> tbl0 = createTable(grid(0));
+
+        tbl0.query(q("insert into person values(1, 'a')"));
+
+        tbl0.query(q("begin"));
+
+        tbl0.query(q("select * from person where id = 1 for update")).getAll();
+
+        CompletableFuture<Thread> asyncThread = new CompletableFuture<>();
+
+        CompletableFuture<Void> fut = CompletableFuture.runAsync(() -> {
+            tbl0.query(q("begin"));
+
+            try {
+                tbl0.query(q("select * from person")).getAll();
+
+                asyncThread.complete(Thread.currentThread());
+                tbl0.query(q("delete from person where id = 1"));
+            }
+            finally {
+                tbl0.query(q("commit"));
+            }
+        });
+
+        Thread concThread = asyncThread.join();
+
+        // wait until concurrent thread blocks awaiting entry mvcc lock release
+        while (concThread.getState() == Thread.State.RUNNABLE && !Thread.currentThread().isInterrupted());
+
+        tbl0.query(q("commit"));
+
+        fut.join();
+
+        assertEquals(0, tbl0.size());
+        assertEquals(0, table(grid(1)).size());
+
+        assertEquals(0, tbl0.size(BACKUP));
+        assertEquals(0, table(grid(1)).size(BACKUP));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testDataStreamerModifiesReplicatedCacheSize() throws Exception {
+        startGridsMultiThreaded(2);
+
+        IgniteEx ignite = grid(0);
+
+        ignite.createCache(
+            new CacheConfiguration<>("test")
+                .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+                .setCacheMode(CacheMode.REPLICATED)
+        );
+
+        try (IgniteDataStreamer<Object, Object> streamer = ignite.dataStreamer("test")) {
+            streamer.addData(1, "a");
+
+            streamer.addData(keyInDifferentPartition(ignite, "test", 1), "b");
+        }
+
+        assertEquals(2, ignite.cache("test").size());
+
+        assertEquals(1, grid(0).cache("test").localSize());
+        assertEquals(1, grid(0).cache("test").localSize(BACKUP));
+
+        assertEquals(1, grid(1).cache("test").localSize());
+        assertEquals(1, grid(1).cache("test").localSize(BACKUP));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testSizeIsConsistentAfterRebalance() throws Exception {
+        IgniteEx ignite = startGrid(0);
+
+        IgniteCache<?, ?> tbl = createTable(ignite);
+
+        for (int i = 0; i < 100; i++)
+            tbl.query(q("insert into person values(?, ?)").setArgs(i, i));
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        IgniteCache<?, ?> tbl0 = grid(0).cache("person");
+        IgniteCache<?, ?> tbl1 = grid(1).cache("person");
+
+        assert tbl0.localSize() != 0 && tbl1.localSize() != 0;
+
+        assertEquals(100, tbl1.size());
+        assertEquals(100, tbl0.localSize() + tbl1.localSize());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSizeIsConsistentAfterRebalanceDuringInsert() throws Exception {
+        IgniteEx ignite = startGrid(0);
+
+        IgniteCache<?, ?> tbl = createTable(ignite);
+
+        Future<?> f = null;
+
+        for (int i = 0; i < 100; i++) {
+            if (i == 50)
+                f = ForkJoinPool.commonPool().submit(() -> startGrid(1));
+
+            tbl.query(q("insert into person values(?, ?)").setArgs(i, i));
+        }
+
+        f.get();
+
+        awaitPartitionMapExchange();
+
+        IgniteCache<?, ?> tbl0 = grid(0).cache("person");
+        IgniteCache<?, ?> tbl1 = grid(1).cache("person");
+
+        assert tbl0.localSize() != 0 && tbl1.localSize() != 0;
+
+        assertEquals(100, tbl1.size());
+        assertEquals(100, tbl0.localSize() + tbl1.localSize());
+    }
+
+    /** */
+    private static IgniteCache<?, ?> table(IgniteEx ignite) {
+        assert ignite.cachex("person").configuration().getAtomicityMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT;
+        assert ignite.cachex("person").configuration().getCacheMode() == CacheMode.REPLICATED;
+
+        return ignite.cache("person");
+    }
+
+    /** */
+    private static IgniteCache<?, ?> createTable(IgniteEx ignite) {
+        IgniteCache<?, ?> sqlNexus = ignite.getOrCreateCache(new CacheConfiguration<>("sqlNexus").setSqlSchema("PUBLIC"));
+
+        sqlNexus.query(q("" +
+            "create table person(" +
+            "  id int primary key," +
+            "  name varchar" +
+            ") with \"atomicity=transactional_snapshot,template=replicated,cache_name=person\""));
+
+        return table(ignite);
+    }
+
+    /** */
+    private static SqlFieldsQuery q(String fSql, Object... args) {
+        return new SqlFieldsQuery(String.format(fSql, args));
+    }
+
+    /** */
+    private static int keyInSamePartition(Ignite ignite, String cacheName, int key) {
+        Affinity<Object> affinity = ignite.affinity(cacheName);
+
+        return IntStream.iterate(key + 1, i -> i + 1)
+            .filter(i -> affinity.partition(i) == affinity.partition(key))
+            .findFirst().getAsInt();
+    }
+
+    /** */
+    private static int keyInDifferentPartition(Ignite ignite, String cacheName, int key) {
+        Affinity<Object> affinity = ignite.affinity(cacheName);
+
+        return IntStream.iterate(key + 1, i -> i + 1)
+            .filter(i -> affinity.partition(i) != affinity.partition(key))
+            .findFirst().getAsInt();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentJdbcTransactionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentJdbcTransactionTest.java
new file mode 100644
index 0000000..437195f
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSizeWithConcurrentJdbcTransactionTest.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+/**
+ *
+ */
+public class CacheMvccSizeWithConcurrentJdbcTransactionTest extends CacheMvccSizeWithConcurrentTransactionTest {
+    /** {@inheritDoc} */
+    @Override boolean jdbcTx() {
+        return true;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlConfigurationValidationTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlConfigurationValidationTest.java
new file mode 100644
index 0000000..7e6c9e8
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlConfigurationValidationTest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.concurrent.Callable;
+import javax.cache.CacheException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+
+/**
+ * Configuration validation for SQL configured caches.
+ */
+public class CacheMvccSqlConfigurationValidationTest extends CacheMvccAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCacheGroupAtomicityModeMismatch1() throws Exception {
+        Ignite node = startGrid();
+
+        node.getOrCreateCache(DEFAULT_CACHE_NAME)
+            .query(new SqlFieldsQuery("CREATE TABLE City (id int primary key, name varchar, population int) WITH " +
+                "\"atomicity=transactional_snapshot,cache_group=group1,template=partitioned,backups=3,cache_name=City\""))
+            .getAll();;
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                node.cache(DEFAULT_CACHE_NAME)
+                    .query(new SqlFieldsQuery("CREATE TABLE Person (id int primary key, name varchar) WITH " +
+                        "\"atomicity=transactional,cache_group=group1,template=partitioned,backups=3,cache_name=Person\""))
+                    .getAll();
+
+                return null;
+            }
+        }, CacheException.class, "Atomicity mode mismatch for caches related to the same group");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCacheGroupAtomicityModeMismatch2() throws Exception {
+        Ignite node = startGrid();
+
+        node.getOrCreateCache(DEFAULT_CACHE_NAME)
+            .query(new SqlFieldsQuery("CREATE TABLE City (id int primary key, name varchar, population int) WITH " +
+                "\"atomicity=transactional,cache_group=group1,template=partitioned,backups=3,cache_name=City\""));
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                node.cache(DEFAULT_CACHE_NAME)
+                    .query(new SqlFieldsQuery("CREATE TABLE Person (id int primary key, name varchar) WITH " +
+                        "\"atomicity=transactional_snapshot,cache_group=group1,template=partitioned,backups=3,cache_name=Person\""))
+                    .getAll();
+
+                return null;
+            }
+        }, CacheException.class, "Atomicity mode mismatch for caches related to the same group");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testTxDifferentMvccSettingsTransactional() throws Exception {
+        ccfg = defaultCacheConfiguration().setSqlSchema("PUBLIC");
+        Ignite node = startGrid();
+
+        IgniteCache cache = node.cache(DEFAULT_CACHE_NAME);
+
+        cache.query(new SqlFieldsQuery("CREATE TABLE Person (id int primary key, name varchar) WITH " +
+                "\"atomicity=transactional_snapshot,template=partitioned,backups=1\"")).getAll();
+
+        cache.query(new SqlFieldsQuery("CREATE TABLE City (id int primary key, name varchar, population int) WITH " +
+            "\"atomicity=transactional,template=partitioned,backups=3\"")).getAll();
+
+        GridTestUtils.assertThrows(log, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                try (Transaction tx = node.transactions().txStart(TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
+                    cache.query(new SqlFieldsQuery("SELECT * FROM Person, City")).getAll();
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, CacheException.class, "Caches with transactional_snapshot atomicity mode cannot participate in the same transaction");
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlQueriesAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlQueriesAbstractTest.java
new file mode 100644
index 0000000..796c0bb
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlQueriesAbstractTest.java
@@ -0,0 +1,1613 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.LinkedHashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicBoolean;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteTransactions;
+import org.apache.ignite.cache.CacheEntryProcessor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.util.lang.GridInClosure3;
+import org.apache.ignite.internal.util.typedef.G;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL_SUM;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.PUT;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * TODO IGNITE-6739: text/spatial indexes with mvcc.
+ * TODO IGNITE-6739: indexingSpi with mvcc.
+ * TODO IGNITE-6739: setQueryParallelism with mvcc.
+ * TODO IGNITE-6739: dynamic index create.
+ */
+@SuppressWarnings("unchecked")
+public abstract class CacheMvccSqlQueriesAbstractTest extends CacheMvccAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_WithRemoves_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_SingleNode_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxSql_SingleNode();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSumSql_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL_SUM, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_WithRemoves_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_WithRemoves_SingleNode_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxSql_WithRemoves_SingleNode();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxSql_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, PUT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdateSingleValue_SingleNode() throws Exception {
+        updateSingleValue(true, false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdateSingleValue_LocalQuery_SingleNode() throws Exception {
+        updateSingleValue(true, true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdateSingleValue_ClientServer() throws Exception {
+        updateSingleValue(false, false);
+    }
+
+    /**
+     * @param singleNode {@code True} for test with single node.
+     * @param locQry Local query flag.
+     * @throws Exception If failed.
+     */
+    private void updateSingleValue(boolean singleNode, final boolean locQry) throws Exception {
+        final int VALS = 100;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final int INC_BY = 110;
+
+        final IgniteInClosure<IgniteCache<Object, Object>> init = new IgniteInClosure<IgniteCache<Object, Object>>() {
+            @Override public void apply(IgniteCache<Object, Object> cache) {
+                Map<Integer, MvccTestSqlIndexValue> vals = new HashMap<>();
+
+                for (int i = 0; i < VALS; i++)
+                    vals.put(i, new MvccTestSqlIndexValue(i));
+
+                cache.putAll(vals);
+            }
+        };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        try {
+                            Integer key = rnd.nextInt(VALS);
+
+                            cache.cache.invoke(key, new CacheEntryProcessor<Integer, MvccTestSqlIndexValue, Object>() {
+                                @Override public Object process(MutableEntry<Integer, MvccTestSqlIndexValue> e, Object... args) {
+                                    Integer key = e.getKey();
+
+                                    MvccTestSqlIndexValue val = e.getValue();
+
+                                    int newIdxVal;
+
+                                    if (val.idxVal1 < INC_BY) {
+                                        assertEquals(key.intValue(), val.idxVal1);
+
+                                        newIdxVal = val.idxVal1 + INC_BY;
+                                    }
+                                    else {
+                                        assertEquals(INC_BY + key, val.idxVal1);
+
+                                        newIdxVal = key;
+                                    }
+
+                                    e.setValue(new MvccTestSqlIndexValue(newIdxVal));
+
+                                    return null;
+                                }
+                            });
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    List<SqlFieldsQuery> fieldsQrys = new ArrayList<>();
+
+                    fieldsQrys.add(
+                        new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where idxVal1=?").setLocal(locQry));
+
+                    fieldsQrys.add(new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where idxVal1=? or idxVal1=?").setLocal(locQry));
+
+                    fieldsQrys.add(new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where _key=?").setLocal(locQry));
+
+                    List<SqlQuery<Integer, MvccTestSqlIndexValue>> sqlQrys = new ArrayList<>();
+
+                    sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "idxVal1=?").setLocal(locQry));
+
+                    sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "idxVal1=? or idxVal1=?").setLocal(locQry));
+
+                    sqlQrys.add(new SqlQuery<Integer, MvccTestSqlIndexValue>(MvccTestSqlIndexValue.class, "_key=?").setLocal(locQry));
+
+                    while (!stop.get()) {
+                        Integer key = rnd.nextInt(VALS);
+
+                        int qryIdx = rnd.nextInt(3);
+
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        List<List<?>> res;
+
+                        try {
+                            if (rnd.nextBoolean()) {
+                                SqlFieldsQuery qry = fieldsQrys.get(qryIdx);
+
+                                if (qryIdx == 1)
+                                    qry.setArgs(key, key + INC_BY);
+                                else
+                                    qry.setArgs(key);
+
+                                res = cache.cache.query(qry).getAll();
+                            }
+                            else {
+                                SqlQuery<Integer, MvccTestSqlIndexValue> qry = sqlQrys.get(qryIdx);
+
+                                if (qryIdx == 1)
+                                    qry.setArgs(key, key + INC_BY);
+                                else
+                                    qry.setArgs(key);
+
+                                res = new ArrayList<>();
+
+                                for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.cache.query(qry).getAll()) {
+                                    List<Object> row = new ArrayList<>(2);
+
+                                    row.add(e.getKey());
+                                    row.add(e.getValue().idxVal1);
+
+                                    res.add(row);
+                                }
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        assertTrue(qryIdx == 0 || !res.isEmpty());
+
+                        if (!res.isEmpty()) {
+                            assertEquals(1, res.size());
+
+                            List<?> resVals = res.get(0);
+
+                            Integer key0 = (Integer)resVals.get(0);
+                            Integer val0 = (Integer)resVals.get(1);
+
+                            assertEquals(key, key0);
+                            assertTrue(val0.equals(key) || val0.equals(key + INC_BY));
+                        }
+                    }
+
+                    if (idx == 0) {
+                        SqlFieldsQuery qry = new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue");
+
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        List<List<?>> res;
+
+                        try {
+                            res = cache.cache.query(qry).getAll();
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+
+                        assertEquals(VALS, res.size());
+
+                        for (List<?> vals : res)
+                            info("Value: " + vals);
+                    }
+                }
+            };
+
+        int srvs;
+        int clients;
+
+        if (singleNode) {
+            srvs = 1;
+            clients = 0;
+        }
+        else {
+            srvs = 4;
+            clients = 2;
+        }
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            0,
+            DFLT_PARTITION_COUNT,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, MvccTestSqlIndexValue.class),
+            init,
+            writer,
+            reader);
+
+        for (Ignite node : G.allGrids())
+            checkActiveQueriesCleanup(node);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testJoinTransactional_SingleNode() throws Exception {
+        joinTransactional(true, false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testJoinTransactional_ClientServer() throws Exception {
+        joinTransactional(false, false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testJoinTransactional_DistributedJoins_ClientServer() throws Exception {
+        joinTransactional(false, true);
+    }
+
+    /**
+     * @param singleNode {@code True} for test with single node.
+     * @param distributedJoin {@code True} to test distributed joins.
+     * @throws Exception If failed.
+     */
+    private void joinTransactional(boolean singleNode, final boolean distributedJoin) throws Exception {
+        final int KEYS = 100;
+
+        final int writers = 4;
+
+        final int readers = 4;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Object, Object> cache = randomCache(caches, rnd);
+
+                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                        try {
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                Integer key = rnd.nextInt(KEYS);
+
+                                JoinTestChildKey childKey = new JoinTestChildKey(key);
+
+                                JoinTestChild child = (JoinTestChild)cache.cache.get(childKey);
+
+                                if (child == null) {
+                                    Integer parentKey = distributedJoin ? key + 100 : key;
+
+                                    child = new JoinTestChild(parentKey);
+
+                                    cache.cache.put(childKey, child);
+
+                                    JoinTestParent parent = new JoinTestParent(parentKey);
+
+                                    cache.cache.put(new JoinTestParentKey(parentKey), parent);
+                                }
+                                else {
+                                    cache.cache.remove(childKey);
+
+                                    cache.cache.remove(new JoinTestParentKey(child.parentId));
+                                }
+
+                                tx.commit();
+                            }
+
+                            cnt++;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    List<SqlFieldsQuery> qrys = new ArrayList<>();
+
+                    qrys.add(new SqlFieldsQuery("select c.parentId, p.id from " +
+                        "JoinTestChild c left outer join JoinTestParent p on (c.parentId = p.id)").
+                        setDistributedJoins(distributedJoin));
+
+                    qrys.add(new SqlFieldsQuery("select c.parentId, p.id from " +
+                        "JoinTestChild c left outer join JoinTestParent p on (c.parentId = p.id) where p.id = 10").
+                        setDistributedJoins(distributedJoin));
+
+                    qrys.add(new SqlFieldsQuery("select c.parentId, p.id from " +
+                        "JoinTestChild c left outer join JoinTestParent p on (c.parentId = p.id) where p.id != 10").
+                        setDistributedJoins(distributedJoin));
+
+                    while (!stop.get()) {
+                        TestCache<Object, Object> cache = randomCache(caches, rnd);
+
+                        try {
+                            for (SqlFieldsQuery qry : qrys) {
+                                List<List<?>> res = cache.cache.query(qry).getAll();
+
+                                if (!res.isEmpty()) {
+                                    for (List<?> resRow : res) {
+                                        Integer parentId = (Integer)resRow.get(1);
+
+                                        assertNotNull(parentId);
+                                    }
+                                }
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    if (idx == 0) {
+                        TestCache<Object, Object> cache = randomCache(caches, rnd);
+
+                        try {
+                            List<List<?>> res = cache.cache.query(qrys.get(0)).getAll();
+
+                            info("Reader finished, result: " + res);
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+                }
+            };
+
+        int srvs;
+        int clients;
+
+        if (singleNode) {
+            srvs = 1;
+            clients = 0;
+        }
+        else {
+            srvs = 4;
+            clients = 2;
+        }
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            0,
+            DFLT_PARTITION_COUNT,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            new InitIndexing(JoinTestParentKey.class, JoinTestParent.class,
+                JoinTestChildKey.class, JoinTestChild.class),
+            null,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testJoinTransactional_DistributedJoins_ClientServer2() throws Exception {
+        final int KEYS = 100;
+
+        final int writers = 1;
+
+        final int readers = 4;
+
+        final int CHILDREN_CNT = 10;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Object, Object> cache = randomCache(caches, rnd);
+
+                        IgniteTransactions txs = cache.cache.unwrap(Ignite.class).transactions();
+
+                        try {
+                            try (Transaction tx = txs.txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                                Integer key = rnd.nextInt(KEYS);
+
+                                JoinTestParentKey parentKey = new JoinTestParentKey(key);
+
+                                JoinTestParent parent = (JoinTestParent)cache.cache.get(parentKey);
+
+                                if (parent == null) {
+                                    for (int i = 0; i < CHILDREN_CNT; i++)
+                                        cache.cache.put(new JoinTestChildKey(key * 10_000 + i), new JoinTestChild(key));
+
+                                    cache.cache.put(parentKey, new JoinTestParent(key));
+                                }
+                                else {
+                                    for (int i = 0; i < CHILDREN_CNT; i++)
+                                        cache.cache.remove(new JoinTestChildKey(key * 10_000 + i));
+
+                                    cache.cache.remove(parentKey);
+                                }
+
+                                tx.commit();
+                            }
+
+                            cnt++;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    SqlFieldsQuery qry = new SqlFieldsQuery("select c.parentId, p.id from " +
+                        "JoinTestChild c left outer join JoinTestParent p on (c.parentId = p.id) where p.id=?").
+                        setDistributedJoins(true);
+
+                    int cnt = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Object, Object> cache = randomCache(caches, rnd);
+
+                        qry.setArgs(rnd.nextInt(KEYS));
+
+                        try {
+                            List<List<?>> res = cache.cache.query(qry).getAll();
+
+                            if (!res.isEmpty())
+                                assertEquals(CHILDREN_CNT, res.size());
+
+                            cnt++;
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Reader finished, read count: " + cnt);
+                }
+            };
+
+        readWriteTest(
+            null,
+            4,
+            2,
+            0,
+            DFLT_PARTITION_COUNT,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            new InitIndexing(JoinTestParentKey.class, JoinTestParent.class,
+                JoinTestChildKey.class, JoinTestChild.class),
+            null,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDistributedJoinSimple() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9446");
+
+        startGridsMultiThreaded(4);
+
+        Ignite srv0 = ignite(0);
+
+        int[] backups = {0, 1, 2};
+
+        for (int b : backups) {
+            IgniteCache<Object, Object> cache = srv0.createCache(
+                cacheConfiguration(cacheMode(), FULL_SYNC, b, DFLT_PARTITION_COUNT).
+                    setIndexedTypes(JoinTestParentKey.class, JoinTestParent.class, JoinTestChildKey.class, JoinTestChild.class));
+
+            int cntr = 0;
+
+            int expCnt = 0;
+
+            for (int i = 0; i < 10; i++) {
+                JoinTestParentKey parentKey = new JoinTestParentKey(i);
+
+                cache.put(parentKey, new JoinTestParent(i));
+
+                for (int c = 0; c < i; c++) {
+                    JoinTestChildKey childKey = new JoinTestChildKey(cntr++);
+
+                    cache.put(childKey, new JoinTestChild(i));
+
+                    expCnt++;
+                }
+            }
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("select c.parentId, p.id from " +
+                "JoinTestChild c join JoinTestParent p on (c.parentId = p.id)").
+                setDistributedJoins(true);
+
+            Map<Integer, Integer> resMap = new HashMap<>();
+
+            List<List<?>> res = cache.query(qry).getAll();
+
+            assertEquals(expCnt, res.size());
+
+            for (List<?> resRow : res) {
+                Integer parentId = (Integer)resRow.get(0);
+
+                Integer cnt = resMap.get(parentId);
+
+                if (cnt == null)
+                    resMap.put(parentId, 1);
+                else
+                    resMap.put(parentId, cnt + 1);
+            }
+
+            for (int i = 1; i < 10; i++)
+                assertEquals(i, (Object)resMap.get(i));
+
+            srv0.destroyCache(cache.getName());
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCacheRecreate() throws Exception {
+        cacheRecreate(new InitIndexing(Integer.class, MvccTestAccount.class));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCacheRecreateChangeIndexedType() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        final int PARTS = 64;
+
+        {
+            CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS).
+                setIndexedTypes(Integer.class, MvccTestAccount.class);
+
+            IgniteCache<Integer, MvccTestAccount> cache = (IgniteCache)srv0.createCache(ccfg);
+
+            for (int k = 0; k < PARTS * 2; k++) {
+                assertNull(cache.get(k));
+
+                int vals = k % 3 + 1;
+
+                for (int v = 0; v < vals; v++)
+                    cache.put(k, new MvccTestAccount(v, 1));
+
+                assertEquals(vals - 1, cache.get(k).val);
+            }
+
+            assertEquals(PARTS * 2, cache.query(new SqlQuery<>(MvccTestAccount.class, "true")).getAll().size());
+
+            srv0.destroyCache(cache.getName());
+        }
+
+        {
+            CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS).
+                setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class);
+
+            IgniteCache<Integer, MvccTestSqlIndexValue> cache = (IgniteCache)srv0.createCache(ccfg);
+
+            for (int k = 0; k < PARTS * 2; k++) {
+                assertNull(cache.get(k));
+
+                int vals = k % 3 + 1;
+
+                for (int v = 0; v < vals; v++)
+                    cache.put(k, new MvccTestSqlIndexValue(v));
+
+                assertEquals(vals - 1, cache.get(k).idxVal1);
+            }
+
+            assertEquals(PARTS * 2, cache.query(new SqlQuery<>(MvccTestSqlIndexValue.class, "true")).getAll().size());
+
+            srv0.destroyCache(cache.getName());
+        }
+
+        {
+            CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, PARTS).
+                setIndexedTypes(Long.class, Long.class);
+
+            IgniteCache<Long, Long> cache = (IgniteCache)srv0.createCache(ccfg);
+
+            for (int k = 0; k < PARTS * 2; k++) {
+                assertNull(cache.get((long)k));
+
+                int vals = k % 3 + 1;
+
+                for (int v = 0; v < vals; v++)
+                    cache.put((long)k, (long)v);
+
+                assertEquals((long)(vals - 1), (Object)cache.get((long)k));
+            }
+
+            assertEquals(PARTS * 2, cache.query(new SqlQuery<>(Long.class, "true")).getAll().size());
+
+            srv0.destroyCache(cache.getName());
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testChangeValueType1() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+            setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class, Integer.class, Integer.class);
+
+        IgniteCache<Object, Object> cache = srv0.createCache(ccfg);
+
+        cache.put(1, new MvccTestSqlIndexValue(1));
+        cache.put(1, new MvccTestSqlIndexValue(2));
+
+        checkSingleResult(cache, new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue"), 2);
+
+        cache.put(1, 1);
+
+        assertEquals(0, cache.query(new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue")).getAll().size());
+
+        checkSingleResult(cache, new SqlFieldsQuery("select _val from Integer"), 1);
+
+        cache.put(1, 2);
+
+        checkSingleResult(cache, new SqlFieldsQuery("select _val from Integer"), 2);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testChangeValueType2() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        CacheConfiguration<Object, Object> ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+            setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class, Integer.class, Integer.class);
+
+        IgniteCache<Object, Object> cache = srv0.createCache(ccfg);
+
+        cache.put(1, new MvccTestSqlIndexValue(1));
+        cache.put(1, new MvccTestSqlIndexValue(2));
+
+        checkSingleResult(cache, new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue"), 2);
+
+        cache.remove(1);
+
+        assertEquals(0, cache.query(new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue")).getAll().size());
+
+        cache.put(1, 1);
+
+        assertEquals(0, cache.query(new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue")).getAll().size());
+
+        checkSingleResult(cache, new SqlFieldsQuery("select _val from Integer"), 1);
+
+        cache.put(1, 2);
+
+        checkSingleResult(cache, new SqlFieldsQuery("select _val from Integer"), 2);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCountTransactional_SingleNode() throws Exception {
+        countTransactional(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCountTransactional_ClientServer() throws Exception {
+        countTransactional(false);
+    }
+
+    /**
+     * @param singleNode {@code True} for test with single node.
+     * @throws Exception If failed.
+     */
+    private void countTransactional(boolean singleNode) throws Exception {
+        final int writers = 4;
+
+        final int readers = 4;
+
+        final int THREAD_KEY_RANGE = 100;
+
+        final int VAL_RANGE = 10;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int min = idx * THREAD_KEY_RANGE;
+                    int max = min + THREAD_KEY_RANGE;
+
+                    info("Thread range [min=" + min + ", max=" + max + ']');
+
+                    int cnt = 0;
+
+                    Set<Integer> keys = new LinkedHashSet<>();
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        try {
+                            // Add or remove 10 keys.
+                            if (!keys.isEmpty() && (keys.size() == THREAD_KEY_RANGE || rnd.nextInt(3) == 0)) {
+                                Set<Integer> rmvKeys = new HashSet<>();
+
+                                for (Integer key : keys) {
+                                    rmvKeys.add(key);
+
+                                    if (rmvKeys.size() == 10)
+                                        break;
+                                }
+
+                                assertEquals(10, rmvKeys.size());
+
+                                cache.cache.removeAll(rmvKeys);
+
+                                keys.removeAll(rmvKeys);
+                            }
+                            else {
+                                TreeMap<Integer, MvccTestSqlIndexValue> map = new TreeMap<>();
+
+                                while (map.size() != 10) {
+                                    Integer key = rnd.nextInt(min, max);
+
+                                    if (keys.add(key))
+                                        map.put(key, new MvccTestSqlIndexValue(rnd.nextInt(VAL_RANGE)));
+                                }
+
+                                assertEquals(10, map.size());
+
+                                cache.cache.putAll(map);
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    List<SqlFieldsQuery> qrys = new ArrayList<>();
+
+                    qrys.add(new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue"));
+
+                    qrys.add(new SqlFieldsQuery(
+                        "select count(*) from MvccTestSqlIndexValue where idxVal1 >= 0 and idxVal1 <= " + VAL_RANGE));
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        try {
+                            for (SqlFieldsQuery qry : qrys) {
+                                List<List<?>> res = cache.cache.query(qry).getAll();
+
+                                assertEquals(1, res.size());
+
+                                Long cnt = (Long)res.get(0).get(0);
+
+                                assertTrue(cnt % 10 == 0);
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+                }
+            };
+
+        int srvs;
+        int clients;
+
+        if (singleNode) {
+            srvs = 1;
+            clients = 0;
+        }
+        else {
+            srvs = 4;
+            clients = 2;
+        }
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            0,
+            DFLT_PARTITION_COUNT,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, MvccTestSqlIndexValue.class),
+            null,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMaxMinTransactional_SingleNode() throws Exception {
+        maxMinTransactional(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testMaxMinTransactional_ClientServer() throws Exception {
+        maxMinTransactional(false);
+    }
+
+    /**
+     * @param singleNode {@code True} for test with single node.
+     * @throws Exception If failed.
+     */
+    private void maxMinTransactional(boolean singleNode) throws Exception {
+        final int writers = 1;
+
+        final int readers = 1;
+
+        final int THREAD_OPS = 10;
+
+        final int OP_RANGE = 10;
+
+        final int THREAD_KEY_RANGE = OP_RANGE * THREAD_OPS;
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> writer =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    int min = idx * THREAD_KEY_RANGE;
+
+                    info("Thread range [start=" + min + ']');
+
+                    int cnt = 0;
+
+                    boolean add = true;
+
+                    int op = 0;
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        try {
+                            int startKey = min + op * OP_RANGE;
+
+                            if (add) {
+                                Map<Integer, MvccTestSqlIndexValue> vals = new HashMap<>();
+
+                                for (int i = 0; i < 10; i++) {
+                                    Integer key = startKey + i + 1;
+
+                                    vals.put(key, new MvccTestSqlIndexValue(key));
+                                }
+
+                                cache.cache.putAll(vals);
+
+                                // info("put " + vals.keySet());
+                            }
+                            else {
+                                Set<Integer> rmvKeys = new HashSet<>();
+
+                                for (int i = 0; i < 10; i++)
+                                    rmvKeys.add(startKey + i + 1);
+
+                                cache.cache.removeAll(rmvKeys);
+
+                                // info("remove " + rmvKeys);
+                            }
+
+                            if (++op == THREAD_OPS) {
+                                add = !add;
+
+                                op = 0;
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+
+                    info("Writer finished, updates: " + cnt);
+                }
+            };
+
+        GridInClosure3<Integer, List<TestCache>, AtomicBoolean> reader =
+            new GridInClosure3<Integer, List<TestCache>, AtomicBoolean>() {
+                @Override public void apply(Integer idx, List<TestCache> caches, AtomicBoolean stop) {
+                    ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                    List<SqlFieldsQuery> maxQrys = new ArrayList<>();
+                    List<SqlFieldsQuery> minQrys = new ArrayList<>();
+
+                    maxQrys.add(new SqlFieldsQuery("select max(idxVal1) from MvccTestSqlIndexValue"));
+                    maxQrys.add(new SqlFieldsQuery("select max(idxVal1) from MvccTestSqlIndexValue where idxVal1 >= 0"));
+
+                    minQrys.add(new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue"));
+                    minQrys.add(new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue where idxVal1 >= 0"));
+
+                    while (!stop.get()) {
+                        TestCache<Integer, MvccTestSqlIndexValue> cache = randomCache(caches, rnd);
+
+                        try {
+                            for (SqlFieldsQuery qry : maxQrys) {
+                                List<List<?>> res = cache.cache.query(qry).getAll();
+
+                                assertEquals(1, res.size());
+
+                                Integer m = (Integer)res.get(0).get(0);
+
+                                assertTrue(m == null || m % 10 == 0);
+                            }
+
+                            for (SqlFieldsQuery qry : minQrys) {
+                                List<List<?>> res = cache.cache.query(qry).getAll();
+
+                                assertEquals(1, res.size());
+
+                                Integer m = (Integer)res.get(0).get(0);
+
+                                assertTrue(m == null || m % 10 == 1);
+                            }
+                        }
+                        finally {
+                            cache.readUnlock();
+                        }
+                    }
+                }
+            };
+
+        int srvs;
+        int clients;
+
+        if (singleNode) {
+            srvs = 1;
+            clients = 0;
+        }
+        else {
+            srvs = 4;
+            clients = 2;
+        }
+
+        readWriteTest(
+            null,
+            srvs,
+            clients,
+            0,
+            DFLT_PARTITION_COUNT,
+            writers,
+            readers,
+            DFLT_TEST_TIME,
+            new InitIndexing(Integer.class, MvccTestSqlIndexValue.class),
+            null,
+            writer,
+            reader);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlQueriesWithMvcc() throws Exception {
+        Ignite srv0 = startGrid(0);
+
+        IgniteCache<Integer, MvccTestSqlIndexValue> cache = (IgniteCache)srv0.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+                setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class));
+
+        for (int i = 0; i < 10; i++)
+            cache.put(i, new MvccTestSqlIndexValue(i));
+
+        sqlQueriesWithMvcc(cache, true);
+
+        sqlQueriesWithMvcc(cache, false);
+
+        // TODO IGNITE-8031
+//        startGrid(1);
+//
+//        awaitPartitionMapExchange();
+//
+//        sqlQueriesWithMvcc(cache, false);
+    }
+
+    /**
+     * @param cache Cache.
+     * @param loc Local query flag.
+     */
+    private void sqlQueriesWithMvcc(IgniteCache<Integer, MvccTestSqlIndexValue> cache, boolean loc) {
+        assertEquals(10,
+            cache.query(new SqlQuery<>(MvccTestSqlIndexValue.class, "true").setLocal(loc)).getAll().size());
+
+        assertEquals(10,
+            cache.query(new SqlFieldsQuery("select idxVal1 from MvccTestSqlIndexValue").setLocal(loc)).getAll().size());
+
+        assertEquals(10,
+            cache.query(new SqlFieldsQuery("" +
+                "select (select count (*) from MvccTestSqlIndexValue where idxVal1 = t1.idxVal1) as c1," +
+                " (select 0 from dual) as c2" +
+                " from MvccTestSqlIndexValue as t1" +
+                " join (select * from MvccTestSqlIndexValue) as t2 on t1.idxVal1 = t2.idxVal1").setLocal(loc)).getAll().size());
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select max(idxVal1) from MvccTestSqlIndexValue").setLocal(loc), 9);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select max(idxVal1) from MvccTestSqlIndexValue where idxVal1 > 0").setLocal(loc), 9);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select max(idxVal1) from MvccTestSqlIndexValue where idxVal1 < 5").setLocal(loc), 4);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue").setLocal(loc), 0);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue where idxVal1 < 100").setLocal(loc), 0);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue where idxVal1 < 5").setLocal(loc), 0);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select min(idxVal1) from MvccTestSqlIndexValue where idxVal1 > 5").setLocal(loc), 6);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue").setLocal(loc), 10L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 >= 0").setLocal(loc), 10L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 >= 0 and idxVal1 < 100").setLocal(loc), 10L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 >0 and idxVal1 < 5").setLocal(loc), 4L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 >= 1").setLocal(loc), 9L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 > 100").setLocal(loc), 0L);
+
+        checkSingleResult(cache,
+            new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue where idxVal1 = 1").setLocal(loc), 1L);
+    }
+
+    /**
+     * @param cache Cache.
+     * @param qry Query.
+     * @param exp Expected value.
+     */
+    private void checkSingleResult(IgniteCache cache, SqlFieldsQuery qry, Object exp) {
+        List<List<?>> res = cache.query(qry).getAll();
+
+        assertEquals(1, res.size());
+
+        List<?> row = res.get(0);
+
+        assertEquals(1, row.size());
+
+        assertEquals(exp, row.get(0));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlSimple() throws Exception {
+        startGrid(0);
+
+        for (int i = 0; i < 4; i++)
+            sqlSimple(i * 512);
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        for (int i = 0; i < 5; i++)
+            sqlSimple(rnd.nextInt(2048));
+    }
+
+    /**
+     * @param inlineSize Inline size.
+     * @throws Exception If failed.
+     */
+    private void sqlSimple(int inlineSize) throws Exception {
+        Ignite srv0 = ignite(0);
+
+        IgniteCache<Integer, MvccTestSqlIndexValue> cache =  (IgniteCache)srv0.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+                setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class).
+                setSqlIndexMaxInlineSize(inlineSize));
+
+        Map<Integer, Integer> expVals = new HashMap<>();
+
+        checkValues(expVals, cache);
+
+        cache.put(1, new MvccTestSqlIndexValue(1));
+        expVals.put(1, 1);
+
+        checkValues(expVals, cache);
+
+        cache.put(1, new MvccTestSqlIndexValue(2));
+        expVals.put(1, 2);
+
+        checkValues(expVals, cache);
+
+        cache.put(2, new MvccTestSqlIndexValue(1));
+        expVals.put(2, 1);
+        cache.put(3, new MvccTestSqlIndexValue(1));
+        expVals.put(3, 1);
+        cache.put(4, new MvccTestSqlIndexValue(1));
+        expVals.put(4, 1);
+
+        checkValues(expVals, cache);
+
+        cache.remove(1);
+        expVals.remove(1);
+
+        checkValues(expVals, cache);
+
+        checkNoValue(1, cache);
+
+        cache.put(1, new MvccTestSqlIndexValue(10));
+        expVals.put(1, 10);
+
+        checkValues(expVals, cache);
+
+        checkActiveQueriesCleanup(srv0);
+
+        srv0.destroyCache(cache.getName());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSqlSimplePutRemoveRandom() throws Exception {
+        startGrid(0);
+
+        testSqlSimplePutRemoveRandom(0);
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        for (int i = 0; i < 3; i++)
+            testSqlSimplePutRemoveRandom(rnd.nextInt(2048));
+    }
+
+    /**
+     * @param inlineSize Inline size.
+     * @throws Exception If failed.
+     */
+    private void testSqlSimplePutRemoveRandom(int inlineSize) throws Exception {
+        Ignite srv0 = grid(0);
+
+        IgniteCache<Integer, MvccTestSqlIndexValue> cache = (IgniteCache) srv0.createCache(
+            cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT).
+                setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class).
+                setSqlIndexMaxInlineSize(inlineSize));
+
+        Map<Integer, Integer> expVals = new HashMap<>();
+
+        final int KEYS = 100;
+        final int VALS = 10;
+
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        long stopTime = System.currentTimeMillis() + 5_000;
+
+        for (int i = 0; i < 100_000; i++) {
+            Integer key = rnd.nextInt(KEYS);
+
+            if (rnd.nextInt(5) == 0) {
+                cache.remove(key);
+
+                expVals.remove(key);
+            }
+            else {
+                Integer val = rnd.nextInt(VALS);
+
+                cache.put(key, new MvccTestSqlIndexValue(val));
+
+                expVals.put(key, val);
+            }
+
+            checkValues(expVals, cache);
+
+            if (System.currentTimeMillis() > stopTime) {
+                info("Stop test, iteration: " + i);
+
+                break;
+            }
+        }
+
+        for (int i = 0; i < KEYS; i++) {
+            if (!expVals.containsKey(i))
+                checkNoValue(i, cache);
+        }
+
+        checkActiveQueriesCleanup(srv0);
+
+        srv0.destroyCache(cache.getName());
+    }
+
+    /**
+     * @param key Key.
+     * @param cache Cache.
+     */
+    private void checkNoValue(Object key, IgniteCache cache) {
+        SqlQuery<Integer, MvccTestSqlIndexValue> qry;
+
+        qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "_key = ?");
+
+        qry.setArgs(key);
+
+        List<IgniteCache.Entry<Integer, MvccTestSqlIndexValue>> res = cache.query(qry).getAll();
+
+        assertTrue(res.isEmpty());
+    }
+
+    /**
+     * @param expVals Expected values.
+     * @param cache Cache.
+     */
+    private void checkValues(Map<Integer, Integer> expVals, IgniteCache<Integer, MvccTestSqlIndexValue> cache) {
+        SqlFieldsQuery cntQry = new SqlFieldsQuery("select count(*) from MvccTestSqlIndexValue");
+
+        Long cnt = (Long)cache.query(cntQry).getAll().get(0).get(0);
+
+        assertEquals((long)expVals.size(), (Object)cnt);
+
+        SqlQuery<Integer, MvccTestSqlIndexValue> qry;
+
+        qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "true");
+
+        Map<Integer, Integer> vals = new HashMap<>();
+
+        for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.query(qry).getAll())
+            assertNull(vals.put(e.getKey(), e.getValue().idxVal1));
+
+        assertEquals(expVals, vals);
+
+        qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "_key >= 0");
+
+        vals = new HashMap<>();
+
+        for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.query(qry).getAll())
+            assertNull(vals.put(e.getKey(), e.getValue().idxVal1));
+
+        assertEquals(expVals, vals);
+
+        qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "idxVal1 >= 0");
+
+        vals = new HashMap<>();
+
+        for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.query(qry).getAll())
+            assertNull(vals.put(e.getKey(), e.getValue().idxVal1));
+
+        assertEquals(expVals, vals);
+
+        Map<Integer, Set<Integer>> expIdxVals = new HashMap<>();
+
+        for (Map.Entry<Integer, Integer> e : expVals.entrySet()) {
+            qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "_key = ?");
+
+            qry.setArgs(e.getKey());
+
+            List<IgniteCache.Entry<Integer, MvccTestSqlIndexValue>> res = cache.query(qry).getAll();
+
+            assertEquals(1, res.size());
+            assertEquals(e.getKey(), res.get(0).getKey());
+            assertEquals(e.getValue(), (Integer)res.get(0).getValue().idxVal1);
+
+            SqlFieldsQuery fieldsQry = new SqlFieldsQuery("select _key, idxVal1 from MvccTestSqlIndexValue where _key=?");
+            fieldsQry.setArgs(e.getKey());
+
+            List<List<?>> fieldsRes = cache.query(fieldsQry).getAll();
+
+            assertEquals(1, fieldsRes.size());
+            assertEquals(e.getKey(), fieldsRes.get(0).get(0));
+            assertEquals(e.getValue(), fieldsRes.get(0).get(1));
+
+            Integer val = e.getValue();
+
+            Set<Integer> keys = expIdxVals.get(val);
+
+            if (keys == null)
+                expIdxVals.put(val, keys = new HashSet<>());
+
+            assertTrue(keys.add(e.getKey()));
+        }
+
+        for (Map.Entry<Integer, Set<Integer>> expE : expIdxVals.entrySet()) {
+            qry = new SqlQuery<>(MvccTestSqlIndexValue.class, "idxVal1 = ?");
+            qry.setArgs(expE.getKey());
+
+            vals = new HashMap<>();
+
+            for (IgniteCache.Entry<Integer, MvccTestSqlIndexValue> e : cache.query(qry).getAll()) {
+                assertNull(vals.put(e.getKey(), e.getValue().idxVal1));
+
+                assertEquals(expE.getKey(), (Integer)e.getValue().idxVal1);
+
+                assertTrue(expE.getValue().contains(e.getKey()));
+            }
+
+            assertEquals(expE.getValue().size(), vals.size());
+        }
+    }
+
+    /**
+     *
+     */
+    static class JoinTestParentKey implements Serializable {
+        /** */
+        private int key;
+
+        /**
+         * @param key Key.
+         */
+        JoinTestParentKey(int key) {
+            this.key = key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (o == null || getClass() != o.getClass())
+                return false;
+
+            JoinTestParentKey that = (JoinTestParentKey)o;
+
+            return key == that.key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return key;
+        }
+    }
+
+    /**
+     *
+     */
+    static class JoinTestParent {
+        /** */
+        @QuerySqlField(index = true)
+        private int id;
+
+        /**
+         * @param id ID.
+         */
+        JoinTestParent(int id) {
+            this.id = id;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(JoinTestParent.class, this);
+        }
+    }
+
+    /**
+     *
+     */
+    static class JoinTestChildKey implements Serializable {
+        /** */
+        @QuerySqlField(index = true)
+        private int key;
+
+        /**
+         * @param key Key.
+         */
+        JoinTestChildKey(int key) {
+            this.key = key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (o == null || getClass() != o.getClass())
+                return false;
+
+            JoinTestChildKey that = (JoinTestChildKey)o;
+
+            return key == that.key;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return key;
+        }
+    }
+
+    /**
+     *
+     */
+    static class JoinTestChild {
+        /** */
+        @QuerySqlField(index = true)
+        private int parentId;
+
+        /**
+         * @param parentId Parent ID.
+         */
+        JoinTestChild(int parentId) {
+            this.parentId = parentId;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(JoinTestChild.class, this);
+        }
+    }
+
+    /**
+     *
+     */
+    static class MvccTestSqlIndexValue implements Serializable {
+        /** */
+        @QuerySqlField(index = true)
+        private int idxVal1;
+
+        /**
+         * @param idxVal1 Indexed value 1.
+         */
+        MvccTestSqlIndexValue(int idxVal1) {
+            this.idxVal1 = idxVal1;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(MvccTestSqlIndexValue.class, this);
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesAbstractTest.java
new file mode 100644
index 0000000..2aad2d4
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesAbstractTest.java
@@ -0,0 +1,1834 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.io.Serializable;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.Phaser;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import javax.cache.Cache;
+import javax.cache.CacheException;
+import javax.cache.processor.EntryProcessor;
+import javax.cache.processor.EntryProcessorException;
+import javax.cache.processor.MutableEntry;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.IgniteCacheProxy;
+import org.apache.ignite.internal.processors.cache.KeyCacheObject;
+import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
+import org.apache.ignite.internal.processors.cache.query.SqlFieldsQueryEx;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.future.GridCompoundFuture;
+import org.apache.ignite.internal.util.lang.GridCursor;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.ReadMode.SQL_SUM;
+import static org.apache.ignite.internal.processors.cache.mvcc.CacheMvccAbstractTest.WriteMode.DML;
+import static org.apache.ignite.testframework.GridTestUtils.runAsync;
+import static org.apache.ignite.testframework.GridTestUtils.runMultiThreaded;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Tests for transactional SQL.
+ */
+public abstract class CacheMvccSqlTxQueriesAbstractTest extends CacheMvccAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_SingleNode_SinglePartition() throws Exception {
+        accountsTxReadAll(1, 0, 0, 1,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_SingleNode_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxDmlSql_SingleNode();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSumSql_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL_SUM, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSumSql_WithRemoves_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL_SUM, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSumSql_WithRemoves__ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL_SUM, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSumSql_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL_SUM, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_SingleNode() throws Exception {
+        accountsTxReadAll(1, 0, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_SingleNode_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxDmlSql_WithRemoves_SingleNode();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_ClientServer_Backups0() throws Exception {
+        accountsTxReadAll(4, 2, 0, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_ClientServer_Backups0_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxDmlSql_WithRemoves_ClientServer_Backups0();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(3, 0, 1, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_ClientServer_Backups1() throws Exception {
+        accountsTxReadAll(4, 2, 1, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_ClientServer_Backups1_Persistence() throws Exception {
+        persistence = true;
+
+        testAccountsTxDmlSql_WithRemoves_ClientServer_Backups1();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), false, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_WithRemoves_ClientServer_Backups2() throws Exception {
+        accountsTxReadAll(4, 2, 2, 64,
+            new InitIndexing(Integer.class, MvccTestAccount.class), true, SQL, DML);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testAccountsTxDmlSql_ClientServer_Backups2_Persistence() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9292");
+
+        persistence = true;
+
+        testAccountsTxDmlSql_ClientServer_Backups2();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (4,4),(5,5),(6,6)");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        assertEquals(4, cache.get(4));
+        assertEquals(5, cache.get(5));
+        assertEquals(6, cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertStaticCacheImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryDeleteStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            qry = new SqlFieldsQuery("DELETE FROM Integer WHERE 1 = 1");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertNull(cache.get(1));
+        assertNull(cache.get(2));
+        assertNull(cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryFastDeleteStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            qry = new SqlFieldsQuery("DELETE FROM Integer WHERE _key = 1");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(1L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertNull(cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryFastUpdateStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            qry = new SqlFieldsQuery("UPDATE Integer SET _val = 8 WHERE _key = 1");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(1L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(8, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryFastDeleteObjectStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        assertEquals(new MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestSqlIndexValue WHERE _key = 1");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(1L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertNull(cache.get(1));
+        assertEquals(new MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new MvccTestSqlIndexValue(3), cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryFastUpdateObjectStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        assertEquals(new MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE MvccTestSqlIndexValue SET idxVal1 = 8 WHERE _key = 1");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(1L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(new MvccTestSqlIndexValue(8), cache.get(1));
+        assertEquals(new MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new MvccTestSqlIndexValue(3), cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryDeleteStaticCacheImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM Integer WHERE 1 = 1")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertNull(cache.get(1));
+        assertNull(cache.get(2));
+        assertNull(cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryUpdateStaticCache() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = (_key * 10)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(10, cache.get(1));
+        assertEquals(20, cache.get(2));
+        assertEquals(30, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryUpdateStaticCacheImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1, 1, 2, 2, 3, 3));
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = (_key * 10)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(10, cache.get(1));
+        assertEquals(20, cache.get(2));
+        assertEquals(30, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryDeadlock() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        client = true;
+
+        startGridsMultiThreaded(2, 2);
+
+        final CyclicBarrier barrier = new CyclicBarrier(2);
+        final AtomicInteger idx = new AtomicInteger();
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int id = idx.getAndIncrement();
+
+                IgniteEx node = grid(id);
+
+                try {
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        String qry1 = "INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)";
+                        String qry2 = "INSERT INTO Integer (_key, _val) values (4,4),(5,5),(6,6)";
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery((id % 2) == 0 ? qry1 : qry2);
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            cur.getAll();
+                        }
+
+                        barrier.await();
+
+                        qry = new SqlFieldsQuery((id % 2) == 0 ? qry2 : qry1);
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            cur.getAll();
+                        }
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 2);
+
+        Exception ex0 = ex.get();
+
+        assertNotNull(ex0);
+
+        if (!X.hasCause(ex0, IgniteTxTimeoutCheckedException.class))
+            throw ex0;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryDeadlockImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 0, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        final Phaser phaser = new Phaser(2);
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        GridTestUtils.runAsync(new Runnable() {
+            @Override public void run() {
+                IgniteEx node = grid(0);
+
+                try {
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            cur.getAll();
+                        }
+
+                        awaitPhase(phaser, 2);
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+                finally {
+                    phaser.arrive();
+                }
+            }
+        });
+
+        phaser.arriveAndAwaitAdvance();
+
+        IgniteEx node = grid(1);
+
+        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            cur.getAll();
+        }
+        catch (Exception e) {
+            phaser.arrive();
+
+            onException(ex, e);
+        }
+
+        phaser.arriveAndAwaitAdvance();
+
+        Exception ex0 = ex.get();
+
+        assertNotNull(ex0);
+
+        if (!X.hasCause(ex0, IgniteTxTimeoutCheckedException.class))
+            throw ex0;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertClient() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGrid(0);
+
+        client = true;
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        Ignite checkNode = grid(0);
+        Ignite updateNode = grid(1);
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (4,4),(5,5),(6,6)");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        assertEquals(4, cache.get(4));
+        assertEquals(5, cache.get(5));
+        assertEquals(6, cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertClientImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGrid(0);
+
+        client = true;
+
+        startGrid(1);
+
+        awaitPartitionMapExchange();
+
+        Ignite checkNode = grid(0);
+        Ignite updateNode = grid(1);
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertSubquery() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class, Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        awaitPartitionMapExchange();
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val)" +
+                " SELECT _key * 10, idxVal1 FROM MvccTestSqlIndexValue");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(1, cache0.get(10));
+        assertEquals(2, cache0.get(20));
+        assertEquals(3, cache0.get(30));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertSubqueryImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class, Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        awaitPartitionMapExchange();
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val)" +
+            " SELECT _key * 10, idxVal1 FROM MvccTestSqlIndexValue")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(1, cache0.get(10));
+        assertEquals(2, cache0.get(20));
+        assertEquals(3, cache0.get(30));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryUpdateSubquery() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class, Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        awaitPartitionMapExchange();
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE MvccTestSqlIndexValue AS t " +
+                "SET (idxVal1) = (SELECT idxVal1*10 FROM MvccTestSqlIndexValue WHERE t._key = _key)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(10, ((MvccTestSqlIndexValue)cache.get(1)).idxVal1);
+        assertEquals(20, ((MvccTestSqlIndexValue)cache.get(2)).idxVal1);
+        assertEquals(30, ((MvccTestSqlIndexValue)cache.get(3)).idxVal1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryUpdateSubqueryImplicit() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class, Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        awaitPartitionMapExchange();
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1, new MvccTestSqlIndexValue(1),
+            2, new MvccTestSqlIndexValue(2),
+            3, new MvccTestSqlIndexValue(3)));
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE MvccTestSqlIndexValue AS t " +
+            "SET (idxVal1) = (SELECT idxVal1*10 FROM MvccTestSqlIndexValue WHERE t._key = _key)")
+            .setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(10, ((MvccTestSqlIndexValue)cache.get(1)).idxVal1);
+        assertEquals(20, ((MvccTestSqlIndexValue)cache.get(2)).idxVal1);
+        assertEquals(30, ((MvccTestSqlIndexValue)cache.get(3)).idxVal1);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertMultithread() throws Exception {
+        final int THREAD_CNT = 8;
+        final int BATCH_SIZE = 1000;
+        final int ROUNDS = 10;
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        client = true;
+
+        startGridsMultiThreaded(2, 2);
+
+        final AtomicInteger seq = new AtomicInteger();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                for (int r = 0; r < ROUNDS; r++) {
+                    StringBuilder bldr = new StringBuilder("INSERT INTO Integer (_key, _val) values ");
+
+                    int start = seq.getAndAdd(BATCH_SIZE);
+
+                    for (int i = start, end = start + BATCH_SIZE; i < end; i++) {
+                        if (i != start)
+                            bldr.append(',');
+
+                        bldr
+                            .append('(')
+                            .append(i)
+                            .append(',')
+                            .append(i)
+                            .append(')');
+                    }
+
+                    Random rnd = ThreadLocalRandom.current();
+
+                    Ignite checkNode = grid(rnd.nextInt(4));
+                    Ignite updateNode = grid(rnd.nextInt(4));
+
+                    IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+                    try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery(bldr.toString()).setPageSize(100);
+
+                        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            assertEquals((long)BATCH_SIZE, cur.iterator().next().get(0));
+                        }
+
+                        tx.commit();
+                    }
+
+                    for (int i = start, end = start + BATCH_SIZE; i < end; i++)
+                        assertEquals(i, cache.get(i));
+                }
+
+            }
+        }, THREAD_CNT);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertUpdateMultithread() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        final Phaser phaser = new Phaser(2);
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        GridCompoundFuture fut = new GridCompoundFuture();
+
+        fut.add(multithreadedAsync(new Runnable() {
+            @Override public void run() {
+                IgniteEx node = grid(0);
+
+                try {
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            cur.getAll();
+                        }
+
+                        awaitPhase(phaser, 2);
+
+                        qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (4,4),(5,5),(6,6)");
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            cur.getAll();
+                        }
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 1));
+
+        fut.add(multithreadedAsync(new Runnable() {
+            @Override public void run() {
+                IgniteEx node = grid(1);
+
+                try {
+                    phaser.arriveAndAwaitAdvance();
+
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        IgniteCache<Integer, Integer> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        cache0.invokeAllAsync(F.asSet(1, 2, 3, 4, 5, 6), new EntryProcessor<Integer, Integer, Void>() {
+                            @Override
+                            public Void process(MutableEntry<Integer, Integer> entry,
+                                Object... arguments) throws EntryProcessorException {
+                                entry.setValue(entry.getValue() * 10);
+
+                                return null;
+                            }
+                        });
+
+                        phaser.arrive();
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 1));
+
+        fut.markInitialized();
+
+        try {
+            fut.get(TX_TIMEOUT);
+        }
+        catch (IgniteCheckedException e) {
+            onException(ex, e);
+        }
+
+        Exception ex0 = ex.get();
+
+        if (ex0 != null)
+            throw ex0;
+
+        IgniteCache cache = grid(0).cache(DEFAULT_CACHE_NAME);
+
+        assertEquals(10, cache.get(1));
+        assertEquals(20, cache.get(2));
+        assertEquals(30, cache.get(3));
+        assertEquals(40, cache.get(4));
+        assertEquals(50, cache.get(5));
+        assertEquals(60, cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertVersionConflict() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        IgniteCache cache = grid(0).cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1)");
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        final CyclicBarrier barrier = new CyclicBarrier(2);
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        runMultiThreaded(new Runnable() {
+            @Override public void run() {
+                IgniteEx node = grid(0);
+
+                try {
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        barrier.await();
+
+                        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        SqlFieldsQuery qry;
+
+                        synchronized (barrier) {
+                            qry = new SqlFieldsQuery("SELECT * FROM Integer");
+
+                            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                                assertEquals(1, cur.getAll().size());
+                            }
+                        }
+
+                        barrier.await();
+
+                        qry = new SqlFieldsQuery("UPDATE Integer SET _val = (_key * 10)");
+
+                        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                            assertEquals(1L, cur.iterator().next().get(0));
+                        }
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 2, "tx-thread");
+
+        IgniteSQLException ex0 = X.cause(ex.get(), IgniteSQLException.class);
+
+        assertNotNull("Exception has not been thrown.", ex0);
+        assertEquals("Mvcc version mismatch.", ex0.getMessage());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInsertAndFastDeleteWithoutVersionConflict() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> cache0 = grid(0).cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx1 = grid(0).transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            // obtain tx version
+            cache0.query(new SqlFieldsQuery("select * from Integer where _key = 1"));
+
+            runAsync(() -> {
+                cache0.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, ?)").setArgs(1, 1));
+            }).get();
+
+            cache0.query(new SqlFieldsQuery("delete from Integer where _key = ?").setArgs(1));
+
+            tx1.commit();
+        }
+        catch (Exception e) {
+            e.printStackTrace();
+
+            fail("Exception is not expected here");
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInsertAndFastUpdateWithoutVersionConflict() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> cache0 = grid(0).cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx1 = grid(0).transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            // obtain tx version
+            cache0.query(new SqlFieldsQuery("select * from Integer where _key = 1"));
+
+            runAsync(() -> {
+                cache0.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, ?)").setArgs(1, 1));
+            }).get();
+
+            cache0.query(new SqlFieldsQuery("update Integer set _val = ? where _key = ?").setArgs(1, 1));
+
+            tx1.commit();
+        }
+        catch (Exception e) {
+            e.printStackTrace();
+
+            fail("Exception is not expected here");
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInsertFastUpdateConcurrent() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9292");
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(2);
+
+        IgniteCache<?, ?> cache0 = grid(0).cache(DEFAULT_CACHE_NAME);
+
+        try {
+            for (int i = 0; i < 100; i++) {
+                int key = i;
+                CompletableFuture.allOf(
+                    CompletableFuture.runAsync(() -> {
+                        cache0.query(new SqlFieldsQuery("insert into Integer(_key, _val) values(?, ?)").setArgs(key, key));
+                    }),
+                    CompletableFuture.runAsync(() -> {
+                        cache0.query(new SqlFieldsQuery("update Integer set _val = ? where _key = ?").setArgs(key, key));
+                    })
+                ).get();
+            }
+        }
+        catch (Exception e) {
+            e.printStackTrace();
+
+            fail("Exception is not expected here");
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertRollback() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (4,4),(5,5),(6,6)");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.rollback();
+        }
+
+        for (int i = 1; i <= 6; i++)
+            assertTrue(cache.query(new SqlFieldsQuery("SELECT * FROM Integer WHERE _key = 1")).getAll().isEmpty());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertUpdateSameKeys() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        final Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            qry = new SqlFieldsQuery("UPDATE Integer SET _val = (_key * 10)");
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                cur.getAll();
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(10, cache.get(1));
+        assertEquals(20, cache.get(2));
+        assertEquals(30, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryInsertUpdateSameKeysInSameOperation() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        final Ignite updateNode = grid(rnd.nextInt(4));
+
+        GridTestUtils.assertThrows(null, new Callable<Object>() {
+            @Override public Object call() throws Exception {
+                try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                    tx.timeout(TX_TIMEOUT);
+
+                    SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(1,2),(1,3)");
+
+                    IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+                    cache0.query(qry).getAll();
+
+                    tx.commit();
+                }
+
+                return null;
+            }
+        }, CacheException.class, "Duplicate key during INSERT [key=KeyCacheObjectImpl");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryPendingUpdates() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode = grid(rnd.nextInt(4));
+        final Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)");
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry.setSql("UPDATE Integer SET _val = (_key * 10)"))) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            for (List<?> row : cache0.query(qry.setSql("SELECT _key, _val FROM Integer")).getAll()) {
+                assertEquals((Integer)row.get(0) * 10, row.get(1));
+            }
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry.setSql("UPDATE Integer SET _val = 15 where _key = 2"))) {
+                assertEquals(1L, cur.iterator().next().get(0));
+            }
+
+            for (List<?> row : cache0.query(qry.setSql("SELECT _key, _val FROM Integer")).getAll()) {
+                if ((Integer)row.get(0) == 2)
+                    assertEquals(15, row.get(1));
+                else
+                    assertEquals((Integer)row.get(0) * 10, row.get(1));
+            }
+
+            GridTestUtils.runAsync(new Runnable() {
+                @Override public void run() {
+                    SqlFieldsQuery qry = new SqlFieldsQuery("SELECT _key, _val FROM Integer");
+
+                    assertTrue(cache.query(qry).getAll().isEmpty());
+                }
+            }).get(TX_TIMEOUT);
+
+            cache0.query(qry.setSql("DELETE FROM Integer")).getAll();
+
+            assertTrue(cache0.query(qry.setSql("SELECT _key, _val FROM Integer")).getAll().isEmpty());
+
+            assertEquals(3L, cache0.query(qry.setSql("INSERT INTO Integer (_key, _val) values (1,1),(2,2),(3,3)")).getAll().iterator().next().get(0));
+
+            tx.commit();
+        }
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testSelectProducesTransaction() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite node = grid(rnd.nextInt(4));
+
+        IgniteCache<Object, Object> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) values (1,1),(2,2),(3,3)");
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        SqlFieldsQueryEx qryEx = new SqlFieldsQueryEx("SELECT * FROM MvccTestSqlIndexValue", true);
+
+        qryEx.setAutoCommit(false);
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qryEx)) {
+            assertEquals(3, cur.getAll().size());
+        }
+
+        try (GridNearTxLocal tx = cache.unwrap(IgniteEx.class).context().cache().context().tm().userTx()) {
+            assertNotNull(tx);
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRepeatableRead() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        IgniteCache<Object, Object> cache = grid(rnd.nextInt(4)).cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(
+            new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) values (1,1),(2,2),(3,3)"))) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        Ignite node = grid(rnd.nextInt(4));
+        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+        SqlFieldsQuery qry = new SqlFieldsQuery("SELECT * FROM MvccTestSqlIndexValue");
+
+        try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3, cur.getAll().size());
+            }
+
+            runAsync(new Runnable() {
+                @Override public void run() {
+                    IgniteCache<Object, Object> cache = grid(ThreadLocalRandom.current().nextInt(4))
+                        .cache(DEFAULT_CACHE_NAME);
+
+                    try (FieldsQueryCursor<List<?>> cur = cache.query(
+                        new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) values (4,4),(5,5),(6,6)"))) {
+                        assertEquals(3L, cur.iterator().next().get(0));
+                    }
+                }
+            }).get(TX_TIMEOUT);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3, cur.getAll().size());
+            }
+        }
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(6, cur.getAll().size());
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testIterator() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGrid(getConfiguration("grid").setMvccVacuumFrequency(Integer.MAX_VALUE));
+
+        Ignite client = startGrid(getConfiguration("client").setClientMode(true));
+
+        IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);
+
+        cache.put(1, 1);
+        cache.put(2, 2);
+        cache.put(3, 3);
+        cache.put(4, 4);
+
+        List<List<?>> res;
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _key " +
+                "WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            tx.rollback();
+        }
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _val " +
+                "WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _val " +
+                "WHEN 10 THEN 100 WHEN 20 THEN 200 ELSE 300 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            res = cache.query(new SqlFieldsQuery("DELETE FROM Integer WHERE _key = 4")).getAll();
+
+            assertEquals(1L, res.get(0).get(0));
+
+            tx.commit();
+        }
+
+        IgniteCache<Integer, Integer> cache0 = client.cache(DEFAULT_CACHE_NAME);
+
+        Iterator<Cache.Entry<Integer, Integer>> it = cache0.iterator();
+
+        Map<Integer, Integer> map = new HashMap<>();
+
+        while (it.hasNext()) {
+            Cache.Entry<Integer, Integer> e = it.next();
+
+            assertNull("duplicate key returned from iterator", map.putIfAbsent(e.getKey(), e.getValue()));
+        }
+
+        assertEquals(3, map.size());
+
+        assertEquals(100, map.get(1).intValue());
+        assertEquals(200, map.get(2).intValue());
+        assertEquals(300, map.get(3).intValue());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testHints() throws Exception {
+        persistence = true;
+
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        Ignite node = startGrid(getConfiguration("grid").setMvccVacuumFrequency(100));
+
+        node.cluster().active(true);
+
+        Ignite client = startGrid(getConfiguration("client").setClientMode(true));
+
+        IgniteCache<Object, Object> cache = client.cache(DEFAULT_CACHE_NAME);
+
+        List<List<?>> res;
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            res = cache.query(new SqlFieldsQuery("INSERT INTO Integer (_key, _val) " +
+                "VALUES (1, 1), (2, 2), (3, 3), (4, 4)")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            tx.commit();
+        }
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _key " +
+                "WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            tx.rollback();
+        }
+
+        try (Transaction tx = client.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TX_TIMEOUT);
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _val " +
+                "WHEN 1 THEN 10 WHEN 2 THEN 20 ELSE 30 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            res = cache.query(new SqlFieldsQuery("UPDATE Integer SET _val = CASE _val " +
+                "WHEN 10 THEN 100 WHEN 20 THEN 200 ELSE 300 END")).getAll();
+
+            assertEquals(4L, res.get(0).get(0));
+
+            res = cache.query(new SqlFieldsQuery("DELETE FROM Integer WHERE _key = 4")).getAll();
+
+            assertEquals(1L, res.get(0).get(0));
+
+            tx.commit();
+        }
+
+        mvccProcessor(node).runVacuum().get(TX_TIMEOUT);
+
+        checkAllVersionsHints(node.cache(DEFAULT_CACHE_NAME));
+    }
+
+    /** */
+    private void checkAllVersionsHints(IgniteCache cache) throws IgniteCheckedException {
+        IgniteCacheProxy cache0 = (IgniteCacheProxy)cache;
+        GridCacheContext cctx = cache0.context();
+
+        assert cctx.mvccEnabled();
+
+        for (Object e : cache) {
+            IgniteBiTuple entry = (IgniteBiTuple)e;
+
+            KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());
+
+            GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, CacheDataRowAdapter.RowData.LINK_WITH_HEADER);
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                assertTrue(row.mvccTxState() != 0);
+            }
+        }
+    }
+
+    /**
+     * @param ex Exception holder.
+     * @param e Exception.
+     */
+    private <T extends Throwable> void onException(AtomicReference<T> ex, T e) {
+        if (!ex.compareAndSet(null, e))
+            ex.get().addSuppressed(e);
+    }
+
+    /**
+     * @param phaser Phaser.
+     * @param phase Phase to wait for.
+     */
+    private void awaitPhase(Phaser phaser, int phase) {
+        int p;
+        do {
+            p = phaser.arriveAndAwaitAdvance();
+        }
+        while (p < phase);
+    }
+
+    /**
+     *
+     */
+    static class MvccTestSqlIndexValue implements Serializable {
+        /** */
+        @QuerySqlField(index = true)
+        private int idxVal1;
+
+        /**
+         * @param idxVal1 Indexed value 1.
+         */
+        MvccTestSqlIndexValue(int idxVal1) {
+            this.idxVal1 = idxVal1;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+            if (o == null || getClass() != o.getClass())
+                return false;
+            MvccTestSqlIndexValue value = (MvccTestSqlIndexValue)o;
+            return idxVal1 == value.idxVal1;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return Objects.hash(idxVal1);
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(MvccTestSqlIndexValue.class, this);
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesWithReducerAbstractTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesWithReducerAbstractTest.java
new file mode 100644
index 0000000..69cf108
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccSqlTxQueriesWithReducerAbstractTest.java
@@ -0,0 +1,829 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *       http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.CyclicBarrier;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.query.FieldsQueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.transactions.Transaction;
+
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.testframework.GridTestUtils.runMultiThreaded;
+import static org.apache.ignite.transactions.TransactionConcurrency.PESSIMISTIC;
+import static org.apache.ignite.transactions.TransactionIsolation.REPEATABLE_READ;
+
+/**
+ * Tests for transactional SQL.
+ */
+public abstract class CacheMvccSqlTxQueriesWithReducerAbstractTest extends CacheMvccAbstractTest  {
+    /** */
+    private static final int TIMEOUT = 3000;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        ccfgs = null;
+        ccfg = null;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerInsert() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key + 3, idxVal1 + 3 FROM MvccTestSqlIndexValue";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            qry.setDistributedJoins(true);
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(4), cache.get(4));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(5), cache.get(5));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(6), cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerInsertDuplicateKey() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key, idxVal1 FROM MvccTestSqlIndexValue";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            qry.setDistributedJoins(true);
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            GridTestUtils.assertThrowsAnyCause(log, new Callable<Object>() {
+                @Override public Object call() {
+                    return cache0.query(qry);
+                }
+            }, IgniteSQLException.class, "Duplicate key");
+
+            tx.rollback();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerMerge() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "MERGE INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key * 2, idxVal1 FROM MvccTestSqlIndexValue";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            qry.setDistributedJoins(true);
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(4));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerMultiBatchPerNodeServer() throws Exception {
+        checkMultiBatchPerNode(false);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerMultiBatchPerNodeClient() throws Exception {
+        checkMultiBatchPerNode(true);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    private void checkMultiBatchPerNode(boolean client) throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        Ignite checkNode;
+        Ignite updateNode;
+
+        Random rnd = ThreadLocalRandom.current();
+
+        if (client) {
+            startGridsMultiThreaded(3);
+
+            updateNode = grid(rnd.nextInt(3));
+
+            this.client = true;
+
+            checkNode = startGrid(4);
+        }
+        else {
+            startGridsMultiThreaded(4);
+
+            checkNode  = grid(rnd.nextInt(4));
+            updateNode = grid(rnd.nextInt(4));
+        }
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        final int count = 6;
+
+        Map<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> vals = new HashMap<>(count);
+
+        for (int idx = 1; idx <= count; ++idx)
+            vals.put(idx, new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(idx));
+
+        cache.putAll(vals);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key + 6, idxVal1 + 6 FROM MvccTestSqlIndexValue";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            qry.setDistributedJoins(true);
+            qry.setPageSize(1);
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals((long)count, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerDelete() throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setName("int")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class,
+                CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class),
+        };
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, Integer> cache = checkNode.cache("int");
+
+        cache.putAll(F.asMap(1, 1, 3, 3, 5, 5));
+
+        final int count = 6;
+
+        Map<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> vals = new HashMap<>(count);
+
+        for (int idx = 1; idx <= count; ++idx)
+            vals.put(idx, new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(idx));
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        cache0.putAll(vals);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "DELETE FROM MvccTestSqlIndexValue t " +
+                "WHERE EXISTS (SELECT 1 FROM \"int\".Integer WHERE t._key = _key)";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerUpdate() throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setName("int")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class,
+                CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class),
+        };
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, Integer> cache = checkNode.cache("int");
+
+        cache.putAll(F.asMap(1, 5, 3, 1, 5, 3));
+
+        final int count = 6;
+
+        Map<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> vals = new HashMap<>(count);
+
+        for (int idx = 1; idx <= count; ++idx)
+            vals.put(idx, new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(idx));
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        cache0.putAll(vals);
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "UPDATE MvccTestSqlIndexValue t SET idxVal1=" +
+                "(SELECT _val FROM \"int\".Integer WHERE t._key = _key)" +
+                " WHERE EXISTS (SELECT 1 FROM \"int\".Integer WHERE t._key = _key)";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.commit();
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerImplicitTxInsert() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key + 3, idxVal1 + 3 FROM MvccTestSqlIndexValue";
+
+        SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+        qry.setTimeout(TX_TIMEOUT, TimeUnit.MILLISECONDS);
+
+        qry.setDistributedJoins(true);
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(4), cache.get(4));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(5), cache.get(5));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(6), cache.get(6));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerRollbackInsert() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Integer, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache =
+            checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        try (Transaction tx = updateNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+            tx.timeout(TIMEOUT);
+
+            String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                "SELECT DISTINCT _key + 3, idxVal1 + 3 FROM MvccTestSqlIndexValue";
+
+            SqlFieldsQuery qry = new SqlFieldsQuery(sqlText);
+
+            qry.setDistributedJoins(true);
+
+            IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+            try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+                assertEquals(3L, cur.iterator().next().get(0));
+            }
+
+            tx.rollback();
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), sqlGet(1, cache).get(0).get(0));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), sqlGet(2, cache).get(0).get(0));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), sqlGet(3, cache).get(0).get(0));
+        assertTrue(sqlGet(4, cache).isEmpty());
+        assertTrue(sqlGet(5, cache).isEmpty());
+        assertTrue(sqlGet(6, cache).isEmpty());
+    }
+
+    /**
+     * @param key Key.
+     * @param cache Cache.
+     * @return Result.
+     */
+    private List<List> sqlGet(int key, IgniteCache cache) {
+        return cache.query(new SqlFieldsQuery("SELECT _val from MvccTestSqlIndexValue WHERE _key=" + key)).getAll();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerDeadlockInsert() throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setName("int")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class,
+                CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class),
+        };
+
+        startGridsMultiThreaded(2);
+
+        client = true;
+
+        startGridsMultiThreaded(2, 2);
+
+        Ignite checkNode  = grid(2);
+
+        IgniteCache<Integer, Integer> cache = checkNode.cache("int");
+
+        HashMap<Integer, Integer> vals = new HashMap<>(100);
+
+        for (int idx = 0; idx < 100; ++idx)
+            vals.put(idx, idx);
+
+        cache.putAll(vals);
+
+        final CyclicBarrier barrier = new CyclicBarrier(2);
+        final AtomicInteger idx = new AtomicInteger(2);
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        multithreaded(new Runnable() {
+            @Override public void run() {
+                int id = idx.getAndIncrement();
+
+                IgniteEx node = grid(id);
+
+                try {
+                    try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TIMEOUT);
+
+                        String sqlText = "INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+                            "SELECT DISTINCT _key, _val FROM \"int\".Integer ORDER BY _key";
+
+                        String sqlAsc = sqlText + " ASC";
+                        String sqlDesc = sqlText + " DESC";
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery((id % 2) == 0 ? sqlAsc : sqlDesc);
+
+                        IgniteCache<Object, Object> cache0 = node.cache(DEFAULT_CACHE_NAME);
+
+                        cache0.query(qry).getAll();
+
+                        barrier.await();
+
+                        qry = new SqlFieldsQuery((id % 2) == 0 ? sqlDesc : sqlAsc);
+
+                        cache0.query(qry).getAll();
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 2);
+
+        Exception ex0 = ex.get();
+
+        assertNotNull(ex0);
+
+        if (!X.hasCause(ex0, IgniteTxTimeoutCheckedException.class))
+            throw ex0;
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerInsertVersionConflict() throws Exception {
+        ccfgs = new CacheConfiguration[] {
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setName("int")
+                .setIndexedTypes(Integer.class, Integer.class),
+            cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+                .setIndexedTypes(Integer.class,
+                CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class),
+        };
+
+        startGridsMultiThreaded(2);
+
+        client = true;
+
+        final Ignite checkNode  = startGrid(2);
+
+        IgniteCache<Integer, Integer> cache = checkNode.cache("int");
+
+        HashMap<Integer, Integer> vals = new HashMap<>(100);
+
+        for (int idx = 0; idx < 10; ++idx)
+            vals.put(idx, idx);
+
+        cache.putAll(vals);
+
+        awaitPartitionMapExchange();
+
+        IgniteCache cache0 = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache0.query(new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) " +
+            "SELECT _key, _val FROM \"int\".Integer")).getAll();
+
+        final CyclicBarrier barrier = new CyclicBarrier(2);
+        final AtomicReference<Exception> ex = new AtomicReference<>();
+
+        runMultiThreaded(new Runnable() {
+            @Override public void run() {
+                try {
+                    try (Transaction tx = checkNode.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
+                        tx.timeout(TX_TIMEOUT);
+
+                        barrier.await();
+
+                        SqlFieldsQuery qry = new SqlFieldsQuery("SELECT * FROM MvccTestSqlIndexValue");
+
+                        cache0.query(qry).getAll();
+
+                        barrier.await();
+
+                        String sqlText = "UPDATE MvccTestSqlIndexValue t SET idxVal1=" +
+                            "(SELECT _val FROM \"int\".Integer WHERE _key >= 5 AND _key <= 5 ORDER BY _key) WHERE _key = 5";
+
+                        qry = new SqlFieldsQuery(sqlText);
+
+                        cache0.query(qry).getAll();
+
+                        tx.commit();
+                    }
+                }
+                catch (Exception e) {
+                    onException(ex, e);
+                }
+            }
+        }, 2, "tx-thread");
+
+        IgniteSQLException ex0 = X.cause(ex.get(), IgniteSQLException.class);
+
+        assertNotNull("Exception has not been thrown.", ex0);
+        assertEquals("Mvcc version mismatch.", ex0.getMessage());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerInsertValues() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite node  = grid(rnd.nextInt(4));
+
+        IgniteCache<Object, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1)" +
+            " values (1,?),(2,?),(3,?)");
+
+        qry.setArgs(1, 2, 3);
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        qry = new SqlFieldsQuery("INSERT INTO MvccTestSqlIndexValue (_key, idxVal1) values (4,4)");
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(4), cache.get(4));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerMergeValues() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite node  = grid(rnd.nextInt(4));
+
+        IgniteCache<Object, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue> cache = node.cache(DEFAULT_CACHE_NAME);
+
+        cache.put(1, new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1));
+        cache.put(3, new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3));
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("MERGE INTO MvccTestSqlIndexValue (_key, idxVal1)" +
+            " values (1,?),(2,?),(3,?)");
+
+        qry.setArgs(1, 4, 6);
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(3L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(4), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(6), cache.get(3));
+
+        qry = new SqlFieldsQuery("MERGE INTO MvccTestSqlIndexValue (_key, idxVal1) values (4,4)");
+
+        try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(4), cache.get(4));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerFastUpdate() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, Integer.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Object, Object> cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(1,1,2,2,3,3));
+
+        assertEquals(1, cache.get(1));
+        assertEquals(2, cache.get(2));
+        assertEquals(3, cache.get(3));
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Integer SET _val = 8 WHERE _key = ?").setArgs(1);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+             assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        qry = new SqlFieldsQuery("UPDATE Integer SET _val = 9 WHERE _key = 2");
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        assertEquals(8, cache.get(1));
+        assertEquals(9, cache.get(2));
+        assertEquals(3, cache.get(3));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testQueryReducerFastDelete() throws Exception {
+        ccfg = cacheConfiguration(cacheMode(), FULL_SYNC, 2, DFLT_PARTITION_COUNT)
+            .setIndexedTypes(Integer.class, CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue.class);
+
+        startGridsMultiThreaded(4);
+
+        Random rnd = ThreadLocalRandom.current();
+
+        Ignite checkNode  = grid(rnd.nextInt(4));
+        Ignite updateNode = grid(rnd.nextInt(4));
+
+        IgniteCache<Object, Object> cache = checkNode.cache(DEFAULT_CACHE_NAME);
+
+        cache.putAll(F.asMap(
+            1,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1),
+            2,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2),
+            3,new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3)));
+
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(1), cache.get(1));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(2), cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+
+        IgniteCache<Object, Object> cache0 = updateNode.cache(DEFAULT_CACHE_NAME);
+
+        SqlFieldsQuery qry = new SqlFieldsQuery("DELETE FROM MvccTestSqlIndexValue WHERE _key = ?")
+            .setArgs(1);
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        qry = new SqlFieldsQuery("DELETE FROM MvccTestSqlIndexValue WHERE _key = 2");
+
+        try (FieldsQueryCursor<List<?>> cur = cache0.query(qry)) {
+            assertEquals(1L, cur.iterator().next().get(0));
+        }
+
+        assertNull(cache.get(1));
+        assertNull(cache.get(2));
+        assertEquals(new CacheMvccSqlTxQueriesAbstractTest.MvccTestSqlIndexValue(3), cache.get(3));
+    }
+
+    /**
+     * @param ex Exception holder.
+     * @param e Exception.
+     */
+    private void onException(AtomicReference<Exception> ex, Exception e) {
+        if (!ex.compareAndSet(null, e))
+            ex.get().addSuppressed(e);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccStreamingInsertTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccStreamingInsertTest.java
new file mode 100644
index 0000000..b07a187
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/mvcc/CacheMvccStreamingInsertTest.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.mvcc;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.util.List;
+import java.util.Properties;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteJdbcDriver;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.configuration.CacheConfiguration;
+
+import static java.util.Arrays.asList;
+
+/**
+ *
+ */
+public class CacheMvccStreamingInsertTest extends CacheMvccAbstractTest {
+    /** */
+    private IgniteCache<Object, Object> sqlNexus;
+
+    /** */
+    private Connection conn;
+
+    /** {@inheritDoc} */
+    @Override protected CacheMode cacheMode() {
+        return CacheMode.PARTITIONED;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        Ignite ignite = startGrid(0);
+        sqlNexus = ignite.getOrCreateCache(new CacheConfiguration<>("sqlNexus").setSqlSchema("PUBLIC"));
+        sqlNexus.query(q("" +
+            "create table person(" +
+            "  id int not null primary key," +
+            "  name varchar not null" +
+            ") with \"atomicity=transactional_snapshot\""
+        ));
+
+        Properties props = new Properties();
+        props.setProperty(IgniteJdbcDriver.PROP_STREAMING, "true");
+        conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1", props);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStreamingInsertWithoutOverwrite() throws Exception {
+        conn.createStatement().execute("SET STREAMING 1 BATCH_SIZE 2 ALLOW_OVERWRITE 0 " +
+            " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY 100");
+        sqlNexus.query(q("insert into person values(1, 'ivan')"));
+
+        PreparedStatement batchStmt = conn.prepareStatement("insert into person values(?, ?)");
+        batchStmt.setInt(1, 1);
+        batchStmt.setString(2, "foo");
+        batchStmt.addBatch();
+        batchStmt.setInt(1, 2);
+        batchStmt.setString(2, "bar");
+        batchStmt.addBatch();
+        TimeUnit.MILLISECONDS.sleep(500);
+
+        List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
+        List<List<?>> exp = asList(
+            asList(1, "ivan"),
+            asList(2, "bar")
+        );
+        assertEquals(exp, rows);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testUpdateWithOverwrite() throws Exception {
+        conn.createStatement().execute("SET STREAMING 1 BATCH_SIZE 2 ALLOW_OVERWRITE 1 " +
+            " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY 100");
+        sqlNexus.query(q("insert into person values(1, 'ivan')"));
+
+        PreparedStatement batchStmt = conn.prepareStatement("insert into person values(?, ?)");
+        batchStmt.setInt(1, 1);
+        batchStmt.setString(2, "foo");
+        batchStmt.addBatch();
+        batchStmt.setInt(1, 2);
+        batchStmt.setString(2, "bar");
+        batchStmt.addBatch();
+        TimeUnit.MILLISECONDS.sleep(500);
+
+        List<List<?>> rows = sqlNexus.query(q("select * from person")).getAll();
+        List<List<?>> exp = asList(
+            asList(1, "foo"),
+            asList(2, "bar")
+        );
+        assertEquals(exp, rows);
+    }
+
+    /** */
+    private static SqlFieldsQuery q(String sql) {
+        return new SqlFieldsQuery(sql);
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java
new file mode 100644
index 0000000..0b3ded4
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryTest.java
@@ -0,0 +1,1911 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.cache.persistence.db.wal;
+
+import java.io.File;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Random;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.IgniteCompute;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.CacheRebalanceMode;
+import org.apache.ignite.cache.CacheWriteSynchronizationMode;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.BinaryConfiguration;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.configuration.WALMode;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.IgniteInternalFuture;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.pagemem.FullPageId;
+import org.apache.ignite.internal.pagemem.PageUtils;
+import org.apache.ignite.internal.pagemem.wal.WALIterator;
+import org.apache.ignite.internal.pagemem.wal.WALPointer;
+import org.apache.ignite.internal.pagemem.wal.record.DataEntry;
+import org.apache.ignite.internal.pagemem.wal.record.DataRecord;
+import org.apache.ignite.internal.pagemem.wal.record.MemoryRecoveryRecord;
+import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;
+import org.apache.ignite.internal.pagemem.wal.record.TxRecord;
+import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
+import org.apache.ignite.internal.pagemem.wal.record.delta.PageDeltaRecord;
+import org.apache.ignite.internal.processors.cache.GridCacheSharedContext;
+import org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager;
+import org.apache.ignite.internal.processors.cache.persistence.filename.PdsConsistentIdProcessor;
+import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage;
+import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;
+import org.apache.ignite.internal.processors.cache.persistence.tree.io.TrackingPageIO;
+import org.apache.ignite.internal.processors.cache.version.GridCacheVersion;
+import org.apache.ignite.internal.util.GridUnsafe;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.PA;
+import org.apache.ignite.internal.util.typedef.PAX;
+import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.internal.util.typedef.internal.S;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.lang.IgniteCallable;
+import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.lang.IgniteRunnable;
+import org.apache.ignite.resources.IgniteInstanceResource;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.testframework.junits.multijvm.IgniteProcessProxy;
+import org.apache.ignite.transactions.Transaction;
+import org.apache.ignite.transactions.TransactionConcurrency;
+import org.apache.ignite.transactions.TransactionIsolation;
+import org.junit.Assert;
+
+import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.DFLT_STORE_DIR;
+
+/**
+ *
+ */
+public class IgniteWalRecoveryTest extends GridCommonAbstractTest {
+    /** */
+    private static final String HAS_CACHE = "HAS_CACHE";
+
+    /** */
+    private static final int LARGE_ARR_SIZE = 1025;
+
+    /** */
+    private boolean fork;
+
+    /** */
+    private static final String CACHE_NAME = "partitioned";
+
+    /** */
+    private static final String RENAMED_CACHE_NAME = "partitioned0";
+
+    /** */
+    private static final String LOC_CACHE_NAME = "local";
+
+    /** */
+    private boolean renamed;
+
+    /** */
+    private int walSegmentSize;
+
+    /** Log only. */
+    private boolean logOnly;
+
+    /** {@inheritDoc} */
+    @Override protected boolean isMultiJvm() {
+        return fork;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        CacheConfiguration<Integer, IndexedObject> ccfg = renamed ?
+            new CacheConfiguration<>(RENAMED_CACHE_NAME) : new CacheConfiguration<>(CACHE_NAME);
+
+        ccfg.setAtomicityMode(CacheAtomicityMode.ATOMIC);
+        ccfg.setRebalanceMode(CacheRebalanceMode.SYNC);
+        ccfg.setAffinity(new RendezvousAffinityFunction(false, 32));
+        ccfg.setNodeFilter(new RemoteNodeFilter());
+        ccfg.setIndexedTypes(Integer.class, IndexedObject.class);
+
+        CacheConfiguration<Integer, IndexedObject> locCcfg = new CacheConfiguration<>(LOC_CACHE_NAME);
+        locCcfg.setCacheMode(CacheMode.LOCAL);
+        locCcfg.setIndexedTypes(Integer.class, IndexedObject.class);
+
+        cfg.setCacheConfiguration(ccfg, locCcfg);
+
+        DataStorageConfiguration dbCfg = new DataStorageConfiguration();
+
+        dbCfg.setPageSize(4 * 1024);
+
+        DataRegionConfiguration memPlcCfg = new DataRegionConfiguration();
+
+        memPlcCfg.setName("dfltDataRegion");
+        memPlcCfg.setInitialSize(1024L * 1024 * 1024);
+        memPlcCfg.setMaxSize(1024L * 1024 * 1024);
+        memPlcCfg.setPersistenceEnabled(true);
+
+        dbCfg.setDefaultDataRegionConfiguration(memPlcCfg);
+
+        dbCfg.setWalRecordIteratorBufferSize(1024 * 1024);
+
+        dbCfg.setWalHistorySize(2);
+
+        if (logOnly)
+            dbCfg.setWalMode(WALMode.LOG_ONLY);
+
+        if (walSegmentSize != 0)
+            dbCfg.setWalSegmentSize(walSegmentSize);
+
+        cfg.setDataStorageConfiguration(dbCfg);
+
+        BinaryConfiguration binCfg = new BinaryConfiguration();
+
+        binCfg.setCompactFooter(false);
+
+        cfg.setBinaryConfiguration(binCfg);
+
+        if (!getTestIgniteInstanceName(0).equals(gridName))
+            cfg.setUserAttributes(F.asMap(HAS_CACHE, true));
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        stopAllGrids();
+
+        cleanPersistenceDir();
+
+        renamed = false;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+
+        logOnly = false;
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWalBig() throws Exception {
+        IgniteEx ignite = startGrid(1);
+
+        ignite.cluster().active(true);
+
+        IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+        Random rnd = new Random();
+
+        Map<Integer, IndexedObject> map = new HashMap<>();
+
+        for (int i = 0; i < 10_000; i++) {
+            if (i % 1000 == 0)
+                X.println(" >> " + i);
+
+            int k = rnd.nextInt(300_000);
+            IndexedObject v = new IndexedObject(rnd.nextInt(10_000));
+
+            cache.put(k, v);
+            map.put(k, v);
+        }
+
+        // Check.
+        for (Integer k : map.keySet())
+            assertEquals(map.get(k), cache.get(k));
+
+        stopGrid(1);
+
+        ignite = startGrid(1);
+
+        ignite.cluster().active(true);
+
+        cache = ignite.cache(CACHE_NAME);
+
+        // Check.
+        for (Integer k : map.keySet())
+            assertEquals(map.get(k), cache.get(k));
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWalBigObjectNodeCancel() throws Exception {
+        final int MAX_SIZE_POWER = 21;
+
+        IgniteEx ignite = startGrid(1);
+
+        ignite.cluster().active(true);
+
+        IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+        for (int i = 0; i < MAX_SIZE_POWER; ++i) {
+            int size = 1 << i;
+
+            cache.put("key_" + i, createTestData(size));
+        }
+
+        stopGrid(1, true);
+
+        ignite = startGrid(1);
+
+        ignite.cluster().active(true);
+
+        cache = ignite.cache(CACHE_NAME);
+
+        // Check.
+        for (int i = 0; i < MAX_SIZE_POWER; ++i) {
+            int size = 1 << i;
+
+            int[] data = createTestData(size);
+
+            int[] val = (int[])cache.get("key_" + i);
+
+            assertTrue("Invalid data. [key=key_" + i + ']', Arrays.equals(data, val));
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testSwitchClassLoader() throws Exception {
+        try {
+            final IgniteEx igniteEx = startGrid(1);
+
+            // CustomDiscoveryMessage will trigger service tasks
+            startGrid(2);
+
+            igniteEx.cluster().active(true);
+
+            IgniteCache<Integer, EnumVal> cache = igniteEx.cache(CACHE_NAME);
+
+            final ClassLoader oldCl = Thread.currentThread().getContextClassLoader();
+            final ClassLoader newCl = getExternalClassLoader();
+
+            Thread.currentThread().setContextClassLoader(newCl);
+
+            for (int i = 0; i < 10; i++)
+                cache.put(i, i % 2 == 0 ? EnumVal.VAL1 : EnumVal.VAL2);
+
+            for (int i = 0; i < 10; i++)
+                assert cache.containsKey(i);
+
+            // Invokes ClearTask with new class loader
+            cache.clear();
+
+            Thread.currentThread().setContextClassLoader(oldCl);
+
+            for (int i = 0; i < 10; i++)
+                cache.put(i, i % 2 == 0 ? EnumVal.VAL1 : EnumVal.VAL2);
+
+            for (int i = 0; i < 10; i++)
+                assert cache.containsKey(i);
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWalSimple() throws Exception {
+        try {
+            IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+            info(" --> step1");
+
+            for (int i = 0; i < 10_000; i += 2)
+                cache.put(i, new IndexedObject(i));
+
+            info(" --> step2");
+
+            for (int i = 0; i < 10_000; i += 3)
+                cache.put(i, new IndexedObject(i * 2));
+
+            info(" --> step3");
+
+            for (int i = 0; i < 10_000; i += 7)
+                cache.put(i, new IndexedObject(i * 3));
+
+            info(" --> check1");
+
+            // Check.
+            for (int i = 0; i < 10_000; i++) {
+                IndexedObject o;
+
+                if (i % 7 == 0)
+                    o = new IndexedObject(i * 3);
+                else if (i % 3 == 0)
+                    o = new IndexedObject(i * 2);
+                else if (i % 2 == 0)
+                    o = new IndexedObject(i);
+                else
+                    o = null;
+
+                assertEquals(o, cache.get(i));
+            }
+
+            stopGrid(1);
+
+            ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            cache = ignite.cache(CACHE_NAME);
+
+            info(" --> check2");
+
+            // Check.
+            for (int i = 0; i < 10_000; i++) {
+                IndexedObject o;
+
+                if (i % 7 == 0)
+                    o = new IndexedObject(i * 3);
+                else if (i % 3 == 0)
+                    o = new IndexedObject(i * 2);
+                else if (i % 2 == 0)
+                    o = new IndexedObject(i);
+                else
+                    o = null;
+
+                assertEquals(o, cache.get(i));
+            }
+
+            info(" --> ok");
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testWalLargeValue() throws Exception {
+        try {
+            IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+            for (int i = 0; i < 10_000; i++) {
+                final byte[] data = new byte[i];
+
+                Arrays.fill(data, (byte)i);
+
+                cache.put(i, data);
+
+                if (i % 1000 == 0)
+                    X.println(" ---> put: " + i);
+            }
+
+            stopGrid(1);
+
+            ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            cache = ignite.cache(CACHE_NAME);
+
+            info(" --> check2");
+
+            for (int i = 0; i < 10_000; i++) {
+                final byte[] data = new byte[i];
+
+                Arrays.fill(data, (byte)i);
+
+                final byte[] loaded = (byte[]) cache.get(i);
+
+                Assert.assertArrayEquals(data, loaded);
+
+                if (i % 1000 == 0)
+                    X.println(" ---> get: " + i);
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWalRolloverMultithreadedDefault() throws Exception {
+        logOnly = false;
+
+        checkWalRolloverMultithreaded();
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testWalRolloverMultithreadedLogOnly() throws Exception {
+        logOnly = true;
+
+        checkWalRolloverMultithreaded();
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testHugeCheckpointRecord() throws Exception {
+        try {
+            final IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            for (int i = 0; i < 50; i++) {
+                CacheConfiguration<Object, Object> ccfg = new CacheConfiguration<>("cache-" + i);
+
+                // We can get 'too many open files' with default number of partitions.
+                ccfg.setAffinity(new RendezvousAffinityFunction(false, 128));
+
+                IgniteCache<Object, Object> cache = ignite.getOrCreateCache(ccfg);
+
+                cache.put(i, i);
+            }
+
+            final long endTime = System.currentTimeMillis() + 30_000;
+
+            IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Callable<Void>() {
+                @Override public Void call() {
+                    Random rnd = ThreadLocalRandom.current();
+
+                    while (U.currentTimeMillis() < endTime) {
+                        IgniteCache<Object, Object> cache = ignite.cache("cache-" + rnd.nextInt(50));
+
+                        cache.put(rnd.nextInt(50_000), rnd.nextInt());
+                    }
+
+                    return null;
+                }
+            }, 16, "put-thread");
+
+            while (System.currentTimeMillis() < endTime) {
+                ignite.context().cache().context().database().wakeupForCheckpoint("test").get();
+
+                U.sleep(500);
+            }
+
+            fut.get();
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    private void checkWalRolloverMultithreaded() throws Exception {
+        walSegmentSize = 2 * 1024 * 1024;
+
+        final long endTime = System.currentTimeMillis() + 60 * 1000;
+
+        try {
+            IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            final IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+            GridTestUtils.runMultiThreaded(new Callable<Void>() {
+                @Override public Void call() {
+                    Random rnd = ThreadLocalRandom.current();
+
+                    while (U.currentTimeMillis() < endTime)
+                        cache.put(rnd.nextInt(50_000), rnd.nextInt());
+
+                    return null;
+                }
+            }, 16, "put-thread");
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testWalRenameDirSimple() throws Exception {
+        try {
+            IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+            for (int i = 0; i < 100; i++)
+                cache.put(i, new IndexedObject(i));
+
+            final Object consistentId = ignite.cluster().localNode().consistentId();
+
+            stopGrid(1);
+
+            final File cacheDir = cacheDir(CACHE_NAME, consistentId.toString());
+
+            renamed = cacheDir.renameTo(new File(cacheDir.getParent(), "cache-" + RENAMED_CACHE_NAME));
+
+            assert renamed;
+
+            ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            cache = ignite.cache(RENAMED_CACHE_NAME);
+
+            for (int i = 0; i < 100; i++)
+                assertEquals(new IndexedObject(i), cache.get(i));
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @param cacheName Cache name.
+     * @param consId Consistent ID.
+     * @return Cache dir.
+     * @throws IgniteCheckedException If fail.
+     */
+    private File cacheDir(final String cacheName, final String consId) throws IgniteCheckedException {
+        final String subfolderName
+            = PdsConsistentIdProcessor.genNewStyleSubfolderName(0, UUID.fromString(consId));
+
+        final File dbDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), DFLT_STORE_DIR, false);
+
+        assert dbDir.exists();
+
+        final File consIdDir = new File(dbDir.getAbsolutePath(), subfolderName);
+
+        assert consIdDir.exists();
+
+        final File cacheDir = new File(consIdDir.getAbsolutePath(), "cache-" + cacheName);
+
+        assert cacheDir.exists();
+
+        return cacheDir;
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testRecoveryNoCheckpoint() throws Exception {
+        try {
+            IgniteEx ctrlGrid = startGrid(0);
+
+            fork = true;
+
+            IgniteEx cacheGrid = startGrid(1);
+
+            ctrlGrid.cluster().active(true);
+
+            ctrlGrid.compute(ctrlGrid.cluster().forRemotes()).run(new LoadRunnable(false));
+
+            info("Killing remote process...");
+
+            ((IgniteProcessProxy)cacheGrid).kill();
+
+            final IgniteEx g0 = ctrlGrid;
+
+            GridTestUtils.waitForCondition(new PA() {
+                /** {@inheritDoc} */
+                @Override public boolean apply() {
+                    return g0.cluster().nodes().size() == 1;
+                }
+            }, getTestTimeout());
+
+            fork = false;
+
+            // Now start the grid and verify that updates were restored from WAL.
+            cacheGrid = startGrid(1);
+
+            IgniteCache<Object, Object> cache = cacheGrid.cache(CACHE_NAME);
+
+            for (int i = 0; i < 10_000; i++)
+                assertEquals(new IndexedObject(i), cache.get(i));
+
+            List<List<?>> res = cache.query(new SqlFieldsQuery("select count(iVal) from IndexedObject")).getAll();
+
+            assertEquals(1, res.size());
+            assertEquals(10_000L, res.get(0).get(0));
+
+            IgniteCache<Object, Object> locCache = cacheGrid.cache(LOC_CACHE_NAME);
+
+            for (int i = 0; i < 10_000; i++)
+                assertEquals(new IndexedObject(i), locCache.get(i));
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testRecoveryLargeNoCheckpoint() throws Exception {
+        try {
+            IgniteEx ctrlGrid = startGrid(0);
+
+            fork = true;
+
+            IgniteEx cacheGrid = startGrid(1);
+
+            ctrlGrid.cluster().active(true);
+
+            ctrlGrid.compute(ctrlGrid.cluster().forRemotes()).run(new LargeLoadRunnable(false));
+
+            info("Killing remote process...");
+
+            ((IgniteProcessProxy)cacheGrid).kill();
+
+            final IgniteEx g0 = ctrlGrid;
+
+            GridTestUtils.waitForCondition(new PA() {
+                /** {@inheritDoc} */
+                @Override public boolean apply() {
+                    return g0.cluster().nodes().size() == 1;
+                }
+            }, getTestTimeout());
+
+            fork = false;
+
+            // Now start the grid and verify that updates were restored from WAL.
+            cacheGrid = startGrid(1);
+
+            IgniteCache<Object, Object> cache = cacheGrid.cache(CACHE_NAME);
+            IgniteCache<Object, Object> locCache = cacheGrid.cache(LOC_CACHE_NAME);
+
+            for (int i = 0; i < 1000; i++) {
+                final long[] data = new long[LARGE_ARR_SIZE];
+
+                Arrays.fill(data, i);
+
+                Assert.assertArrayEquals(data, (long[])cache.get(i));
+                Assert.assertArrayEquals(data, (long[])locCache.get(i));
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected long getTestTimeout() {
+        return TimeUnit.MINUTES.toMillis(20);
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testRandomCrash() throws Exception {
+        try {
+            IgniteEx ctrlGrid = startGrid(0);
+
+            fork = true;
+
+            IgniteEx cacheGrid = startGrid(1);
+
+            ctrlGrid.cluster().active(true);
+
+            IgniteCompute rmt = ctrlGrid.compute(ctrlGrid.cluster().forRemotes());
+
+            rmt.run(new LoadRunnable(false));
+
+            info(">>> Finished cache population.");
+
+            rmt.run(new AsyncLoadRunnable());
+
+            Thread.sleep(20_000);
+
+            info(">>> Killing remote process...");
+
+            ((IgniteProcessProxy)cacheGrid).kill();
+
+            startGrid(1);
+
+            Boolean res = rmt.call(new VerifyCallable());
+
+            assertTrue(res);
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testLargeRandomCrash() throws Exception {
+        try {
+            IgniteEx ctrlGrid = startGrid(0);
+
+            fork = true;
+
+            IgniteEx cacheGrid = startGrid(1);
+
+            ctrlGrid.cluster().active(true);
+
+            IgniteCompute rmt = ctrlGrid.compute(ctrlGrid.cluster().forRemotes());
+
+            rmt.run(new LargeLoadRunnable(false));
+
+            info(">>> Finished cache population.");
+
+            rmt.run(new AsyncLargeLoadRunnable());
+
+            Thread.sleep(20_000);
+
+            info(">>> Killing remote process...");
+
+            ((IgniteProcessProxy)cacheGrid).kill();
+
+            startGrid(1);
+
+            Boolean res = rmt.call(new VerifyLargeCallable());
+
+            assertTrue(res);
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     *
+     */
+    private static class RemoteNodeFilter implements IgnitePredicate<ClusterNode> {
+        /** {@inheritDoc} */
+        @Override public boolean apply(ClusterNode clusterNode) {
+            return clusterNode.attribute(HAS_CACHE) != null;
+        }
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDestroyCache() throws Exception {
+        try {
+            IgniteEx ignite = startGrid(1);
+
+            ignite.cluster().active(true);
+
+            IgniteCache<Object, Object> cache = ignite.getOrCreateCache("test");
+
+            cache.put(1, new IndexedObject(1));
+
+            ignite.destroyCache("test");
+
+            cache = ignite.getOrCreateCache("test");
+
+            // No entry available after cache destroy.
+            assertNull(cache.get(1));
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testEvictPartition() throws Exception {
+        try {
+            Ignite ignite1 = startGrid("node1");
+
+            ignite1.cluster().active(true);
+
+            IgniteCache<Object, Object> cache1 = ignite1.cache(CACHE_NAME);
+
+            for (int i = 0; i < 100; i++)
+                cache1.put(i, new IndexedObject(i));
+
+            Ignite ignite2 = startGrid("node2");
+
+            IgniteCache<Object, Object> cache2 = ignite2.cache(CACHE_NAME);
+
+            for (int i = 0; i < 100; i++) {
+                assertEquals(new IndexedObject(i), cache1.get(i));
+                assertEquals(new IndexedObject(i), cache2.get(i));
+            }
+
+            ignite1.close();
+            ignite2.close();
+
+            ignite1 = startGrid("node1");
+            ignite2 = startGrid("node2");
+
+            ignite1.cluster().active(true);
+
+            cache1 = ignite1.cache(CACHE_NAME);
+            cache2 = ignite2.cache(CACHE_NAME);
+
+            for (int i = 0; i < 100; i++) {
+                assertEquals(new IndexedObject(i), cache1.get(i));
+                assertEquals(new IndexedObject(i), cache2.get(i));
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testMetastorage() throws Exception {
+        try {
+            int cnt = 5000;
+
+            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
+            IgniteEx ignite1 = (IgniteEx)startGrid("node2");
+
+            ignite1.cluster().active(true);
+
+            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
+            GridCacheSharedContext<Object, Object> sharedCtx1 = ignite1.context().cache().context();
+
+            MetaStorage storage0 = sharedCtx0.database().metaStorage();
+            MetaStorage storage1 = sharedCtx1.database().metaStorage();
+
+            assert storage0 != null;
+
+            for (int i = 0; i < cnt; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage0.putData(String.valueOf(i), new byte[]{(byte)(i % 256), 2, 3});
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+
+                byte[] b1 = new byte[i + 3];
+                b1[0] = 1;
+                b1[1] = 2;
+                b1[2] = 3;
+
+                sharedCtx1.database().checkpointReadLock();
+
+                try {
+                    storage1.putData(String.valueOf(i), b1);
+                }
+                finally {
+                    sharedCtx1.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] d1 = storage0.getData(String.valueOf(i));
+                assertEquals(3, d1.length);
+                assertEquals((byte)(i % 256), d1[0]);
+                assertEquals(2, d1[1]);
+                assertEquals(3, d1[2]);
+
+                byte[] d2 = storage1.getData(String.valueOf(i));
+                assertEquals(i + 3, d2.length);
+                assertEquals(1, d2[0]);
+                assertEquals(2, d2[1]);
+                assertEquals(3, d2[2]);
+            }
+
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testMetastorageLargeArray() throws Exception {
+        try {
+            int cnt = 5000;
+            int arraySize = 32_768;
+
+            IgniteEx ignite = (IgniteEx)startGrid("node1");
+
+            ignite.cluster().active(true);
+
+            GridCacheSharedContext<Object, Object> sharedCtx = ignite.context().cache().context();
+
+            MetaStorage storage = sharedCtx.database().metaStorage();
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] b1 = new byte[arraySize];
+                for (int k = 0; k < arraySize; k++) {
+                    b1[k] = (byte) (k % 100);
+                }
+
+                sharedCtx.database().checkpointReadLock();
+
+                try {
+                    storage.putData(String.valueOf(i), b1);
+                }
+                finally {
+                    sharedCtx.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] d2 = storage.getData(String.valueOf(i));
+                assertEquals(arraySize, d2.length);
+
+                for (int k = 0; k < arraySize; k++) {
+                    assertEquals((byte) (k % 100), d2[k]);
+                }
+            }
+
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testMetastorageRemove() throws Exception {
+        try {
+            int cnt = 400;
+
+            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
+
+            ignite0.cluster().active(true);
+
+            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
+
+            MetaStorage storage = sharedCtx0.database().metaStorage();
+
+            assert storage != null;
+
+            for (int i = 0; i < cnt; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < 10; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage.removeData(String.valueOf(i));
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 10; i < cnt; i++) {
+                byte[] d1 = storage.getData(String.valueOf(i));
+                assertEquals(3, d1.length);
+                assertEquals(1, d1[0]);
+                assertEquals(2, d1[1]);
+                assertEquals(3, d1[2]);
+            }
+
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testMetastorageUpdate() throws Exception {
+        try {
+            int cnt = 2000;
+
+            IgniteEx ignite0 = (IgniteEx)startGrid("node1");
+
+            ignite0.cluster().active(true);
+
+            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
+
+            MetaStorage storage = sharedCtx0.database().metaStorage();
+
+            assert storage != null;
+
+            for (int i = 0; i < cnt; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < cnt; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage.putData(String.valueOf(i), new byte[]{2, 2, 3, 4});
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] d1 = storage.getData(String.valueOf(i));
+                assertEquals(4, d1.length);
+                assertEquals(2, d1[0]);
+                assertEquals(2, d1[1]);
+                assertEquals(3, d1[2]);
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception If fail.
+     */
+    public void testMetastorageWalRestore() throws Exception {
+        try {
+            int cnt = 2000;
+
+            IgniteEx ignite0 = startGrid(0);
+
+            ignite0.cluster().active(true);
+
+            GridCacheSharedContext<Object, Object> sharedCtx0 = ignite0.context().cache().context();
+
+            MetaStorage storage = sharedCtx0.database().metaStorage();
+
+            assert storage != null;
+
+            for (int i = 0; i < cnt; i++) {
+                sharedCtx0.database().checkpointReadLock();
+
+                try {
+                    storage.putData(String.valueOf(i), new byte[]{1, 2, 3});
+                }
+                finally {
+                    sharedCtx0.database().checkpointReadUnlock();
+                }
+            }
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] value = storage.getData(String.valueOf(i));
+                assert value != null;
+                assert value.length == 3;
+            }
+
+            stopGrid(0);
+
+            ignite0 = startGrid(0);
+
+            ignite0.cluster().active(true);
+
+            sharedCtx0 = ignite0.context().cache().context();
+
+            storage = sharedCtx0.database().metaStorage();
+
+            assert storage != null;
+
+            for (int i = 0; i < cnt; i++) {
+                byte[] value = storage.getData(String.valueOf(i));
+                assert value != null;
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * @throws Exception if failed.
+     */
+    public void testApplyDeltaRecords() throws Exception {
+        try {
+            IgniteEx ignite0 = (IgniteEx)startGrid("node0");
+
+            ignite0.cluster().active(true);
+
+            IgniteCache<Object, Object> cache0 = ignite0.cache(CACHE_NAME);
+
+            for (int i = 0; i < 1000; i++)
+                cache0.put(i, new IndexedObject(i));
+
+            GridCacheSharedContext<Object, Object> sharedCtx = ignite0.context().cache().context();
+
+            GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+
+            db.waitForCheckpoint("test");
+            db.enableCheckpoints(false).get();
+
+            // Log something to know where to start.
+            WALPointer ptr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));
+
+            info("Replay marker: " + ptr);
+
+            for (int i = 1000; i < 5000; i++)
+                cache0.put(i, new IndexedObject(i));
+
+            info("Done puts...");
+
+            for (int i = 2_000; i < 3_000; i++)
+                cache0.remove(i);
+
+            info("Done removes...");
+
+            for (int i = 5000; i < 6000; i++)
+                cache0.put(i, new IndexedObject(i));
+
+            info("Done puts...");
+
+            Map<FullPageId, byte[]> rolledPages = new HashMap<>();
+
+            int pageSize = sharedCtx.database().pageSize();
+
+            ByteBuffer buf = ByteBuffer.allocateDirect(pageSize);
+
+            // Now check that deltas can be correctly applied.
+            try (WALIterator it = sharedCtx.wal().replay(ptr)) {
+                while (it.hasNext()) {
+                    IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                    WALRecord rec = tup.get2();
+
+                    if (rec instanceof PageSnapshot) {
+                        PageSnapshot page = (PageSnapshot)rec;
+
+                        rolledPages.put(page.fullPageId(), page.pageData());
+                    }
+                    else if (rec instanceof PageDeltaRecord) {
+                        PageDeltaRecord delta = (PageDeltaRecord)rec;
+
+                        FullPageId fullId = new FullPageId(delta.pageId(), delta.groupId());
+
+                        byte[] pageData = rolledPages.get(fullId);
+
+                        if (pageData == null) {
+                            pageData = new byte[pageSize];
+
+                            rolledPages.put(fullId, pageData);
+                        }
+
+                        assertNotNull("Missing page snapshot [page=" + fullId + ", delta=" + delta + ']', pageData);
+
+                        buf.order(ByteOrder.nativeOrder());
+
+                        buf.position(0);
+                        buf.put(pageData);
+                        buf.position(0);
+
+                        delta.applyDelta(sharedCtx.database().dataRegion(null).pageMemory(),
+                            GridUnsafe.bufferAddress(buf));
+
+                        buf.position(0);
+
+                        buf.get(pageData);
+                    }
+                }
+            }
+
+            info("Done apply...");
+
+            PageMemoryEx pageMem = (PageMemoryEx)db.dataRegion(null).pageMemory();
+
+            for (Map.Entry<FullPageId, byte[]> entry : rolledPages.entrySet()) {
+                FullPageId fullId = entry.getKey();
+
+                ignite0.context().cache().context().database().checkpointReadLock();
+
+                try {
+                    long page = pageMem.acquirePage(fullId.groupId(), fullId.pageId(), true);
+
+                    try {
+                        long bufPtr = pageMem.writeLock(fullId.groupId(), fullId.pageId(), page, true);
+
+                        try {
+                            byte[] data = entry.getValue();
+
+                            for (int i = 0; i < data.length; i++) {
+                                if (fullId.pageId() == TrackingPageIO.VERSIONS.latest().trackingPageFor(fullId.pageId(), db.pageSize()))
+                                    continue; // Skip tracking pages.
+
+                                assertEquals("page=" + fullId + ", pos=" + i, PageUtils.getByte(bufPtr, i), data[i]);
+                            }
+                        }
+                        finally {
+                            pageMem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, false, true);
+                        }
+                    }
+                    finally {
+                        pageMem.releasePage(fullId.groupId(), fullId.pageId(), page);
+                    }
+                }
+                finally {
+                    ignite0.context().cache().context().database().checkpointReadUnlock();
+                }
+            }
+
+            ignite0.close();
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * Test recovery from WAL on 3 nodes in case of transactional cache.
+     *
+     * @throws Exception If fail.
+     */
+    public void testRecoveryOnTransactionalAndPartitionedCache() throws Exception {
+        IgniteEx ignite = (IgniteEx) startGrids(3);
+        ignite.cluster().active(true);
+
+        try {
+            final String cacheName = "transactional";
+
+            CacheConfiguration<Object, Object> cacheConfiguration = new CacheConfiguration<>(cacheName)
+                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+                    .setAffinity(new RendezvousAffinityFunction(false, 32))
+                    .setCacheMode(CacheMode.PARTITIONED)
+                    .setRebalanceMode(CacheRebalanceMode.SYNC)
+                    .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
+                    .setBackups(2);
+
+            ignite.createCache(cacheConfiguration);
+
+            IgniteCache<Object, Object> cache = ignite.cache(cacheName);
+            Map<Object, Object> map = new HashMap<>();
+
+            final int transactions = 100;
+            final int operationsPerTransaction = 40;
+
+            Random random = new Random();
+
+            for (int t = 1; t <= transactions; t++) {
+                Transaction tx = ignite.transactions().txStart(
+                        TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
+
+                Map<Object, Object> changesInTransaction = new HashMap<>();
+
+                for (int op = 0; op < operationsPerTransaction; op++) {
+                    int key = random.nextInt(1000) + 1;
+
+                    Object value = random.nextBoolean() ? randomString(random) + key : new BigObject(key);
+
+                    changesInTransaction.put(key, value);
+
+                    cache.put(key, value);
+                }
+
+                if (random.nextBoolean()) {
+                    tx.commit();
+                    map.putAll(changesInTransaction);
+                }
+                else {
+                    tx.rollback();
+                }
+
+                if (t % 50 == 0)
+                    log.info("Finished transaction " + t);
+            }
+
+            stopAllGrids();
+
+            ignite = (IgniteEx) startGrids(3);
+            ignite.cluster().active(true);
+
+            cache = ignite.cache(cacheName);
+
+            for (Object key : map.keySet()) {
+                Object expectedValue = map.get(key);
+                Object actualValue = cache.get(key);
+                Assert.assertEquals("Unexpected value for key " + key, expectedValue, actualValue);
+            }
+        }
+        finally {
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * Test that all DataRecord WAL records are within transaction boundaries - PREPARED and COMMITTED markers.
+     *
+     * @throws Exception If any fail.
+     */
+    public void testTxRecordsConsistency() throws Exception {
+        System.setProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS, "true");
+
+        IgniteEx ignite = (IgniteEx) startGrids(3);
+        ignite.cluster().active(true);
+
+        try {
+            final String cacheName = "transactional";
+
+            CacheConfiguration<Object, Object> cacheConfiguration = new CacheConfiguration<>(cacheName)
+                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
+                    .setAffinity(new RendezvousAffinityFunction(false, 32))
+                    .setCacheMode(CacheMode.PARTITIONED)
+                    .setRebalanceMode(CacheRebalanceMode.SYNC)
+                    .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
+                    .setBackups(0);
+
+            ignite.createCache(cacheConfiguration);
+
+            IgniteCache<Object, Object> cache = ignite.cache(cacheName);
+
+            GridCacheSharedContext<Object, Object> sharedCtx = ignite.context().cache().context();
+
+            GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();
+
+            db.waitForCheckpoint("test");
+            db.enableCheckpoints(false).get();
+
+            // Log something to know where to start.
+            WALPointer startPtr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));
+
+            final int transactions = 100;
+            final int operationsPerTransaction = 40;
+
+            Random random = new Random();
+
+            for (int t = 1; t <= transactions; t++) {
+                Transaction tx = ignite.transactions().txStart(
+                        TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
+
+                for (int op = 0; op < operationsPerTransaction; op++) {
+                    int key = random.nextInt(1000) + 1;
+
+                    Object value = random.nextBoolean() ? randomString(random) + key : new BigObject(key);
+
+                    cache.put(key, value);
+                }
+
+                if (random.nextBoolean()) {
+                    tx.commit();
+                }
+                else {
+                    tx.rollback();
+                }
+
+                if (t % 50 == 0)
+                    log.info("Finished transaction " + t);
+            }
+
+            Set<GridCacheVersion> activeTransactions = new HashSet<>();
+
+            // Check that all DataRecords are within PREPARED and COMMITTED tx records.
+            try (WALIterator it = sharedCtx.wal().replay(startPtr)) {
+                while (it.hasNext()) {
+                    IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
+
+                    WALRecord rec = tup.get2();
+
+                    if (rec instanceof TxRecord) {
+                        TxRecord txRecord = (TxRecord) rec;
+                        GridCacheVersion txId = txRecord.nearXidVersion();
+
+                        switch (txRecord.state()) {
+                            case PREPARED:
+                                assert !activeTransactions.contains(txId) : "Transaction is already present " + txRecord;
+
+                                activeTransactions.add(txId);
+
+                                break;
+                            case COMMITTED:
+                                assert activeTransactions.contains(txId) : "No PREPARE marker for transaction " + txRecord;
+
+                                activeTransactions.remove(txId);
+
+                                break;
+                            case ROLLED_BACK:
+                                activeTransactions.remove(txId);
+                                break;
+
+                            default:
+                                throw new IllegalStateException("Unknown Tx state of record " + txRecord);
+                        }
+                    } else if (rec instanceof DataRecord) {
+                        DataRecord dataRecord = (DataRecord) rec;
+
+                        for (DataEntry entry : dataRecord.writeEntries()) {
+                            GridCacheVersion txId = entry.nearXidVersion();
+
+                            assert activeTransactions.contains(txId) : "No transaction for entry " + entry;
+                        }
+                    }
+                }
+            }
+        }
+        finally {
+            System.clearProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS);
+            stopAllGrids();
+        }
+    }
+
+    /**
+     * Generate random lowercase string for test purposes.
+     */
+    private String randomString(Random random) {
+        int len = random.nextInt(50) + 1;
+
+        StringBuilder sb = new StringBuilder();
+        for (int i = 0; i < len; i++)
+            sb.append(random.nextInt(26) + 'a');
+
+        return sb.toString();
+    }
+
+    /**
+     * BigObject for test purposes that don't fit in page size.
+     */
+    private static class BigObject {
+        private final int index;
+
+        private final byte[] payload = new byte[4096];
+
+        BigObject(int index) {
+            this.index = index;
+            // Create pseudo-random array.
+            for (int i = 0; i < payload.length; i++)
+                if (i % index == 0)
+                    payload[i] = (byte) index;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            BigObject bigObject = (BigObject) o;
+            return index == bigObject.index &&
+                    Arrays.equals(payload, bigObject.payload);
+        }
+
+        @Override
+        public int hashCode() {
+            return Objects.hash(index, payload);
+        }
+    }
+
+    /**
+     * @param size Size of data.
+     * @return Test data.
+     */
+    private int[] createTestData(int size) {
+        int[] data = new int[size];
+
+        for (int d = 0; d < size; ++d)
+            data[d] = d;
+
+        return data;
+    }
+
+    /**
+     *
+     */
+    private static class LoadRunnable implements IgniteRunnable {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** */
+        private boolean disableCheckpoints;
+
+        /**
+         * @param disableCheckpoints Disable checkpoints flag.
+         */
+        private LoadRunnable(boolean disableCheckpoints) {
+            this.disableCheckpoints = disableCheckpoints;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            ignite.log().info("Started load.");
+
+            if (disableCheckpoints) {
+                GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
+                    .cache().context().database();
+
+                try {
+                    dbMgr.enableCheckpoints(false).get();
+                }
+                catch (IgniteCheckedException e) {
+                    throw new IgniteException(e);
+                }
+            }
+
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+            IgniteCache<Object, Object> locCache = ignite.cache(LOC_CACHE_NAME);
+
+            for (int i = 0; i < 10_000; i++) {
+                cache.put(i, new IndexedObject(i));
+                locCache.put(i, new IndexedObject(i));
+            }
+
+            ignite.log().info("Finished load.");
+        }
+    }
+
+    /**
+     *
+     */
+    private static class AsyncLoadRunnable implements IgniteRunnable {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            ignite.log().info(">>>>>>> Started load.");
+
+            for (int i = 0; i < 4; i++) {
+                ignite.scheduler().callLocal(new Callable<Object>() {
+                    @Override public Object call() {
+                        IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+                        IgniteCache<Object, Object> locCache = ignite.cache(LOC_CACHE_NAME);
+
+                        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                        int cnt = 0;
+
+                        while (!Thread.currentThread().isInterrupted()) {
+                            cache.put(rnd.nextInt(10_000), new IndexedObject(rnd.nextInt()));
+                            locCache.put(rnd.nextInt(10_000), new IndexedObject(rnd.nextInt()));
+
+                            cnt++;
+
+                            if (cnt > 0 && cnt % 1_000 == 0)
+                                ignite.log().info(">>>> Updated: " + cnt);
+                        }
+
+                        return null;
+                    }
+                });
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private static class VerifyCallable implements IgniteCallable<Boolean> {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** {@inheritDoc} */
+        @Override public Boolean call() throws Exception {
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+            IgniteCache<Object, Object> locCache = ignite.cache(LOC_CACHE_NAME);
+
+            for (int i = 0; i < 10_000; i++) {
+                {
+                    Object val = cache.get(i);
+
+                    if (val == null) {
+                        ignite.log().warning("Failed to find a value for PARTITIONED cache key: " + i);
+
+                        return false;
+                    }
+                }
+
+                {
+                    Object val = locCache.get(i);
+
+                    if (val == null) {
+                        ignite.log().warning("Failed to find a value for LOCAL cache key: " + i);
+
+                        return false;
+                    }
+                }
+            }
+
+            return true;
+        }
+    }
+
+    /**
+     *
+     */
+    private static class LargeLoadRunnable implements IgniteRunnable {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** */
+        private boolean disableCheckpoints;
+
+        /**
+         * @param disableCheckpoints Disable checkpoints flag.
+         */
+        private LargeLoadRunnable(boolean disableCheckpoints) {
+            this.disableCheckpoints = disableCheckpoints;
+        }
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            ignite.log().info("Started load.");
+
+            if (disableCheckpoints) {
+                GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)((IgniteEx)ignite).context()
+                    .cache().context().database();
+
+                dbMgr.enableCheckpoints(false);
+            }
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+            IgniteCache<Object, Object> locCache = ignite.cache(LOC_CACHE_NAME);
+
+            for (int i = 0; i < 1000; i++) {
+                final long[] data = new long[LARGE_ARR_SIZE];
+
+                Arrays.fill(data, i);
+
+                cache.put(i, data);
+                locCache.put(i, data);
+            }
+
+            ignite.log().info("Finished load.");
+        }
+    }
+
+    /**
+     *
+     */
+    private static class AsyncLargeLoadRunnable implements IgniteRunnable {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** {@inheritDoc} */
+        @Override public void run() {
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            ignite.log().info(">>>>>>> Started load.");
+
+            for (int i = 0; i < 1; i++) {
+                ignite.scheduler().callLocal(new Callable<Object>() {
+                    @Override public Object call() {
+                        IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+                        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+                        int cnt = 0;
+
+                        while (!Thread.currentThread().isInterrupted()) {
+                            final long[] data = new long[LARGE_ARR_SIZE];
+
+                            final int key = rnd.nextInt(1000);
+
+                            Arrays.fill(data, key);
+
+                            cache.put(key, data);
+
+                            cnt++;
+
+                            if (cnt > 0 && cnt % 1_000 == 0)
+                                ignite.log().info(">>>> Updated: " + cnt);
+                        }
+
+                        return null;
+                    }
+                });
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    private static class VerifyLargeCallable implements IgniteCallable<Boolean> {
+        /** */
+        @IgniteInstanceResource
+        private Ignite ignite;
+
+        /** {@inheritDoc} */
+        @Override public Boolean call() throws Exception {
+            try {
+                boolean successfulWaiting = GridTestUtils.waitForCondition(new PAX() {
+                    @Override public boolean applyx() {
+                        return ignite.cache(CACHE_NAME) != null;
+                    }
+                }, 10_000);
+
+                assertTrue(successfulWaiting);
+            }
+            catch (IgniteInterruptedCheckedException e) {
+                throw new RuntimeException(e);
+            }
+
+            IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);
+
+            for (int i = 0; i < 1000; i++) {
+                final long[] data = new long[LARGE_ARR_SIZE];
+
+                Arrays.fill(data, i);
+
+                final Object val = cache.get(i);
+
+                if (val == null) {
+                    ignite.log().warning("Failed to find a value for key: " + i);
+
+                    return false;
+                }
+
+                assertTrue(Arrays.equals(data, (long[])val));
+            }
+
+            return true;
+        }
+    }
+
+
+    /**
+     *
+     */
+    private static class IndexedObject {
+        /** */
+        @QuerySqlField(index = true)
+        private int iVal;
+
+        /**
+         * @param iVal Integer value.
+         */
+        private IndexedObject(int iVal) {
+            this.iVal = iVal;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean equals(Object o) {
+            if (this == o)
+                return true;
+
+            if (!(o instanceof IndexedObject))
+                return false;
+
+            IndexedObject that = (IndexedObject)o;
+
+            return iVal == that.iVal;
+        }
+
+        /** {@inheritDoc} */
+        @Override public int hashCode() {
+            return iVal;
+        }
+
+        /** {@inheritDoc} */
+        @Override public String toString() {
+            return S.toString(IndexedObject.class, this);
+        }
+    }
+
+    /**
+     *
+     */
+    private enum EnumVal {
+        /** */
+        VAL1,
+
+        /** */
+        VAL2,
+
+        /** */
+        VAL3
+    }
+}
diff --git a/modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryWithCompactionTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryWithCompactionTest.java
similarity index 100%
rename from modules/core/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryWithCompactionTest.java
rename to modules/indexing/src/test/java/org/apache/ignite/internal/processors/cache/persistence/db/wal/IgniteWalRecoveryWithCompactionTest.java
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteTwoRegionsRebuildIndexTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteTwoRegionsRebuildIndexTest.java
new file mode 100644
index 0000000..7d5b296
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/database/IgniteTwoRegionsRebuildIndexTest.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.database;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Tests the case when preformed index rebuild for created by client in-memory cache.
+ */
+public class IgniteTwoRegionsRebuildIndexTest extends GridCommonAbstractTest {
+    /** */
+    private static final String PERSISTED_CACHE = "persisted";
+
+    /** */
+    private static final String INMEMORY_CACHE = "inmemory";
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);
+
+        boolean client = igniteInstanceName.startsWith("client");
+
+        DataStorageConfiguration dsCfg = new DataStorageConfiguration();
+
+        if (!client) {
+            DataRegionConfiguration drCfg1 = new DataRegionConfiguration();
+            drCfg1.setMaxSize(16 * 1024 * 1024);
+            drCfg1.setName("nopersistence");
+            drCfg1.setInitialSize(drCfg1.getMaxSize());
+            drCfg1.setPersistenceEnabled(false);
+
+            DataRegionConfiguration drCfg2 = new DataRegionConfiguration();
+            drCfg2.setMaxSize(16 * 1024 * 1024);
+            drCfg2.setName("persistence");
+            drCfg2.setInitialSize(drCfg2.getMaxSize());
+            drCfg2.setPersistenceEnabled(true);
+
+            dsCfg.setDataRegionConfigurations(drCfg1, drCfg2);
+
+            cfg.setDataStorageConfiguration(dsCfg);
+        }
+        else {
+            CacheConfiguration ccfg1 = new CacheConfiguration(PERSISTED_CACHE);
+            CacheConfiguration ccfg2 = new CacheConfiguration(INMEMORY_CACHE);
+
+            ccfg1.setDataRegionName("persistence");
+            ccfg2.setDataRegionName("nopersistence");
+
+            cfg.setCacheConfiguration(ccfg1, ccfg2);
+            cfg.setClientMode(true);
+        }
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testRebuildIndexes() throws Exception {
+        startGrid("server");
+        Ignite client = startGrid("client");
+
+        client.cluster().active(true);
+
+        populateData(client, PERSISTED_CACHE);
+        populateData(client, INMEMORY_CACHE);
+
+        stopGrid("server");
+        startGrid("server");
+
+        stopGrid("client");
+        startGrid("client");
+    }
+
+    /**
+     * @param ignite Ignite.
+     * @param cacheName Cache name.
+     */
+    private void populateData(Ignite ignite, String cacheName) {
+        try (IgniteDataStreamer<Object, Object> streamer = ignite.dataStreamer(cacheName)) {
+            for (int i = 0; i < 1000; i++)
+                streamer.addData(i, i);
+
+            streamer.flush();
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlIllegalSchemaSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlIllegalSchemaSelfTest.java
new file mode 100644
index 0000000..e56f8a2
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlIllegalSchemaSelfTest.java
@@ -0,0 +1,187 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query;
+
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import javax.cache.CacheException;
+import java.util.concurrent.Callable;
+
+/**
+ * Tests for illegal SQL schemas in node and cache configurations.
+ */
+@SuppressWarnings({"ThrowableNotThrown", "unchecked"})
+public class SqlIllegalSchemaSelfTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadCacheName() throws Exception {
+        IgniteConfiguration cfg = getConfiguration();
+
+        cfg.setCacheConfiguration(new CacheConfiguration().setName(QueryUtils.SCHEMA_SYS));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                Ignition.start(cfg);
+
+                return null;
+            }
+        }, IgniteException.class, "SQL schema name derived from cache name is reserved (please set explicit SQL " +
+            "schema name through CacheConfiguration.setSqlSchema() or choose another cache name) [cacheName=IGNITE, " +
+            "schemaName=null]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadCacheNameDynamic() throws Exception {
+        Ignite node = startGrid();
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.getOrCreateCache(new CacheConfiguration().setName(QueryUtils.SCHEMA_SYS));
+
+                return null;
+            }
+        }, CacheException.class, "SQL schema name derived from cache name is reserved (please set explicit SQL " +
+            "schema name through CacheConfiguration.setSqlSchema() or choose another cache name) [" +
+            "cacheName=IGNITE, schemaName=null]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaLower() throws Exception {
+        IgniteConfiguration cfg = getConfiguration();
+
+        cfg.setCacheConfiguration(new CacheConfiguration().setName("CACHE")
+            .setSqlSchema(QueryUtils.SCHEMA_SYS.toLowerCase()));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                Ignition.start(cfg);
+
+                return null;
+            }
+        }, IgniteException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, " +
+            "schemaName=ignite]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaLowerDynamic() throws Exception {
+        Ignite node = startGrid();
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.getOrCreateCache(
+                    new CacheConfiguration().setName("CACHE").setSqlSchema(QueryUtils.SCHEMA_SYS.toLowerCase())
+                );
+
+                return null;
+            }
+        }, CacheException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, schemaName=ignite]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaUpper() throws Exception {
+        IgniteConfiguration cfg = getConfiguration();
+
+        cfg.setCacheConfiguration(new CacheConfiguration().setName("CACHE")
+            .setSqlSchema(QueryUtils.SCHEMA_SYS.toUpperCase()));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                Ignition.start(cfg);
+
+                return null;
+            }
+        }, IgniteException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, " +
+            "schemaName=IGNITE]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaUpperDynamic() throws Exception {
+        Ignite node = startGrid();
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.getOrCreateCache(
+                    new CacheConfiguration().setName("CACHE").setSqlSchema(QueryUtils.SCHEMA_SYS.toUpperCase())
+                );
+
+                return null;
+            }
+        }, CacheException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, " +
+            "schemaName=IGNITE]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaQuoted() throws Exception {
+        IgniteConfiguration cfg = getConfiguration();
+
+        cfg.setCacheConfiguration(new CacheConfiguration().setName("CACHE")
+            .setSqlSchema("\"" + QueryUtils.SCHEMA_SYS.toUpperCase() + "\""));
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                Ignition.start(cfg);
+
+                return null;
+            }
+        }, IgniteException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, " +
+            "schemaName=\"IGNITE\"]");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBadSchemaQuotedDynamic() throws Exception {
+        Ignite node = startGrid();
+
+        GridTestUtils.assertThrows(log, new Callable<Void>() {
+            @Override public Void call() throws Exception {
+                node.getOrCreateCache(
+                    new CacheConfiguration().setName("CACHE")
+                        .setSqlSchema("\"" + QueryUtils.SCHEMA_SYS.toUpperCase() + "\"")
+                );
+
+                return null;
+            }
+        }, CacheException.class, "SQL schema name is reserved (please choose another one) [cacheName=CACHE, " +
+            "schemaName=\"IGNITE\"]");
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java
index 9c7ce38..1a4dae7 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/SqlSystemViewsSelfTest.java
@@ -17,27 +17,53 @@
 
 package org.apache.ignite.internal.processors.query;
 
+import java.sql.Time;
+import java.sql.Timestamp;
 import java.util.Collections;
 import java.util.List;
+import java.util.Random;
+import java.util.TimeZone;
 import java.util.UUID;
 import java.util.concurrent.Callable;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.cache.QueryEntity;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cluster.ClusterMetrics;
 import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.DataRegionConfiguration;
+import org.apache.ignite.configuration.DataStorageConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.ClusterMetricsSnapshot;
+import org.apache.ignite.internal.IgniteNodeAttributes;
 import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode;
+import org.apache.ignite.internal.util.typedef.F;
+import org.apache.ignite.internal.util.typedef.G;
 import org.apache.ignite.internal.util.typedef.X;
+import org.apache.ignite.lang.IgniteFuture;
+import org.apache.ignite.lang.IgniteRunnable;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 
 /**
- * Tests for ignite SQL meta views.
+ * Tests for ignite SQL system views.
  */
 public class SqlSystemViewsSelfTest extends GridCommonAbstractTest {
+    /** Metrics check attempts. */
+    private static final int METRICS_CHECK_ATTEMPTS = 10;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        cleanPersistenceDir();
+    }
+
     /** {@inheritDoc} */
     @Override protected void afterTest() throws Exception {
         stopAllGrids();
+
+        cleanPersistenceDir();
     }
 
     /**
@@ -70,7 +96,7 @@
      */
     private void assertSqlError(final String sql) {
         Throwable t = GridTestUtils.assertThrowsWithCause(new Callable<Void>() {
-            @Override public Void call() throws Exception {
+            @Override public Void call() {
                 execSql(sql);
 
                 return null;
@@ -85,7 +111,7 @@
     }
     
     /**
-     * Test meta views modifications.
+     * Test system views modifications.
      */
     public void testModifications() throws Exception {
         startGrid();
@@ -122,7 +148,7 @@
 
         IgniteCache cache = ignite.getOrCreateCache(DEFAULT_CACHE_NAME);
 
-        String sql = "SELECT ID FROM IGNITE.NODES WHERE IS_LOCAL = true";
+        String sql = "SELECT ID FROM IGNITE.NODES WHERE NODE_ORDER = 1";
 
         SqlFieldsQuery qry;
 
@@ -140,7 +166,7 @@
     }
 
     /**
-     * Test that we can't use cache tables and meta views in the same query.
+     * Test that we can't use cache tables and system views in the same query.
      */
     public void testCacheToViewJoin() throws Exception {
         Ignite ignite = startGrid();
@@ -158,90 +184,348 @@
     private void assertColumnTypes(List<?> rowData, Class<?> ... colTypes) {
         for (int i = 0; i < colTypes.length; i++) {
             if (rowData.get(i) != null)
-                assertEquals("Column " + i + " type", rowData.get(i).getClass(), colTypes[i]);
+                assertEquals("Column " + i + " type", colTypes[i], rowData.get(i).getClass());
         }
     }
 
     /**
-     * Test nodes meta view.
+     * Test nodes system view.
      *
      * @throws Exception If failed.
      */
     public void testNodesViews() throws Exception {
-        Ignite ignite1 = startGrid(getTestIgniteInstanceName(), getConfiguration());
-        Ignite ignite2 = startGrid(getTestIgniteInstanceName(1), getConfiguration().setClientMode(true));
-        Ignite ignite3 = startGrid(getTestIgniteInstanceName(2), getConfiguration().setDaemon(true));
+        Ignite igniteSrv = startGrid(getTestIgniteInstanceName(), getConfiguration().setMetricsUpdateFrequency(500L));
+
+        Ignite igniteCli = startGrid(getTestIgniteInstanceName(1), getConfiguration().setMetricsUpdateFrequency(500L)
+            .setClientMode(true));
+
+        startGrid(getTestIgniteInstanceName(2), getConfiguration().setMetricsUpdateFrequency(500L).setDaemon(true));
+
+        UUID nodeId0 = igniteSrv.cluster().localNode().id();
 
         awaitPartitionMapExchange();
 
-        List<List<?>> resAll = execSql("SELECT ID, CONSISTENT_ID, VERSION, IS_LOCAL, IS_CLIENT, IS_DAEMON, " +
+        List<List<?>> resAll = execSql("SELECT ID, CONSISTENT_ID, VERSION, IS_CLIENT, IS_DAEMON, " +
                 "NODE_ORDER, ADDRESSES, HOSTNAMES FROM IGNITE.NODES");
 
         assertColumnTypes(resAll.get(0), UUID.class, String.class, String.class, Boolean.class, Boolean.class,
-            Boolean.class, Integer.class, String.class, String.class);
+            Integer.class, String.class, String.class);
 
         assertEquals(3, resAll.size());
 
         List<List<?>> resSrv = execSql(
-            "SELECT ID, IS_LOCAL, NODE_ORDER FROM IGNITE.NODES WHERE IS_CLIENT = FALSE AND IS_DAEMON = FALSE"
+            "SELECT ID, NODE_ORDER FROM IGNITE.NODES WHERE IS_CLIENT = FALSE AND IS_DAEMON = FALSE"
         );
 
         assertEquals(1, resSrv.size());
 
-        assertEquals(ignite1.cluster().localNode().id(), resSrv.get(0).get(0));
+        assertEquals(nodeId0, resSrv.get(0).get(0));
 
-        assertEquals(true, resSrv.get(0).get(1));
-
-        assertEquals(1, resSrv.get(0).get(2));
+        assertEquals(1, resSrv.get(0).get(1));
 
         List<List<?>> resCli = execSql(
-            "SELECT ID, IS_LOCAL, NODE_ORDER FROM IGNITE.NODES WHERE IS_CLIENT = TRUE");
+            "SELECT ID, NODE_ORDER FROM IGNITE.NODES WHERE IS_CLIENT = TRUE");
 
         assertEquals(1, resCli.size());
 
-        assertEquals(ignite2.cluster().localNode().id(), resCli.get(0).get(0));
+        assertEquals(nodeId(1), resCli.get(0).get(0));
 
-        assertEquals(false, resCli.get(0).get(1));
-
-        assertEquals(2, resCli.get(0).get(2));
+        assertEquals(2, resCli.get(0).get(1));
 
         List<List<?>> resDaemon = execSql(
-            "SELECT ID, IS_LOCAL, NODE_ORDER FROM IGNITE.NODES WHERE IS_DAEMON = TRUE");
+            "SELECT ID, NODE_ORDER FROM IGNITE.NODES WHERE IS_DAEMON = TRUE");
 
         assertEquals(1, resDaemon.size());
 
-        assertEquals(ignite3.cluster().localNode().id(), resDaemon.get(0).get(0));
+        assertEquals(nodeId(2), resDaemon.get(0).get(0));
 
-        assertEquals(false, resDaemon.get(0).get(1));
-
-        assertEquals(3, resDaemon.get(0).get(2));
+        assertEquals(3, resDaemon.get(0).get(1));
 
         // Check index on ID column.
         assertEquals(0, execSql("SELECT ID FROM IGNITE.NODES WHERE ID = '-'").size());
 
         assertEquals(1, execSql("SELECT ID FROM IGNITE.NODES WHERE ID = ?",
-            ignite1.cluster().localNode().id()).size());
+            nodeId0).size());
 
         assertEquals(1, execSql("SELECT ID FROM IGNITE.NODES WHERE ID = ?",
-            ignite3.cluster().localNode().id()).size());
+            nodeId(2)).size());
 
-        // Check index on IS_LOCAL column.
-        assertEquals(1, execSql("SELECT ID FROM IGNITE.NODES WHERE IS_LOCAL = true").size());
-
-        // Check index on IS_LOCAL column with disjunction.
-        assertEquals(3, execSql("SELECT ID FROM IGNITE.NODES WHERE IS_LOCAL = true OR node_order=1 OR node_order=2 OR node_order=3").size());
+        // Check index on ID column with disjunction.
+        assertEquals(3, execSql("SELECT ID FROM IGNITE.NODES WHERE ID = ? " +
+            "OR node_order=1 OR node_order=2 OR node_order=3", nodeId0).size());
 
         // Check quick-count.
         assertEquals(3L, execSql("SELECT COUNT(*) FROM IGNITE.NODES").get(0).get(0));
 
         // Check joins
-        assertEquals(ignite1.cluster().localNode().id(), execSql("SELECT N1.ID FROM IGNITE.NODES N1 JOIN " +
-            "IGNITE.NODES N2 ON N1.IS_LOCAL = N2.IS_LOCAL JOIN IGNITE.NODES N3 ON N2.ID = N3.ID WHERE N3.IS_LOCAL = true")
+        assertEquals(nodeId0, execSql("SELECT N1.ID FROM IGNITE.NODES N1 JOIN " +
+            "IGNITE.NODES N2 ON N1.NODE_ORDER = N2.NODE_ORDER JOIN IGNITE.NODES N3 ON N2.ID = N3.ID " +
+            "WHERE N3.NODE_ORDER = 1")
             .get(0).get(0));
 
         // Check sub-query
-        assertEquals(ignite1.cluster().localNode().id(), execSql("SELECT N1.ID FROM IGNITE.NODES N1 " +
-            "WHERE NOT EXISTS (SELECT 1 FROM IGNITE.NODES N2 WHERE N2.ID = N1.ID AND N2.IS_LOCAL = false)")
+        assertEquals(nodeId0, execSql("SELECT N1.ID FROM IGNITE.NODES N1 " +
+            "WHERE NOT EXISTS (SELECT 1 FROM IGNITE.NODES N2 WHERE N2.ID = N1.ID AND N2.NODE_ORDER <> 1)")
             .get(0).get(0));
+
+        // Check node attributes view
+        String cliAttrName = IgniteNodeAttributes.ATTR_CLIENT_MODE;
+
+        assertColumnTypes(execSql("SELECT NODE_ID, NAME, VALUE FROM IGNITE.NODE_ATTRIBUTES").get(0),
+            UUID.class, String.class, String.class);
+
+        assertEquals(1,
+            execSql("SELECT NODE_ID FROM IGNITE.NODE_ATTRIBUTES WHERE NAME = ? AND VALUE = 'true'",
+                cliAttrName).size());
+
+        assertEquals(3,
+            execSql("SELECT NODE_ID FROM IGNITE.NODE_ATTRIBUTES WHERE NAME = ?", cliAttrName).size());
+
+        assertEquals(1,
+            execSql("SELECT NODE_ID FROM IGNITE.NODE_ATTRIBUTES WHERE NODE_ID = ? AND NAME = ? AND VALUE = 'true'",
+                nodeId(1), cliAttrName).size());
+
+        assertEquals(0,
+            execSql("SELECT NODE_ID FROM IGNITE.NODE_ATTRIBUTES WHERE NODE_ID = '-' AND NAME = ?",
+                cliAttrName).size());
+
+        assertEquals(0,
+            execSql("SELECT NODE_ID FROM IGNITE.NODE_ATTRIBUTES WHERE NODE_ID = ? AND NAME = '-'",
+                nodeId(1)).size());
+
+        // Check node metrics view.
+        String sqlAllMetrics = "SELECT NODE_ID, LAST_UPDATE_TIME, " +
+            "MAX_ACTIVE_JOBS, CUR_ACTIVE_JOBS, AVG_ACTIVE_JOBS, " +
+            "MAX_WAITING_JOBS, CUR_WAITING_JOBS, AVG_WAITING_JOBS, " +
+            "MAX_REJECTED_JOBS, CUR_REJECTED_JOBS, AVG_REJECTED_JOBS, TOTAL_REJECTED_JOBS, " +
+            "MAX_CANCELED_JOBS, CUR_CANCELED_JOBS, AVG_CANCELED_JOBS, TOTAL_CANCELED_JOBS, " +
+            "MAX_JOBS_WAIT_TIME, CUR_JOBS_WAIT_TIME, AVG_JOBS_WAIT_TIME, " +
+            "MAX_JOBS_EXECUTE_TIME, CUR_JOBS_EXECUTE_TIME, AVG_JOBS_EXECUTE_TIME, TOTAL_JOBS_EXECUTE_TIME, " +
+            "TOTAL_EXECUTED_JOBS, TOTAL_EXECUTED_TASKS, " +
+            "TOTAL_BUSY_TIME, TOTAL_IDLE_TIME, CUR_IDLE_TIME, BUSY_TIME_PERCENTAGE, IDLE_TIME_PERCENTAGE, " +
+            "TOTAL_CPU, CUR_CPU_LOAD, AVG_CPU_LOAD, CUR_GC_CPU_LOAD, " +
+            "HEAP_MEMORY_INIT, HEAP_MEMORY_USED, HEAP_MEMORY_COMMITED, HEAP_MEMORY_MAX, HEAP_MEMORY_TOTAL, " +
+            "NONHEAP_MEMORY_INIT, NONHEAP_MEMORY_USED, NONHEAP_MEMORY_COMMITED, NONHEAP_MEMORY_MAX, NONHEAP_MEMORY_TOTAL, " +
+            "UPTIME, JVM_START_TIME, NODE_START_TIME, LAST_DATA_VERSION, " +
+            "CUR_THREAD_COUNT, MAX_THREAD_COUNT, TOTAL_THREAD_COUNT, CUR_DAEMON_THREAD_COUNT, " +
+            "SENT_MESSAGES_COUNT, SENT_BYTES_COUNT, RECEIVED_MESSAGES_COUNT, RECEIVED_BYTES_COUNT, " +
+            "OUTBOUND_MESSAGES_QUEUE FROM IGNITE.NODE_METRICS";
+
+        List<List<?>> resMetrics = execSql(sqlAllMetrics);
+
+        assertColumnTypes(resMetrics.get(0), UUID.class, Timestamp.class,
+            Integer.class, Integer.class, Float.class, // Active jobs.
+            Integer.class, Integer.class, Float.class, // Waiting jobs.
+            Integer.class, Integer.class, Float.class, Integer.class, // Rejected jobs.
+            Integer.class, Integer.class, Float.class, Integer.class, // Canceled jobs.
+            Time.class, Time.class, Time.class, // Jobs wait time.
+            Time.class, Time.class, Time.class, Time.class, // Jobs execute time.
+            Integer.class, Integer.class, // Executed jobs/task.
+            Time.class, Time.class, Time.class, Float.class, Float.class, // Busy/idle time.
+            Integer.class, Double.class, Double.class, Double.class, // CPU.
+            Long.class, Long.class, Long.class, Long.class, Long.class, // Heap memory.
+            Long.class, Long.class, Long.class, Long.class, Long.class, // Nonheap memory.
+            Time.class, Timestamp.class, Timestamp.class, Long.class, // Uptime.
+            Integer.class, Integer.class, Long.class, Integer.class, // Threads.
+            Integer.class, Long.class, Integer.class, Long.class, // Sent/received messages.
+            Integer.class); // Outbound message queue.
+
+        assertEquals(3, resAll.size());
+
+        // Check join with nodes.
+        assertEquals(3, execSql("SELECT NM.LAST_UPDATE_TIME FROM IGNITE.NODES N " +
+            "JOIN IGNITE.NODE_METRICS NM ON N.ID = NM.NODE_ID").size());
+
+        // Check index on NODE_ID column.
+        assertEquals(1, execSql("SELECT LAST_UPDATE_TIME FROM IGNITE.NODE_METRICS WHERE NODE_ID = ?",
+            nodeId(1)).size());
+
+        // Check malformed value for indexed column.
+        assertEquals(0, execSql("SELECT LAST_UPDATE_TIME FROM IGNITE.NODE_METRICS WHERE NODE_ID = ?",
+            "-").size());
+
+        // Check quick-count.
+        assertEquals(3L, execSql("SELECT COUNT(*) FROM IGNITE.NODE_METRICS").get(0).get(0));
+
+        // Check metric values.
+
+        // Broadcast jobs to server and client nodes to get non zero metric values.
+        for (int i = 0; i < 100; i++) {
+            IgniteFuture<Void > fut = igniteSrv.compute(igniteSrv.cluster().forNodeId(nodeId0, nodeId(1)))
+                .broadcastAsync(
+                    new IgniteRunnable() {
+                        @Override public void run() {
+                            Random rnd = new Random();
+
+                            try {
+                                doSleep(rnd.nextInt(100));
+                            }
+                            catch (Throwable ignore) {
+                                // No-op.
+                            }
+                        }
+                    });
+
+            if (i % 10 == 0)
+                fut.cancel();
+        }
+
+        doSleep(igniteSrv.configuration().getMetricsUpdateFrequency() * 3L);
+
+        for (Ignite grid : G.allGrids()) {
+            UUID nodeId = grid.cluster().localNode().id();
+
+            // Metrics for node must be collected from another node to avoid race and get consistent metrics snapshot.
+            Ignite ignite = F.eq(nodeId, nodeId0) ? igniteCli : igniteSrv;
+
+            for (int i = 0; i < METRICS_CHECK_ATTEMPTS; i++) {
+                ClusterMetrics metrics = ignite.cluster().node(nodeId).metrics();
+
+                assertTrue(metrics instanceof ClusterMetricsSnapshot);
+
+                resMetrics = execSql(ignite, sqlAllMetrics + " WHERE NODE_ID = ?", nodeId);
+
+                log.info("Check metrics for node " + grid.name() + ", attempt " + (i + 1));
+
+                if (metrics.getLastUpdateTime() == ((Timestamp)resMetrics.get(0).get(1)).getTime()) {
+                    assertEquals(metrics.getMaximumActiveJobs(), resMetrics.get(0).get(2));
+                    assertEquals(metrics.getCurrentActiveJobs(), resMetrics.get(0).get(3));
+                    assertEquals(metrics.getAverageActiveJobs(), resMetrics.get(0).get(4));
+                    assertEquals(metrics.getMaximumWaitingJobs(), resMetrics.get(0).get(5));
+                    assertEquals(metrics.getCurrentWaitingJobs(), resMetrics.get(0).get(6));
+                    assertEquals(metrics.getAverageWaitingJobs(), resMetrics.get(0).get(7));
+                    assertEquals(metrics.getMaximumRejectedJobs(), resMetrics.get(0).get(8));
+                    assertEquals(metrics.getCurrentRejectedJobs(), resMetrics.get(0).get(9));
+                    assertEquals(metrics.getAverageRejectedJobs(), resMetrics.get(0).get(10));
+                    assertEquals(metrics.getTotalRejectedJobs(), resMetrics.get(0).get(11));
+                    assertEquals(metrics.getMaximumCancelledJobs(), resMetrics.get(0).get(12));
+                    assertEquals(metrics.getCurrentCancelledJobs(), resMetrics.get(0).get(13));
+                    assertEquals(metrics.getAverageCancelledJobs(), resMetrics.get(0).get(14));
+                    assertEquals(metrics.getTotalCancelledJobs(), resMetrics.get(0).get(15));
+                    assertEquals(metrics.getMaximumJobWaitTime(), convertToMilliseconds(resMetrics.get(0).get(16)));
+                    assertEquals(metrics.getCurrentJobWaitTime(), convertToMilliseconds(resMetrics.get(0).get(17)));
+                    assertEquals((long)metrics.getAverageJobWaitTime(), convertToMilliseconds(resMetrics.get(0).get(18)));
+                    assertEquals(metrics.getMaximumJobExecuteTime(), convertToMilliseconds(resMetrics.get(0).get(19)));
+                    assertEquals(metrics.getCurrentJobExecuteTime(), convertToMilliseconds(resMetrics.get(0).get(20)));
+                    assertEquals((long)metrics.getAverageJobExecuteTime(), convertToMilliseconds(resMetrics.get(0).get(21)));
+                    assertEquals(metrics.getTotalJobsExecutionTime(), convertToMilliseconds(resMetrics.get(0).get(22)));
+                    assertEquals(metrics.getTotalExecutedJobs(), resMetrics.get(0).get(23));
+                    assertEquals(metrics.getTotalExecutedTasks(), resMetrics.get(0).get(24));
+                    assertEquals(metrics.getTotalBusyTime(), convertToMilliseconds(resMetrics.get(0).get(25)));
+                    assertEquals(metrics.getTotalIdleTime(), convertToMilliseconds(resMetrics.get(0).get(26)));
+                    assertEquals(metrics.getCurrentIdleTime(), convertToMilliseconds(resMetrics.get(0).get(27)));
+                    assertEquals(metrics.getBusyTimePercentage(), resMetrics.get(0).get(28));
+                    assertEquals(metrics.getIdleTimePercentage(), resMetrics.get(0).get(29));
+                    assertEquals(metrics.getTotalCpus(), resMetrics.get(0).get(30));
+                    assertEquals(metrics.getCurrentCpuLoad(), resMetrics.get(0).get(31));
+                    assertEquals(metrics.getAverageCpuLoad(), resMetrics.get(0).get(32));
+                    assertEquals(metrics.getCurrentGcCpuLoad(), resMetrics.get(0).get(33));
+                    assertEquals(metrics.getHeapMemoryInitialized(), resMetrics.get(0).get(34));
+                    assertEquals(metrics.getHeapMemoryUsed(), resMetrics.get(0).get(35));
+                    assertEquals(metrics.getHeapMemoryCommitted(), resMetrics.get(0).get(36));
+                    assertEquals(metrics.getHeapMemoryMaximum(), resMetrics.get(0).get(37));
+                    assertEquals(metrics.getHeapMemoryTotal(), resMetrics.get(0).get(38));
+                    assertEquals(metrics.getNonHeapMemoryInitialized(), resMetrics.get(0).get(39));
+                    assertEquals(metrics.getNonHeapMemoryUsed(), resMetrics.get(0).get(40));
+                    assertEquals(metrics.getNonHeapMemoryCommitted(), resMetrics.get(0).get(41));
+                    assertEquals(metrics.getNonHeapMemoryMaximum(), resMetrics.get(0).get(42));
+                    assertEquals(metrics.getNonHeapMemoryTotal(), resMetrics.get(0).get(43));
+                    assertEquals(metrics.getUpTime(), convertToMilliseconds(resMetrics.get(0).get(44)));
+                    assertEquals(metrics.getStartTime(), ((Timestamp)resMetrics.get(0).get(45)).getTime());
+                    assertEquals(metrics.getNodeStartTime(), ((Timestamp)resMetrics.get(0).get(46)).getTime());
+                    assertEquals(metrics.getLastDataVersion(), resMetrics.get(0).get(47));
+                    assertEquals(metrics.getCurrentThreadCount(), resMetrics.get(0).get(48));
+                    assertEquals(metrics.getMaximumThreadCount(), resMetrics.get(0).get(49));
+                    assertEquals(metrics.getTotalStartedThreadCount(), resMetrics.get(0).get(50));
+                    assertEquals(metrics.getCurrentDaemonThreadCount(), resMetrics.get(0).get(51));
+                    assertEquals(metrics.getSentMessagesCount(), resMetrics.get(0).get(52));
+                    assertEquals(metrics.getSentBytesCount(), resMetrics.get(0).get(53));
+                    assertEquals(metrics.getReceivedMessagesCount(), resMetrics.get(0).get(54));
+                    assertEquals(metrics.getReceivedBytesCount(), resMetrics.get(0).get(55));
+                    assertEquals(metrics.getOutboundMessagesQueueSize(), resMetrics.get(0).get(56));
+
+                    break;
+                }
+                else {
+                    log.info("Metrics was updated in background, will retry check");
+
+                    if (i == METRICS_CHECK_ATTEMPTS - 1)
+                        fail("Failed to check metrics, attempts limit reached (" + METRICS_CHECK_ATTEMPTS + ')');
+                }
+            }
+        }
+    }
+
+    /**
+     * Test baseline topology system view.
+     */
+    public void testBaselineViews() throws Exception {
+        cleanPersistenceDir();
+
+        Ignite ignite = startGrid(getTestIgniteInstanceName(), getPdsConfiguration("node0"));
+        startGrid(getTestIgniteInstanceName(1), getPdsConfiguration("node1"));
+
+        ignite.cluster().active(true);
+
+        List<List<?>> res = execSql("SELECT CONSISTENT_ID, ONLINE FROM IGNITE.BASELINE_NODES ORDER BY CONSISTENT_ID");
+
+        assertColumnTypes(res.get(0), String.class, Boolean.class);
+
+        assertEquals(2, res.size());
+
+        assertEquals("node0", res.get(0).get(0));
+        assertEquals("node1", res.get(1).get(0));
+
+        assertEquals(true, res.get(0).get(1));
+        assertEquals(true, res.get(1).get(1));
+
+        stopGrid(getTestIgniteInstanceName(1));
+
+        res = execSql("SELECT CONSISTENT_ID FROM IGNITE.BASELINE_NODES WHERE ONLINE = false");
+
+        assertEquals(1, res.size());
+
+        assertEquals("node1", res.get(0).get(0));
+
+        Ignite ignite2 = startGrid(getTestIgniteInstanceName(2), getPdsConfiguration("node2"));
+
+        assertEquals(2, execSql(ignite2, "SELECT CONSISTENT_ID FROM IGNITE.BASELINE_NODES").size());
+
+        res = execSql("SELECT CONSISTENT_ID FROM IGNITE.NODES N WHERE NOT EXISTS (SELECT 1 FROM " +
+            "IGNITE.BASELINE_NODES B WHERE B.CONSISTENT_ID = N.CONSISTENT_ID)");
+
+        assertEquals(1, res.size());
+
+        assertEquals("node2", res.get(0).get(0));
+    }
+
+    /**
+     * Gets ignite configuration with persistence enabled.
+     */
+    private IgniteConfiguration getPdsConfiguration(String consistentId) throws Exception {
+        IgniteConfiguration cfg = getConfiguration();
+
+        cfg.setDataStorageConfiguration(
+            new DataStorageConfiguration().setDefaultDataRegionConfiguration(new DataRegionConfiguration()
+                .setMaxSize(100L * 1024L * 1024L).setPersistenceEnabled(true))
+        );
+
+        cfg.setConsistentId(consistentId);
+
+        return cfg;
+    }
+
+    /**
+     * Convert Time to milliseconds.
+     *
+     * Note: Returned Time values from SQL it's milliseconds since January 1, 1970, 00:00:00 GMT. To get right interval
+     * in milliseconds this value must be adjusted to current time zone.
+     *
+     * @param sqlTime Time value returned from SQL.
+     */
+    private long convertToMilliseconds(Object sqlTime) {
+        Time time0 = (Time)sqlTime;
+
+        return time0.getTime() + TimeZone.getDefault().getOffset(time0.getTime());
     }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildSelfTest.java
new file mode 100644
index 0000000..c5f1441
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildSelfTest.java
@@ -0,0 +1,240 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.io.File;
+import java.util.concurrent.CountDownLatch;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
+import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
+import org.apache.ignite.internal.processors.cache.index.DynamicIndexAbstractSelfTest;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.internal.util.lang.GridCursor;
+import org.apache.ignite.internal.util.typedef.internal.U;
+
+/**
+ * Index rebuild after node restart test.
+ */
+public class GridIndexRebuildSelfTest extends DynamicIndexAbstractSelfTest {
+    /** Data size. */
+    protected static final int AMOUNT = 300;
+
+    /** Data size. */
+    protected static final String CACHE_NAME = "T";
+
+    /** Test instance to allow interaction with static context. */
+    private static GridIndexRebuildSelfTest INSTANCE;
+
+    /** Latch to signal that rebuild may start. */
+    private final CountDownLatch rebuildLatch = new CountDownLatch(1);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration commonConfiguration(int idx) throws Exception {
+        IgniteConfiguration cfg =  super.commonConfiguration(idx);
+
+        cfg.getDataStorageConfiguration().getDefaultDataRegionConfiguration()
+            .setMaxSize(300*1024L*1024L)
+            .setPersistenceEnabled(true);
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        super.beforeTest();
+
+        // Just in case.
+        cleanPersistenceDir();
+
+        INSTANCE = this;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        super.afterTest();
+
+        stopAllGrids();
+
+        cleanPersistenceDir();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        super.afterTestsStopped();
+
+        cleanPersistenceDir();
+    }
+
+    /**
+     * Do test.
+     * <p>
+     * Steps are as follows:
+     * <ul>
+     *     <li>Put some data;</li>
+     *     <li>Stop the node;</li>
+     *     <li>Remove index file;</li>
+     *     <li>Restart the node and block index rebuild;</li>
+     *     <li>For half of the keys do cache puts <b>before</b> corresponding key
+     *     has been processed during index rebuild;</li>
+     *     <li>Check that:
+     *         <ul>
+     *             <li>For MVCC case: some keys have all versions that existed before restart, while those
+     *             updated concurrently have only put version (one with mark value -1)
+     *             and latest version present before node restart;</li>
+     *             <li>For non MVCC case: keys updated concurrently must have mark values of -1 despite that
+     *             index rebuild for them has happened after put.</li>
+     *         </ul>
+     *     </li>
+     * </ul></p>
+     * @throws Exception if failed.
+     */
+    public void testIndexRebuild() throws Exception {
+        IgniteEx srv = startServer();
+
+        execute(srv, "CREATE TABLE T(k int primary key, v int) WITH \"cache_name=T,wrap_value=false," +
+            "atomicity=transactional_snapshot\"");
+
+        execute(srv, "CREATE INDEX IDX ON T(v)");
+
+        IgniteInternalCache cc = srv.cachex(CACHE_NAME);
+
+        assertNotNull(cc);
+
+        putData(srv, false);
+
+        checkDataState(srv, false);
+
+        File cacheWorkDir = ((FilePageStoreManager)cc.context().shared().pageStore()).cacheWorkDir(cc.configuration());
+
+        File idxPath = cacheWorkDir.toPath().resolve("index.bin").toFile();
+
+        stopAllGrids();
+
+        assertTrue(U.delete(idxPath));
+
+        srv = startServer();
+
+        putData(srv, true);
+
+        checkDataState(srv, true);
+    }
+
+    /**
+     * Check versions presence in index tree.
+     *
+     * @param srv Node.
+     * @param afterRebuild Whether index rebuild has occurred.
+     * @throws IgniteCheckedException if failed.
+     */
+    @SuppressWarnings({"ConstantConditions", "unchecked"})
+    protected void checkDataState(IgniteEx srv, boolean afterRebuild) throws IgniteCheckedException {
+        IgniteInternalCache icache = srv.cachex(CACHE_NAME);
+
+        IgniteCache cache = srv.cache(CACHE_NAME);
+
+        assertNotNull(icache);
+
+        for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
+            GridCursor<? extends CacheDataRow> cur = store.cursor();
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                int key = row.key().value(icache.context().cacheObjectContext(), false);
+
+                if (!afterRebuild || key <= AMOUNT / 2)
+                    assertEquals(key, cache.get(key));
+                else
+                    assertEquals(-1, cache.get(key));
+            }
+        }
+    }
+
+    /**
+     * Put data to cache.
+     *
+     * @param node Node.
+     * @throws Exception if failed.
+     */
+    protected void putData(Ignite node, final boolean forConcurrentPut) throws Exception {
+        final IgniteCache<Integer, Integer> cache = node.cache(CACHE_NAME);
+
+        assertNotNull(cache);
+
+        for (int i = 1; i <= AMOUNT; i++) {
+            if (forConcurrentPut) {
+                // Concurrent put affects only second half of the keys.
+                if (i <= AMOUNT / 2)
+                    continue;
+
+                cache.put(i, -1);
+
+                rebuildLatch.countDown();
+            }
+            else {
+                // Data streamer is not used intentionally in order to preserve all versions.
+                for (int j = 1; j <= i; j++)
+                    cache.put(i, j);
+            }
+        }
+    }
+
+    /**
+     * Start server node.
+     *
+     * @return Started node.
+     * @throws Exception if failed.
+     */
+    protected IgniteEx startServer() throws Exception {
+        // Have to do this for each starting node - see GridQueryProcessor ctor, it nulls
+        // idxCls static field on each call.
+        GridQueryProcessor.idxCls = BlockingIndexing.class;
+
+        IgniteConfiguration cfg = serverConfiguration(0);
+
+        IgniteEx res = startGrid(cfg);
+
+        res.active(true);
+
+        return res;
+    }
+
+    /**
+     * Blocking indexing processor.
+     */
+    private static class BlockingIndexing extends IgniteH2Indexing {
+        /** Flag to ignore first rebuild performed on initial node start. */
+        private boolean firstRbld = true;
+
+        /** {@inheritDoc} */
+        @Override public void rebuildIndexesFromHash(String cacheName) throws IgniteCheckedException {
+            if (!firstRbld)
+                U.await(INSTANCE.rebuildLatch);
+            else
+                firstRbld = false;
+
+            super.rebuildIndexesFromHash(cacheName);
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildWithMvccEnabledSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildWithMvccEnabledSelfTest.java
new file mode 100644
index 0000000..cf68546
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexRebuildWithMvccEnabledSelfTest.java
@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.io.File;
+import java.util.List;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.cache.CacheObject;
+import org.apache.ignite.internal.processors.cache.CacheObjectContext;
+import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
+import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
+import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
+import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
+import org.apache.ignite.internal.util.lang.GridCursor;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+
+/**
+ * Index rebuild after node restart test.
+ */
+public class GridIndexRebuildWithMvccEnabledSelfTest extends GridIndexRebuildSelfTest {
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration serverConfiguration(int idx, boolean filter) throws Exception {
+        return super.serverConfiguration(idx, filter)
+            .setMvccVacuumFrequency(Integer.MAX_VALUE);
+    }
+
+    /** {@inheritDoc} */
+    public void testIndexRebuild() throws Exception {
+        IgniteEx srv = startServer();
+
+        execute(srv, "CREATE TABLE T(k int primary key, v int) WITH \"cache_name=T,wrap_value=false," +
+            "atomicity=transactional_snapshot\"");
+
+        execute(srv, "CREATE INDEX IDX ON T(v)");
+
+        IgniteInternalCache cc = srv.cachex(CACHE_NAME);
+
+        assertNotNull(cc);
+
+        lockVersion(srv);
+
+        putData(srv, false);
+
+        checkDataState(srv, false);
+
+        File cacheWorkDir = ((FilePageStoreManager)cc.context().shared().pageStore()).cacheWorkDir(cc.configuration());
+
+        File idxPath = cacheWorkDir.toPath().resolve("index.bin").toFile();
+
+        stopAllGrids();
+
+        assertTrue(U.delete(idxPath));
+
+        srv = startServer();
+
+        putData(srv, true);
+
+        checkDataState(srv, true);
+    }
+
+    /**
+     * Lock coordinator version in order to keep MVCC versions in place.
+     *
+     * @param node Node.
+     * @throws IgniteCheckedException if failed.
+     */
+    private static void lockVersion(IgniteEx node) throws IgniteCheckedException {
+        node.context().coordinators().requestSnapshotAsync().get();
+    }
+
+    /** {@inheritDoc} */
+    protected void checkDataState(IgniteEx srv, boolean afterRebuild) throws IgniteCheckedException {
+        IgniteInternalCache icache = srv.cachex(CACHE_NAME);
+
+        assertNotNull(icache);
+
+        CacheObjectContext coCtx = icache.context().cacheObjectContext();
+
+        for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
+            GridCursor<? extends CacheDataRow> cur = store.cursor();
+
+            while (cur.next()) {
+                CacheDataRow row = cur.get();
+
+                int key = row.key().value(coCtx, false);
+
+                List<IgniteBiTuple<Object, MvccVersion>> vers = store.mvccFindAllVersions(icache.context(), row.key());
+
+                if (!afterRebuild || key <= AMOUNT / 2)
+                    assertEquals(key, vers.size());
+                else {
+                    // For keys affected by concurrent put there are two versions -
+                    // -1 (concurrent put mark) and newest restored value as long as put cleans obsolete versions.
+                    assertEquals(2, vers.size());
+
+                    Object val0 = ((CacheObject)vers.get(0).getKey()).value(coCtx, false);
+                    Object val1 = ((CacheObject)vers.get(1).getKey()).value(coCtx, false);
+
+                    assertEquals(-1, val0);
+                    assertEquals(key, val1);
+                }
+
+            }
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
index 235b28b..6b76230 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/GridIndexingSpiAbstractSelfTest.java
@@ -343,7 +343,7 @@
         // Fields query
         GridQueryFieldsResult fieldsRes =
             spi.queryLocalSqlFields(spi.schema("A"), "select a.a.name n1, a.a.age a1, b.a.name n2, " +
-            "b.a.age a2 from a.a, b.a where a.a.id = b.a.id ", Collections.emptySet(), null, false, 0, null);
+            "b.a.age a2 from a.a, b.a where a.a.id = b.a.id ", Collections.emptySet(), null, false, false, 0, null);
 
         String[] aliases = {"N1", "A1", "N2", "A2"};
         Object[] vals = { "Valera", 19, "Kolya", 25};
@@ -401,7 +401,7 @@
                 range *= 3;
 
                 GridQueryFieldsResult res = spi.queryLocalSqlFields(spi.schema("A"), sql, Arrays.<Object>asList(1,
-                    range), null, false, 0, null);
+                    range), null, false, false, 0, null);
 
                 assert res.iterator().hasNext();
 
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIteratorNullifyOnEndSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIteratorNullifyOnEndSelfTest.java
new file mode 100644
index 0000000..31b0b97
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2ResultSetIteratorNullifyOnEndSelfTest.java
@@ -0,0 +1,420 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.util.Collections;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Objects;
+import javax.cache.Cache;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.processors.cache.QueryCursorImpl;
+import org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator;
+import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.internal.util.lang.GridCloseableIterator;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Test for iterator data link erasure after closing or completing
+ */
+public class H2ResultSetIteratorNullifyOnEndSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final int NODES_COUNT = 2;
+
+    /** */
+    private static final int PERSON_COUNT = 20;
+
+    /** */
+    private static final String SELECT_ALL_SQL = "SELECT p.* FROM Person p ORDER BY p.salary";
+
+    /** */
+    private static final String SELECT_MAX_SAL_SQLF = "select max(salary) from Person";
+
+    /**
+     * Non local SQL check nullification after close
+     */
+    public void testSqlQueryClose() {
+        SqlQuery<String, Person> qry = new SqlQuery<>(Person.class, SELECT_ALL_SQL);
+
+        QueryCursor<Cache.Entry<String, Person>> qryCurs = cache().query(qry);
+
+        qryCurs.iterator();
+
+        qryCurs.close();
+
+        H2ResultSetIterator h2It = extractIteratorInnerGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Non local SQL check nullification after complete
+     */
+    public void testSqlQueryComplete() {
+        SqlQuery<String, Person> qry = new SqlQuery<>(Person.class, SELECT_ALL_SQL);
+
+        QueryCursor<Cache.Entry<String, Person>> qryCurs = cache().query(qry);
+
+        qryCurs.getAll();
+
+        H2ResultSetIterator h2It = extractIteratorInnerGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Local SQL check nullification after close
+     */
+    public void testSqlQueryLocalClose() {
+        SqlQuery<String, Person> qry = new SqlQuery<>(Person.class, SELECT_ALL_SQL);
+
+        qry.setLocal(true);
+
+        QueryCursor<Cache.Entry<String, Person>> qryCurs = cache().query(qry);
+
+        qryCurs.iterator();
+
+        qryCurs.close();
+
+        H2ResultSetIterator h2It = extractIterableInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Local SQL check nullification after complete
+     */
+    public void testSqlQueryLocalComplete() {
+        SqlQuery<String, Person> qry = new SqlQuery<>(Person.class, SELECT_ALL_SQL);
+
+        qry.setLocal(true);
+
+        QueryCursor<Cache.Entry<String, Person>> qryCurs = cache().query(qry);
+
+        qryCurs.getAll();
+
+        H2ResultSetIterator h2It = extractIterableInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Non local SQL Fields check nullification after close
+     */
+    public void testSqlFieldsQueryClose() {
+        SqlFieldsQuery qry = new SqlFieldsQuery(SELECT_MAX_SAL_SQLF);
+
+        QueryCursor<List<?>> qryCurs = cache().query(qry);
+
+        qryCurs.iterator();
+
+        qryCurs.close();
+
+        H2ResultSetIterator h2It = extractGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Non local SQL Fields check nullification after complete
+     */
+    public void testSqlFieldsQueryComplete() {
+        SqlFieldsQuery qry = new SqlFieldsQuery(SELECT_MAX_SAL_SQLF);
+
+        QueryCursor<List<?>> qryCurs = cache().query(qry);
+
+        qryCurs.getAll();
+
+        H2ResultSetIterator h2It = extractGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Local SQL Fields check nullification after close
+     */
+    public void testSqlFieldsQueryLocalClose() {
+        SqlFieldsQuery qry = new SqlFieldsQuery(SELECT_MAX_SAL_SQLF);
+
+        qry.setLocal(true);
+
+        QueryCursor<List<?>> qryCurs = cache().query(qry);
+
+        qryCurs.iterator();
+
+        qryCurs.close();
+
+        H2ResultSetIterator h2It = extractGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Local SQL Fields check nullification after complete
+     */
+    public void testSqlFieldsQueryLocalComplete() {
+        SqlFieldsQuery qry = new SqlFieldsQuery(SELECT_MAX_SAL_SQLF);
+
+        qry.setLocal(true);
+
+        QueryCursor<List<?>> qryCurs = cache().query(qry);
+
+        qryCurs.getAll();
+
+        H2ResultSetIterator h2It = extractGridIteratorInnerH2ResultSetIterator(qryCurs);
+
+        checkIterator(h2It);
+    }
+
+    /**
+     * Common Assertion
+     * @param h2it target iterator
+     */
+    private void checkIterator(H2ResultSetIterator h2it){
+        if (Objects.nonNull(h2it))
+            assertNull(GridTestUtils.getFieldValue(h2it, H2ResultSetIterator.class, "data"));
+        else
+            fail();
+    }
+
+    /**
+     * Extract H2ResultSetIterator by reflection for non local SQL cases
+     * @param qryCurs source cursor
+     * @return target iterator or null of not extracted
+     */
+    private H2ResultSetIterator extractIteratorInnerGridIteratorInnerH2ResultSetIterator(
+        QueryCursor<Cache.Entry<String, Person>> qryCurs) {
+        if (QueryCursorImpl.class.isAssignableFrom(qryCurs.getClass())) {
+            Iterator inner = GridTestUtils.getFieldValue(qryCurs, QueryCursorImpl.class, "iter");
+
+            GridQueryCacheObjectsIterator it = GridTestUtils.getFieldValue(inner, inner.getClass(), "val$iter0");
+
+            Iterator<List<?>> h2RsIt = GridTestUtils.getFieldValue(it, GridQueryCacheObjectsIterator.class, "iter");
+
+            if (H2ResultSetIterator.class.isAssignableFrom(h2RsIt.getClass()))
+                return (H2ResultSetIterator)h2RsIt;
+        }
+        return null;
+    }
+
+    /**
+     * Extract H2ResultSetIterator by reflection for local SQL cases.
+     *
+     * @param qryCurs source cursor
+     * @return target iterator or null of not extracted
+     */
+    private H2ResultSetIterator extractIterableInnerH2ResultSetIterator(
+        QueryCursor<Cache.Entry<String, Person>> qryCurs) {
+        if (QueryCursorImpl.class.isAssignableFrom(qryCurs.getClass())) {
+            Iterable iterable = GridTestUtils.getFieldValue(qryCurs, QueryCursorImpl.class, "iterExec");
+
+            Iterator h2RsIt = GridTestUtils.getFieldValue(iterable, iterable.getClass(), "val$i");
+
+            if (H2ResultSetIterator.class.isAssignableFrom(h2RsIt.getClass()))
+                return (H2ResultSetIterator)h2RsIt;
+        }
+        return null;
+    }
+
+    /**
+     * Extract H2ResultSetIterator by reflection for SQL Fields cases.
+     *
+     * @param qryCurs source cursor
+     * @return target iterator or null of not extracted
+     */
+    private H2ResultSetIterator extractGridIteratorInnerH2ResultSetIterator(QueryCursor<List<?>> qryCurs) {
+        if (QueryCursorImpl.class.isAssignableFrom(qryCurs.getClass())) {
+            GridQueryCacheObjectsIterator it = GridTestUtils.getFieldValue(qryCurs, QueryCursorImpl.class, "iter");
+
+            Iterator<List<?>> h2RsIt = GridTestUtils.getFieldValue(it, GridQueryCacheObjectsIterator.class, "iter");
+
+            if (H2ResultSetIterator.class.isAssignableFrom(h2RsIt.getClass()))
+                return (H2ResultSetIterator)h2RsIt;
+        }
+        return null;
+    }
+
+    /**
+     * "onClose" should remove links to data.
+     */
+    public void testOnClose() {
+        try {
+            GridCloseableIterator it = indexing().queryLocalSql(
+                indexing().schema(cache().getName()),
+                cache().getName(),
+                SELECT_ALL_SQL,
+                null,
+                Collections.emptySet(),
+                "Person",
+                null,
+                null);
+
+            if (H2ResultSetIterator.class.isAssignableFrom(it.getClass())) {
+                H2ResultSetIterator h2it = (H2ResultSetIterator)it;
+
+                h2it.onClose();
+
+                assertNull(GridTestUtils.getFieldValue(h2it, H2ResultSetIterator.class, "data"));
+            }
+            else
+                fail();
+        }
+        catch (IgniteCheckedException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /**
+     * Complete iterate should remove links to data.
+     */
+    public void testOnComplete() {
+        try {
+            GridCloseableIterator it = indexing().queryLocalSql(
+                indexing().schema(cache().getName()),
+                cache().getName(),
+                SELECT_ALL_SQL,
+                null,
+                Collections.emptySet(),
+                "Person",
+                null,
+                null);
+
+            if (H2ResultSetIterator.class.isAssignableFrom(it.getClass())) {
+                H2ResultSetIterator h2it = (H2ResultSetIterator)it;
+
+                while (h2it.onHasNext())
+                    h2it.onNext();
+
+                assertNull(GridTestUtils.getFieldValue(h2it, H2ResultSetIterator.class, "data"));
+            }
+            else
+                fail();
+        }
+        catch (IgniteCheckedException e) {
+            fail(e.getMessage());
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrids(NODES_COUNT);
+
+        ignite(0).createCache(
+            new CacheConfiguration<String, Person>("pers").setIndexedTypes(String.class, Person.class)
+        );
+
+        awaitPartitionMapExchange();
+
+        populateDataIntoPerson();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * @return H2 indexing instance.
+     */
+    private IgniteH2Indexing indexing() {
+        GridQueryProcessor qryProcessor = grid(0).context().query();
+
+        return GridTestUtils.getFieldValue(qryProcessor, GridQueryProcessor.class, "idx");
+    }
+
+    /**
+     * @return Cache.
+     */
+    private IgniteCache<String, Person> cache() {
+        return grid(0).cache("pers");
+    }
+
+    /**
+     * Populate person cache with test data
+     */
+    private void populateDataIntoPerson() {
+        IgniteCache<String, Person> cache = cache();
+
+        int personId = 0;
+
+        for (int j = 0; j < PERSON_COUNT; j++) {
+            Person prsn = new Person();
+
+            prsn.setId("pers" + personId);
+            prsn.setName("Person name #" + personId);
+
+            cache.put(prsn.getId(), prsn);
+
+            personId++;
+        }
+    }
+
+    /**
+     *
+     */
+    private static class Person {
+        /** */
+        @QuerySqlField(index = true)
+        private String id;
+
+        /** */
+        @QuerySqlField(index = true)
+        private String name;
+
+        /** */
+        @QuerySqlField(index = true)
+        private int salary;
+
+        /** */
+        public String getId() {
+            return id;
+        }
+
+        /** */
+        public void setId(String id) {
+            this.id = id;
+        }
+
+        /** */
+        public String getName() {
+            return name;
+        }
+
+        /** */
+        public void setName(String name) {
+            this.name = name;
+        }
+
+        /** */
+        public int getSalary() {
+            return salary;
+        }
+
+        /** */
+        public void setSalary(int salary) {
+            this.salary = salary;
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2StatementCacheSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2StatementCacheSelfTest.java
new file mode 100644
index 0000000..655d039
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/H2StatementCacheSelfTest.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.PreparedStatement;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class H2StatementCacheSelfTest extends GridCommonAbstractTest {
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testEviction() throws Exception {
+        H2StatementCache stmtCache = new H2StatementCache(1);
+        H2CachedStatementKey key1 = new H2CachedStatementKey("", "1");
+        PreparedStatement stmt1 = stmt();
+        stmtCache.put(key1, stmt1);
+
+        assertSame(stmt1, stmtCache.get(key1));
+
+        stmtCache.put(new H2CachedStatementKey("mydb", "2"), stmt());
+
+        assertNull(stmtCache.get(key1));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testLruEvictionInStoreOrder() throws Exception {
+        H2StatementCache stmtCache = new H2StatementCache(2);
+
+        H2CachedStatementKey key1 = new H2CachedStatementKey("", "1");
+        H2CachedStatementKey key2 = new H2CachedStatementKey("", "2");
+        stmtCache.put(key1, stmt());
+        stmtCache.put(key2, stmt());
+
+        stmtCache.put(new H2CachedStatementKey("", "3"), stmt());
+
+        assertNull(stmtCache.get(key1));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testLruEvictionInAccessOrder() throws Exception {
+        H2StatementCache stmtCache = new H2StatementCache(2);
+
+        H2CachedStatementKey key1 = new H2CachedStatementKey("", "1");
+        H2CachedStatementKey key2 = new H2CachedStatementKey("", "2");
+        stmtCache.put(key1, stmt());
+        stmtCache.put(key2, stmt());
+        stmtCache.get(key1);
+
+        stmtCache.put(new H2CachedStatementKey("", "3"), stmt());
+
+        assertNull(stmtCache.get(key2));
+    }
+
+    /**
+     *
+     */
+    private static PreparedStatement stmt() {
+        return new PreparedStatementExImpl(null);
+    }
+}
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExSelfTest.java
new file mode 100644
index 0000000..22bff3b
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/PreparedStatementExSelfTest.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.sql.PreparedStatement;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class PreparedStatementExSelfTest extends GridCommonAbstractTest {
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStoringMeta() throws Exception {
+        PreparedStatement stmt = stmt();
+
+        PreparedStatementEx wrapped = stmt.unwrap(PreparedStatementEx.class);
+
+        wrapped.putMeta(0, "0");
+
+        assertEquals("0", wrapped.meta(0));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testStoringMoreMetaKeepsExisting() throws Exception {
+        PreparedStatement stmt = stmt();
+
+        PreparedStatementEx wrapped = stmt.unwrap(PreparedStatementEx.class);
+
+        wrapped.putMeta(0, "0");
+        wrapped.putMeta(1, "1");
+
+        assertEquals("0", wrapped.meta(0));
+        assertEquals("1", wrapped.meta(1));
+    }
+
+    /**
+     *
+     */
+    private static PreparedStatement stmt() {
+        return new PreparedStatementExImpl(null);
+    }
+}
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPoolSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPoolSelfTest.java
new file mode 100644
index 0000000..b7b7a37
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/ThreadLocalObjectPoolSelfTest.java
@@ -0,0 +1,113 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2;
+
+import java.util.concurrent.CompletableFuture;
+import org.apache.ignite.internal.processors.query.h2.ThreadLocalObjectPool.Reusable;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ *
+ */
+public class ThreadLocalObjectPoolSelfTest extends GridCommonAbstractTest {
+    /** */
+    private ThreadLocalObjectPool<Obj> pool = new ThreadLocalObjectPool<>(Obj::new, 1);
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testObjectIsReusedAfterRecycling() throws Exception {
+        Reusable<Obj> o1 = pool.borrow();
+        o1.recycle();
+        Reusable<Obj> o2 = pool.borrow();
+
+        assertSame(o1.object(), o2.object());
+        assertFalse(o1.object().isClosed());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testBorrowedObjectIsNotReturnedTwice() throws Exception {
+        Reusable<Obj> o1 = pool.borrow();
+        Reusable<Obj> o2 = pool.borrow();
+
+        assertNotSame(o1.object(), o2.object());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testObjectShouldBeClosedOnRecycleIfPoolIsFull() throws Exception {
+        Reusable<Obj> o1 = pool.borrow();
+        Reusable<Obj> o2 = pool.borrow();
+        o1.recycle();
+        o2.recycle();
+
+        assertTrue(o2.object().isClosed());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testObjectShouldNotBeReturnedIfPoolIsFull() throws Exception {
+        Reusable<Obj> o1 = pool.borrow();
+        Reusable<Obj> o2 = pool.borrow();
+
+        o1.recycle();
+
+        assertEquals(1, pool.bagSize());
+
+        o2.recycle();
+
+        assertEquals(1, pool.bagSize());
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testObjectShouldReturnedToRecyclingThreadBag() throws Exception {
+        Reusable<Obj> o1 = pool.borrow();
+
+        CompletableFuture.runAsync(() -> {
+            o1.recycle();
+
+            assertEquals(1, pool.bagSize());
+        }).join();
+
+        assertEquals(0, pool.bagSize());
+    }
+
+    /** */
+    private static class Obj implements AutoCloseable {
+        /** */
+        private boolean closed = false;
+
+        /** {@inheritDoc} */
+        @Override public void close() {
+            closed = true;
+        }
+
+        /**
+         * @return {@code True} if closed.
+         */
+        public boolean isClosed() {
+            return closed;
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java
index a362586..de77150 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/sql/GridQueryParsingTest.java
@@ -205,6 +205,7 @@
         checkQuery("select * from Person");
         checkQuery("select distinct * from Person");
         checkQuery("select p.name, date from Person p");
+        checkQuery("select p.name, date from Person p for update");
 
         checkQuery("select * from Person p, sch2.Address a");
         checkQuery("select * from Person, sch2.Address");
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/CacheQueryMemoryLeakTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/CacheQueryMemoryLeakTest.java
new file mode 100644
index 0000000..754504e
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/CacheQueryMemoryLeakTest.java
@@ -0,0 +1,159 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.QueryEntity;
+import org.apache.ignite.cache.query.Query;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/** */
+public class CacheQueryMemoryLeakTest extends GridCommonAbstractTest {
+    /** */
+    private static final TcpDiscoveryVmIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true);
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration igniteCfg = super.getConfiguration(igniteInstanceName);
+
+        ((TcpDiscoverySpi)igniteCfg.getDiscoverySpi()).setIpFinder(IP_FINDER);
+
+        if (igniteInstanceName.equals("client"))
+            igniteCfg.setClientMode(true);
+
+        return igniteCfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+    /**
+     * Check, that query results are not accumulated, when result set size is a multiple of a {@link Query#pageSize}.
+     *
+     * @throws Exception If failed.
+     */
+    public void testResultIsMultipleOfPage() throws Exception {
+        IgniteEx srv = (IgniteEx)startGrid("server");
+        Ignite client = startGrid("client");
+
+        IgniteCache<Integer, Person> cache = startPeopleCache(client);
+
+        int pages = 3;
+        int pageSize = 1024;
+
+        for (int i = 0; i < pages * pageSize; i++) {
+            Person p = new Person("Person #" + i, 25);
+
+            cache.put(i, p);
+        }
+
+        for (int i = 0; i < 100; i++) {
+            Query<List<?>> qry = new SqlFieldsQuery("select * from people");
+
+            qry.setPageSize(pageSize);
+
+            QueryCursor<List<?>> cursor = cache.query(qry);
+
+            cursor.getAll();
+
+            cursor.close();
+        }
+
+        assertTrue("MapNodeResults is not cleared on the map node.", isMapNodeResultsEmpty(srv));
+    }
+
+    /**
+     * @param node Ignite node.
+     * @return {@code True}, if all MapQueryResults are removed from internal node's structures. {@code False}
+     * otherwise.
+     */
+    private boolean isMapNodeResultsEmpty(IgniteEx node) {
+        IgniteH2Indexing idx = (IgniteH2Indexing)node.context().query().getIndexing();
+
+        GridMapQueryExecutor mapQryExec = idx.mapQueryExecutor();
+
+        Map<UUID, MapNodeResults> qryRess =
+            GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "qryRess");
+
+        for (MapNodeResults nodeRess : qryRess.values()) {
+            Map<MapRequestKey, MapQueryResults> nodeQryRess =
+                GridTestUtils.getFieldValue(nodeRess, MapNodeResults.class, "res");
+
+            if (!nodeQryRess.isEmpty())
+                return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * @param node Ignite instance.
+     * @return Cache.
+     */
+    private static IgniteCache<Integer, Person> startPeopleCache(Ignite node) {
+        CacheConfiguration<Integer, Person> cacheCfg = new CacheConfiguration<>("people");
+
+        QueryEntity qe = new QueryEntity(Integer.class, Person.class);
+
+        qe.setTableName("people");
+
+        cacheCfg.setQueryEntities(Collections.singleton(qe));
+
+        cacheCfg.setSqlSchema("PUBLIC");
+
+        return node.getOrCreateCache(cacheCfg);
+    }
+
+    /** */
+    @SuppressWarnings("unused")
+    public static class Person {
+        /** */
+        @QuerySqlField
+        private String name;
+
+        /** */
+        @QuerySqlField
+        private int age;
+
+        /**
+         * @param name Name.
+         * @param age Age.
+         */
+        public Person(String name, int age) {
+            this.name = name;
+            this.age = age;
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheCauseRetryMessageSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheCauseRetryMessageSelfTest.java
new file mode 100644
index 0000000..8c4358a
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheCauseRetryMessageSelfTest.java
@@ -0,0 +1,134 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import javax.cache.CacheException;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.managers.communication.GridIoMessage;
+import org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Organization;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Person;
+
+/**
+ * Failed to reserve partitions for query (cache is not found on local node) Root cause test
+ */
+public class DisappearedCacheCauseRetryMessageSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final int NODES_COUNT = 2;
+    /** */
+    private static final String ORG = "org";
+    /** */
+    private IgniteCache<String, JoinSqlTestHelper.Person> personCache;
+    /** */
+    private IgniteCache<String, JoinSqlTestHelper.Organization> orgCache;
+
+    /** */
+    public void testDisappearedCacheCauseRetryMessage() {
+
+        SqlQuery<String, JoinSqlTestHelper.Person> qry = new SqlQuery<String, JoinSqlTestHelper.Person>(JoinSqlTestHelper.Person.class, JoinSqlTestHelper.JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+
+        try {
+            personCache.query(qry).getAll();
+
+            fail("No CacheException emitted.");
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("Failed to reserve partitions for query (cache is not found on local node) ["));
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setCommunicationSpi(new TcpCommunicationSpi(){
+
+            volatile long reqId = -1;
+            /** {@inheritDoc} */
+            @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
+                assert msg != null;
+
+                if ( GridIoMessage.class.isAssignableFrom(msg.getClass())){
+                    GridIoMessage gridMsg = (GridIoMessage)msg;
+
+                    if ( GridH2QueryRequest.class.isAssignableFrom( gridMsg.message().getClass() ) ){
+                        GridH2QueryRequest req = (GridH2QueryRequest) (gridMsg.message());
+                        reqId = req.requestId();
+                        orgCache.destroy();
+                    }
+                    else if ( GridQueryCancelRequest.class.isAssignableFrom( gridMsg.message().getClass() ) ){
+                        GridQueryCancelRequest req = (GridQueryCancelRequest) (gridMsg.message());
+
+                        if (reqId == req.queryRequestId())
+                            orgCache = DisappearedCacheCauseRetryMessageSelfTest.this.ignite(0).getOrCreateCache(new CacheConfiguration<String, Organization>(ORG)
+                                .setCacheMode(CacheMode.REPLICATED)
+                                .setIndexedTypes(String.class, JoinSqlTestHelper.Organization.class)
+                            );
+
+                    }
+                }
+
+                super.sendMessage(node, msg, ackC);
+            }
+        });
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        System.setProperty(IGNITE_SQL_RETRY_TIMEOUT, "5000");
+
+        startGridsMultiThreaded(NODES_COUNT, false);
+
+        personCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Person>("pers")
+            .setIndexedTypes(String.class, JoinSqlTestHelper.Person.class)
+        );
+
+        orgCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Organization>(ORG)
+            .setCacheMode(CacheMode.REPLICATED)
+            .setIndexedTypes(String.class, JoinSqlTestHelper.Organization.class)
+        );
+
+        awaitPartitionMapExchange();
+
+        JoinSqlTestHelper.populateDataIntoOrg(orgCache);
+
+        JoinSqlTestHelper.populateDataIntoPerson(personCache);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheWasNotFoundMessageSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheWasNotFoundMessageSelfTest.java
new file mode 100644
index 0000000..9928ed6
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/DisappearedCacheWasNotFoundMessageSelfTest.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import javax.cache.CacheException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.managers.communication.GridIoMessage;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Organization;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Person;
+
+/**
+ * Grid cache context is not registered for cache id root cause message test
+ */
+public class DisappearedCacheWasNotFoundMessageSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final int NODES_COUNT = 2;
+    /** */
+    private static final String ORG = "org";
+    /** */
+    private IgniteCache<String, JoinSqlTestHelper.Person> personCache;
+    /** */
+    private IgniteCache<String, JoinSqlTestHelper.Organization> orgCache;
+
+    /** */
+    public void testDisappearedCacheWasNotFoundMessage() {
+        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JoinSqlTestHelper.JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+
+        try {
+            personCache.query(qry).getAll();
+
+            fail("No CacheException emitted.");
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("Cache not found on local node"));
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setCommunicationSpi(new TcpCommunicationSpi(){
+            /** {@inheritDoc} */
+            @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
+                assert msg != null;
+
+                if ( GridIoMessage.class.isAssignableFrom(msg.getClass())){
+                    GridIoMessage gridMsg = (GridIoMessage)msg;
+
+                    if ( GridH2QueryRequest.class.isAssignableFrom( gridMsg.message().getClass() ) ){
+                        GridH2QueryRequest req = (GridH2QueryRequest) (gridMsg.message());
+
+                        req.requestId();
+
+                        orgCache.destroy();
+                    }
+                }
+
+                super.sendMessage(node, msg, ackC);
+            }
+        });
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        System.setProperty(IGNITE_SQL_RETRY_TIMEOUT, "5000");
+
+        startGridsMultiThreaded(NODES_COUNT, false);
+
+        personCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Person>("pers")
+            .setIndexedTypes(String.class, JoinSqlTestHelper.Person.class)
+        );
+
+        orgCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Organization>(ORG)
+                .setCacheMode(CacheMode.REPLICATED)
+                .setIndexedTypes(String.class, Organization.class)
+        );
+
+        awaitPartitionMapExchange();
+
+        JoinSqlTestHelper.populateDataIntoOrg(orgCache);
+
+        JoinSqlTestHelper.populateDataIntoPerson(personCache);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/JoinSqlTestHelper.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/JoinSqlTestHelper.java
new file mode 100644
index 0000000..fe7821a
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/JoinSqlTestHelper.java
@@ -0,0 +1,163 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.query.annotations.QuerySqlField;
+
+/**
+ * Join sql test helper
+ */
+public class JoinSqlTestHelper {
+    /** */
+    private static final int ORG_COUNT = 100;
+
+    /** */
+    private static final int PERSON_PER_ORG_COUNT = 10;
+
+    /** */
+    static final String JOIN_SQL = "select * from Person, \"org\".Organization as org " +
+        "where Person.orgId = org.id " +
+        "and lower(org.name) = lower(?)";
+
+    /**
+     * Populate organization cache with test data
+     * @param cache @{IgniteCache}
+     */
+    static void populateDataIntoOrg(IgniteCache<String, Organization> cache) {
+        for (int i = 0; i < ORG_COUNT; i++) {
+            Organization org = new Organization();
+
+            org.setId("org" + i);
+
+            org.setName("Organization #" + i);
+
+            cache.put(org.getId(), org);
+        }
+    }
+
+    /**
+     * Populate person cache with test data
+     * @param cache @{IgniteCache}
+     */
+    static void populateDataIntoPerson(IgniteCache<String, Person> cache) {
+        int personId = 0;
+
+        for (int i = 0; i < ORG_COUNT; i++) {
+            Organization org = new Organization();
+
+            org.setId("org" + i);
+
+            org.setName("Organization #" + i);
+
+            for (int j = 0; j < PERSON_PER_ORG_COUNT; j++) {
+                Person prsn = new Person();
+
+                prsn.setId("pers" + personId);
+
+                prsn.setOrgId(org.getId());
+
+                prsn.setName("Person name #" + personId);
+
+                cache.put(prsn.getId(), prsn);
+
+                personId++;
+            }
+        }
+    }
+
+    /**
+     *
+     */
+    public static class Person {
+        /** */
+        @QuerySqlField(index = true)
+        private String id;
+
+        /** */
+        @QuerySqlField(index = true)
+        private String orgId;
+
+        /** */
+        @QuerySqlField(index = true)
+        private String name;
+
+        /** */
+        public String getId() {
+            return id;
+        }
+
+        /** */
+        public void setId(String id) {
+            this.id = id;
+        }
+
+        /** */
+        public String getOrgId() {
+            return orgId;
+        }
+
+        /** */
+        public void setOrgId(String orgId) {
+            this.orgId = orgId;
+        }
+
+        /** */
+        public String getName() {
+            return name;
+        }
+
+        /** */
+        public void setName(String name) {
+            this.name = name;
+        }
+    }
+
+    /**
+     *
+     */
+    public static class Organization {
+        /** */
+        @QuerySqlField(index = true)
+        private String id;
+
+        /** */
+        @QuerySqlField(index = true)
+        private String name;
+
+        /** */
+        public void setId(String id) {
+            this.id = id;
+        }
+
+        /** */
+        public String getId() {
+            return id;
+        }
+
+        /** */
+        public String getName() {
+            return name;
+        }
+
+        /** */
+        public void setName(String name) {
+            this.name = name;
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/NonCollocatedRetryMessageSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/NonCollocatedRetryMessageSelfTest.java
new file mode 100644
index 0000000..c602225
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/NonCollocatedRetryMessageSelfTest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import java.util.List;
+import javax.cache.Cache;
+import javax.cache.CacheException;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.IgniteSystemProperties;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.IgniteInterruptedCheckedException;
+import org.apache.ignite.internal.managers.communication.GridIoMessage;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
+
+/**
+ * Failed to execute non-collocated query root cause message test
+ */
+public class NonCollocatedRetryMessageSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final int NODES_COUNT = 3;
+
+    /** */
+    private static final String ORG = "org";
+
+    /** */
+    private IgniteCache<String, JoinSqlTestHelper.Person> personCache;
+
+    /** */
+    public void testNonCollocatedRetryMessage() {
+        SqlQuery<String, JoinSqlTestHelper.Person> qry = new SqlQuery<String, JoinSqlTestHelper.Person>(JoinSqlTestHelper.Person.class, JoinSqlTestHelper.JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+
+        try {
+            List<Cache.Entry<String,JoinSqlTestHelper.Person>> prsns = personCache.query(qry).getAll();
+            fail("No CacheException emitted. Collection size="+prsns.size());
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("Failed to execute non-collocated query"));
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setCommunicationSpi(new TcpCommunicationSpi(){
+            volatile long reqId = -1;
+            /** {@inheritDoc} */
+            @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
+                assert msg != null;
+
+                if ( GridIoMessage.class.isAssignableFrom(msg.getClass())){
+                    GridIoMessage gridMsg = (GridIoMessage)msg;
+
+                    if ( GridH2QueryRequest.class.isAssignableFrom( gridMsg.message().getClass() ) ){
+                        GridH2QueryRequest req = (GridH2QueryRequest) (gridMsg.message());
+
+                        if (reqId < 0) {
+                            reqId = req.requestId();
+
+                            String shutName = getTestIgniteInstanceName(1);
+
+                            stopGrid(shutName, true, false);
+                        }
+                        else if( reqId != req.requestId() ){
+                            try {
+                                U.sleep(IgniteSystemProperties.getLong(IGNITE_SQL_RETRY_TIMEOUT, GridReduceQueryExecutor.DFLT_RETRY_TIMEOUT));
+                            }
+                            catch (IgniteInterruptedCheckedException e) {
+                                // no-op
+                            }
+                        }
+                    }
+                }
+                super.sendMessage(node, msg, ackC);
+            }
+        });
+
+        cfg.setDiscoverySpi(new TcpDiscoverySpi(){
+            public long getNodesJoined() {
+                return stats.joinedNodesCount();
+            }
+        });
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        System.setProperty(IGNITE_SQL_RETRY_TIMEOUT, "5000");
+
+        startGridsMultiThreaded(NODES_COUNT, false);
+
+        personCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, JoinSqlTestHelper.Person>("pers")
+            .setBackups(1)
+            .setIndexedTypes(String.class, JoinSqlTestHelper.Person.class)
+        );
+
+        final IgniteCache<String, JoinSqlTestHelper.Organization> orgCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, JoinSqlTestHelper.Organization>(ORG)
+            .setBackups(1)
+            .setIndexedTypes(String.class, JoinSqlTestHelper.Organization.class)
+        );
+
+        awaitPartitionMapExchange();
+
+        JoinSqlTestHelper.populateDataIntoOrg(orgCache);
+
+        JoinSqlTestHelper.populateDataIntoPerson(personCache);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+
+}
+
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/RetryCauseMessageSelfTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/RetryCauseMessageSelfTest.java
new file mode 100644
index 0000000..ce38511
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/query/h2/twostep/RetryCauseMessageSelfTest.java
@@ -0,0 +1,417 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.query.h2.twostep;
+
+import java.util.UUID;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.atomic.AtomicLong;
+import javax.cache.CacheException;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.query.SqlQuery;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.GridKernalContext;
+import org.apache.ignite.internal.processors.cache.GridCacheContext;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition;
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridReservable;
+import org.apache.ignite.internal.processors.query.GridQueryProcessor;
+import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing;
+import org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException;
+import org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest;
+import org.apache.ignite.internal.util.GridSpinBusyLock;
+import org.apache.ignite.lang.IgniteInClosure;
+import org.apache.ignite.plugin.extensions.communication.Message;
+import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT;
+import static org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion.NONE;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.JOIN_SQL;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Organization;
+import static org.apache.ignite.internal.processors.query.h2.twostep.JoinSqlTestHelper.Person;
+
+/**
+ * Test for 6 retry cases
+ */
+public class RetryCauseMessageSelfTest extends GridCommonAbstractTest {
+    /** */
+    private static final int NODES_COUNT = 2;
+
+    /** */
+    private static final String ORG_SQL = "select * from Organization";
+
+    /** */
+    private static final String ORG = "org";
+
+    /** */
+    private IgniteCache<String, Person> personCache;
+
+    /** */
+    private IgniteCache<String, Organization> orgCache;
+
+    /** */
+    private IgniteH2Indexing h2Idx;
+
+    /** */
+    @Override protected long getTestTimeout() {
+        return 600 * 1000;
+    }
+
+    /**
+     * Failed to reserve partitions for query (cache is not found on local node)
+     */
+    public void testSynthCacheWasNotFoundMessage() {
+        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
+
+        GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec",
+            new MockGridMapQueryExecutor(null) {
+                @Override public void onMessage(UUID nodeId, Object msg) {
+                    if (GridH2QueryRequest.class.isAssignableFrom(msg.getClass())) {
+                        GridH2QueryRequest qryReq = (GridH2QueryRequest)msg;
+
+                        qryReq.caches().add(Integer.MAX_VALUE);
+
+                        startedExecutor.onMessage(nodeId, msg);
+
+                        qryReq.caches().remove(qryReq.caches().size() - 1);
+                    }
+                    else
+                        startedExecutor.onMessage(nodeId, msg);
+                }
+            }.insertRealExecutor(mapQryExec));
+
+        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+
+        try {
+            personCache.query(qry).getAll();
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage(), e.getMessage().contains("Failed to reserve partitions for query (cache is not found on local node) ["));
+
+            return;
+        }
+        finally {
+            GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec", mapQryExec);
+        }
+        fail();
+    }
+
+    /**
+     * Failed to reserve partitions for query (group reservation failed)
+     */
+    public void testGrpReservationFailureMessage() {
+        final GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
+
+        final ConcurrentMap<MapReservationKey, GridReservable> reservations = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "reservations");
+
+        GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec",
+            new MockGridMapQueryExecutor(null) {
+                @Override public void onMessage(UUID nodeId, Object msg) {
+                    if (GridH2QueryRequest.class.isAssignableFrom(msg.getClass())) {
+                        final MapReservationKey grpKey = new MapReservationKey(ORG, null);
+
+                        reservations.put(grpKey, new GridReservable() {
+
+                            @Override public boolean reserve() {
+                                return false;
+                            }
+
+                            @Override public void release() {}
+                        });
+                    }
+                    startedExecutor.onMessage(nodeId, msg);
+                }
+            }.insertRealExecutor(mapQryExec));
+
+        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+
+        try {
+            personCache.query(qry).getAll();
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage().contains("Failed to reserve partitions for query (group reservation failed) ["));
+
+            return;
+        }
+        finally {
+            GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec", mapQryExec);
+        }
+        fail();
+    }
+
+    /**
+     * Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state)
+     */
+    public void testReplicatedCacheReserveFailureMessage() {
+        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
+
+        final GridKernalContext ctx = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "ctx");
+
+        GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec",
+            new MockGridMapQueryExecutor(null) {
+                @Override public void onMessage(UUID nodeId, Object msg) {
+                    if (GridH2QueryRequest.class.isAssignableFrom(msg.getClass())) {
+                        GridH2QueryRequest qryReq = (GridH2QueryRequest)msg;
+
+                        GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(qryReq.caches().get(0));
+
+                        GridDhtLocalPartition part = cctx.topology().localPartition(0, NONE, false);
+
+                        AtomicLong aState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");
+
+                        long stateVal = aState.getAndSet(2);
+
+                        startedExecutor.onMessage(nodeId, msg);
+
+                        aState.getAndSet(stateVal);
+                    }
+                    else 
+                        startedExecutor.onMessage(nodeId, msg);
+                }
+            }.insertRealExecutor(mapQryExec));
+
+        SqlQuery<String, Organization> qry = new SqlQuery<>(Organization.class, ORG_SQL);
+
+        qry.setDistributedJoins(true);
+        try {
+            orgCache.query(qry).getAll();
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage().contains("Failed to reserve partitions for query (partition of REPLICATED cache is not in OWNING state) ["));
+
+            return;
+        }
+        finally {
+            GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec", mapQryExec);
+        }
+        fail();
+    }
+
+    /**
+     * Failed to reserve partitions for query (partition of PARTITIONED cache cannot be reserved)
+     */
+    public void testPartitionedCacheReserveFailureMessage() {
+        GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
+
+        final GridKernalContext ctx = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "ctx");
+
+        GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec",
+            new MockGridMapQueryExecutor(null) {
+                @Override public void onMessage(UUID nodeId, Object msg) {
+                    if (GridH2QueryRequest.class.isAssignableFrom(msg.getClass())) {
+                        GridH2QueryRequest qryReq = (GridH2QueryRequest)msg;
+
+                        GridCacheContext<?, ?> cctx = ctx.cache().context().cacheContext(qryReq.caches().get(0));
+
+                        GridDhtLocalPartition part = cctx.topology().localPartition(0, NONE, false);
+
+                        AtomicLong aState = GridTestUtils.getFieldValue(part, GridDhtLocalPartition.class, "state");
+
+                        long stateVal = aState.getAndSet(2);
+
+                        startedExecutor.onMessage(nodeId, msg);
+
+                        aState.getAndSet(stateVal);
+                    }
+                    else
+                        startedExecutor.onMessage(nodeId, msg);
+
+                }
+            }.insertRealExecutor(mapQryExec));
+
+        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+        try {
+            personCache.query(qry).getAll();
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage().contains("Failed to reserve partitions for query (partition of PARTITIONED " +
+                "cache is not found or not in OWNING state) "));
+
+            return;
+        }
+        finally {
+            GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec", mapQryExec);
+        }
+        fail();
+    }
+
+    /**
+     * Failed to execute non-collocated query (will retry)
+     */
+    public void testNonCollocatedFailureMessage() {
+        final GridMapQueryExecutor mapQryExec = GridTestUtils.getFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec");
+
+        final ConcurrentMap<MapReservationKey, GridReservable> reservations = GridTestUtils.getFieldValue(mapQryExec, GridMapQueryExecutor.class, "reservations");
+
+        GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec",
+            new MockGridMapQueryExecutor(null) {
+                @Override public void onMessage(UUID nodeId, Object msg) {
+                    if (GridH2QueryRequest.class.isAssignableFrom(msg.getClass())) {
+                        final MapReservationKey grpKey = new MapReservationKey(ORG, null);
+
+                        reservations.put(grpKey, new GridReservable() {
+
+                            @Override public boolean reserve() {
+                                throw new GridH2RetryException("test retry exception");
+                            }
+
+                            @Override public void release() {
+                            }
+                        });
+                    }
+                    startedExecutor.onMessage(nodeId, msg);
+
+                }
+            }.insertRealExecutor(mapQryExec));
+
+        SqlQuery<String, Person> qry = new SqlQuery<String, Person>(Person.class, JOIN_SQL).setArgs("Organization #0");
+
+        qry.setDistributedJoins(true);
+        try {
+            personCache.query(qry).getAll();
+        }
+        catch (CacheException e) {
+            assertTrue(e.getMessage().contains("Failed to execute non-collocated query (will retry) ["));
+
+            return;
+        }
+        finally {
+            GridTestUtils.setFieldValue(h2Idx, IgniteH2Indexing.class, "mapQryExec", mapQryExec);
+        }
+        fail();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception {
+        IgniteConfiguration cfg = super.getConfiguration(gridName);
+
+        cfg.setCommunicationSpi(new TcpCommunicationSpi(){
+            /** {@inheritDoc} */
+            @Override public void sendMessage(ClusterNode node, Message msg, IgniteInClosure<IgniteException> ackC) {
+                assert msg != null;
+
+                super.sendMessage(node, msg, ackC);
+            }
+        });
+
+        return cfg;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTest() throws Exception {
+        System.setProperty(IGNITE_SQL_RETRY_TIMEOUT, "5000");
+
+        Ignite ignite = startGridsMultiThreaded(NODES_COUNT, false);
+
+        GridQueryProcessor qryProc = grid(ignite.name()).context().query();
+
+        h2Idx = GridTestUtils.getFieldValue(qryProc, GridQueryProcessor.class, "idx");
+
+        personCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Person>("pers")
+            .setIndexedTypes(String.class, Person.class)
+        );
+
+        orgCache = ignite(0).getOrCreateCache(new CacheConfiguration<String, Organization>(ORG)
+            .setCacheMode(CacheMode.REPLICATED)
+            .setIndexedTypes(String.class, Organization.class)
+        );
+
+        awaitPartitionMapExchange();
+
+        JoinSqlTestHelper.populateDataIntoOrg(orgCache);
+
+        JoinSqlTestHelper.populateDataIntoPerson(personCache);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTest() throws Exception {
+        stopAllGrids();
+    }
+
+
+    /**
+     * Wrapper around @{GridMapQueryExecutor}
+     */
+    private abstract static class MockGridMapQueryExecutor extends GridMapQueryExecutor {
+
+        /**
+         * Wrapped executor
+         */
+        GridMapQueryExecutor startedExecutor;
+
+        /** */
+        MockGridMapQueryExecutor insertRealExecutor(GridMapQueryExecutor realExecutor) {
+            this.startedExecutor = realExecutor;
+            return this;
+        }
+
+        /**
+         * @param busyLock Busy lock.
+         */
+        MockGridMapQueryExecutor(GridSpinBusyLock busyLock) {
+            super(busyLock);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onMessage(UUID nodeId, Object msg) {
+            startedExecutor.onMessage(nodeId, msg);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void cancelLazyWorkers() {
+            startedExecutor.cancelLazyWorkers();
+        }
+
+        /** {@inheritDoc} */
+        @Override GridSpinBusyLock busyLock() {
+            return startedExecutor.busyLock();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void onCacheStop(String cacheName) {
+            startedExecutor.onCacheStop(cacheName);
+        }
+
+        /** {@inheritDoc} */
+        @Override public void stopAndUnregisterCurrentLazyWorker() {
+            startedExecutor.stopAndUnregisterCurrentLazyWorker();
+        }
+
+        /** {@inheritDoc} */
+        @Override public void unregisterLazyWorker(MapQueryLazyWorker worker) {
+            startedExecutor.unregisterLazyWorker(worker);
+        }
+
+        /** {@inheritDoc} */
+        @Override public int registeredLazyWorkers() {
+            return startedExecutor.registeredLazyWorkers();
+        }
+    }
+
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedAtomicColumnConstraintsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedAtomicColumnConstraintsTest.java
new file mode 100644
index 0000000..601090f
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedAtomicColumnConstraintsTest.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.sql;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import javax.cache.processor.EntryProcessorResult;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.CacheEntryProcessor;
+import org.apache.ignite.cache.CacheMode;
+import org.apache.ignite.cache.QueryEntity;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.util.typedef.T2;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.jetbrains.annotations.NotNull;
+
+import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC;
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC;
+import static org.apache.ignite.internal.processors.query.QueryUtils.KEY_FIELD_NAME;
+import static org.apache.ignite.internal.processors.query.QueryUtils.VAL_FIELD_NAME;
+import static org.apache.ignite.testframework.GridTestUtils.assertThrowsWithCause;
+
+/** */
+public class IgniteCachePartitionedAtomicColumnConstraintsTest extends GridCommonAbstractTest {
+    /** */
+    private static final long FUT_TIMEOUT = 10_000L;
+
+    /** */
+    private static final String STR_CACHE_NAME = "STR_STR";
+
+    /** */
+    private static final String STR_ORG_CACHE_NAME = "STR_ORG";
+    
+    private static final String STR_ORG_WITH_FIELDS_CACHE_NAME = "STR_ORG_WITH_FIELDS";
+
+    /** */
+    private static final String OBJ_CACHE_NAME = "ORG_ADDRESS";
+
+    /** */
+    private Consumer<Runnable> shouldFail = (op) -> assertThrowsWithCause(op, IgniteException.class);
+
+    /** */
+    private Consumer<Runnable> shouldSucceed = Runnable::run;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrid(0);
+
+        Map<String, Integer> strStrPrecision = new HashMap<>();
+
+        strStrPrecision.put(KEY_FIELD_NAME, 5);
+        strStrPrecision.put(VAL_FIELD_NAME, 5);
+
+        jcache(grid(0), cacheConfiguration(new QueryEntity(String.class.getName(), String.class.getName())
+            .setFieldsPrecision(strStrPrecision)), STR_CACHE_NAME);
+
+        Map<String, Integer> orgAddressPrecision = new HashMap<>();
+
+        orgAddressPrecision.put("name", 5);
+        orgAddressPrecision.put("address", 5);
+
+        jcache(grid(0), cacheConfiguration(new QueryEntity(Organization.class.getName(), Address.class.getName())
+            .addQueryField("name", "java.lang.String", "name")
+            .addQueryField("address", "java.lang.String", "address")
+            .setFieldsPrecision(orgAddressPrecision)), OBJ_CACHE_NAME);
+
+        Map<String, Integer> strOrgPrecision = new HashMap<>();
+
+        strOrgPrecision.put(KEY_FIELD_NAME, 5);
+
+        jcache(grid(0), cacheConfiguration(new QueryEntity(String.class.getName(), Organization.class.getName())
+            .setFieldsPrecision(strOrgPrecision)), STR_ORG_CACHE_NAME);
+
+        jcache(grid(0), cacheConfiguration(new QueryEntity(String.class.getName(), Organization.class.getName())
+            .addQueryField("name", "java.lang.String", "name")
+            .addQueryField("address", "java.lang.String", "address")
+            .setFieldsPrecision(strOrgPrecision)), STR_ORG_WITH_FIELDS_CACHE_NAME);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongValueFail() throws Exception {
+        IgniteCache<String, String> cache = jcache(0, STR_CACHE_NAME);
+
+        T2<String, String> val = new T2<>("3", "123456");
+
+        checkPutAll(shouldFail, cache, new T2<>("1", "1"), val);
+
+        checkPutOps(shouldFail, cache, val);
+        
+        checkReplaceOps(shouldFail, cache, val, "1");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongKeyFail() throws Exception {
+        IgniteCache<String, String> cache = jcache(0, STR_CACHE_NAME);
+
+        T2<String, String> val = new T2<>("123456", "2");
+
+        checkPutAll(shouldFail, cache, new T2<>("1", "1"), val);
+
+        checkPutOps(shouldFail, cache, val);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongValueFieldFail() throws Exception {
+        IgniteCache<Organization, Address> cache = jcache(0, OBJ_CACHE_NAME);
+
+        T2<Organization, Address> val = new T2<>(new Organization("3"), new Address("123456"));
+
+        checkPutAll(shouldFail, cache, new T2<>(new Organization("1"), new Address("1")), val);
+
+        checkPutOps(shouldFail, cache, val);
+
+        checkReplaceOps(shouldFail, cache, val, new Address("1"));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongKeyFieldFail() throws Exception {
+        IgniteCache<Organization, Address> cache = jcache(0, OBJ_CACHE_NAME);
+
+        T2<Organization, Address> val = new T2<>(new Organization("123456"), new Address("2"));
+
+        checkPutAll(shouldFail, cache, new T2<>(new Organization("1"), new Address("1")), val);
+
+        checkPutOps(shouldFail, cache, val);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongKeyFail2() throws Exception {
+        doCheckPutTooLongKeyFail2(STR_ORG_CACHE_NAME);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutTooLongKeyFail3() throws Exception {
+        doCheckPutTooLongKeyFail2(STR_ORG_WITH_FIELDS_CACHE_NAME);
+    }
+
+
+    private void doCheckPutTooLongKeyFail2(String cacheName) {
+        IgniteCache<String, Organization> cache = jcache(0, cacheName);
+
+        T2<String, Organization> val = new T2<>("123456", new Organization("1"));
+
+        checkPutAll(shouldFail, cache, new T2<>("1", new Organization("1")), val);
+
+        checkPutOps(shouldFail, cache, val);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongValue() throws Exception {
+        IgniteCache<String, String> cache = jcache(0, STR_CACHE_NAME);
+
+        T2<String, String> val = new T2<>("3", "12345");
+
+        checkPutAll(shouldSucceed, cache, new T2<>("1", "1"), val);
+
+        checkPutOps(shouldSucceed, cache, val);
+
+        checkReplaceOps(shouldSucceed, cache, val, "1");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongKey() throws Exception {
+        IgniteCache<String, String> cache = jcache(0, STR_CACHE_NAME);
+
+        T2<String, String> val = new T2<>("12345", "2");
+
+        checkPutAll(shouldSucceed, cache, new T2<>("1", "1"), val);
+
+        checkPutOps(shouldSucceed, cache, val);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongValueField() throws Exception {
+        IgniteCache<Organization, Address> cache = jcache(0, OBJ_CACHE_NAME);
+
+        T2<Organization, Address> val = new T2<>(new Organization("3"), new Address("12345"));
+
+        checkPutAll(shouldSucceed, cache, new T2<>(new Organization("1"), new Address("1")), val);
+
+        checkPutOps(shouldSucceed, cache, val);
+
+        checkReplaceOps(shouldSucceed, cache, val, new Address("1"));
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongKeyField() throws Exception {
+        IgniteCache<Organization, Address> cache = jcache(0, OBJ_CACHE_NAME);
+
+        T2<Organization, Address> val = new T2<>(new Organization("12345"), new Address("2"));
+
+        checkPutAll(shouldSucceed, cache, new T2<>(new Organization("1"), new Address("1")), val);
+
+        checkPutOps(shouldSucceed, cache, val);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongKey2() throws Exception {
+        doCheckPutLongKey2(STR_ORG_CACHE_NAME);
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testPutLongKey3() throws Exception {
+        doCheckPutLongKey2(STR_ORG_WITH_FIELDS_CACHE_NAME);
+    }
+
+    private void doCheckPutLongKey2(String cacheName) {
+        IgniteCache<String, Organization> cache = jcache(0, cacheName);
+
+        T2<String, Organization> key2 = new T2<>("12345", new Organization("1"));
+
+        checkPutAll(shouldSucceed, cache, new T2<>("1", new Organization("1")), key2);
+
+        checkPutOps(shouldSucceed, cache, key2);
+    }
+
+    /** */
+    private <K, V> void checkReplaceOps(Consumer<Runnable> checker, IgniteCache<K, V> cache, T2<K, V> val, V okVal) {
+        K k = val.get1();
+        V v = val.get2();
+
+        cache.put(k, okVal);
+
+        CacheEntryProcessor<K, V, ?> entryProcessor = (e, arguments) -> {
+            e.setValue((V)arguments[0]);
+
+            return null;
+        };
+
+        Stream<Runnable> ops = Stream.of(
+            () -> cache.replace(k, v),
+            () -> cache.getAndReplace(k, v),
+            () -> cache.replace(k, okVal, v),
+            () -> cache.invoke(k, entryProcessor, v),
+            () -> cache.replaceAsync(k, v).get(FUT_TIMEOUT),
+            () -> cache.getAndReplaceAsync(k, v).get(FUT_TIMEOUT),
+            () -> cache.replaceAsync(k, okVal, v).get(FUT_TIMEOUT),
+            () -> cache.invokeAsync(k, entryProcessor, v).get(FUT_TIMEOUT)
+        );
+
+        ops.forEach(checker);
+    }
+
+    /** */
+    private <K, V> void checkPutOps(Consumer<Runnable> checker, IgniteCache<K, V> cache, T2<K, V> val) {
+        K k = val.get1();
+        V v = val.get2();
+
+        Stream<Runnable> ops = Stream.of(
+            () -> cache.put(k, v),
+            () -> cache.putIfAbsent(k, v),
+            () -> cache.getAndPut(k, v),
+            () -> cache.getAndPutIfAbsent(k, v),
+            () -> cache.putAsync(k, v).get(FUT_TIMEOUT),
+            () -> cache.putIfAbsentAsync(k, v).get(FUT_TIMEOUT),
+            () -> cache.getAndPutAsync(k, v).get(FUT_TIMEOUT),
+            () -> cache.getAndPutIfAbsentAsync(k, v).get(FUT_TIMEOUT)
+        );
+
+        ops.forEach(checker);
+    }
+
+    /** */
+    private <K, V> void checkPutAll(Consumer<Runnable> checker, IgniteCache<K, V> cache, T2<K, V>... entries) {
+        CacheEntryProcessor<K, V, ?> entryProcessor = (e, arguments) -> {
+            e.setValue(((Iterator<V>)arguments[0]).next());
+
+            return null;
+        };
+
+        Map<K, V> vals = Arrays.stream(entries).collect(Collectors.toMap(T2::get1, T2::get2));
+
+        Stream<Runnable> ops = Stream.of(
+            () -> cache.putAll(vals),
+            () -> cache.putAllAsync(vals).get(FUT_TIMEOUT),
+            () -> {
+                Map<K, ? extends EntryProcessorResult<?>> map =
+                    cache.invokeAll(vals.keySet(), entryProcessor, vals.values().iterator());
+
+                for (EntryProcessorResult<?> result : map.values())
+                    log.info(">>> " + result.get());
+            },
+            () -> {
+                Map<K, ? extends EntryProcessorResult<?>> map =
+                    cache.invokeAllAsync(vals.keySet(), entryProcessor, vals.values().iterator()).get(FUT_TIMEOUT);
+
+                for (EntryProcessorResult<?> result : map.values())
+                    log.info(">>> " + result.get());
+            }
+        );
+
+        ops.forEach(checker);
+    }
+
+    /**
+     * @param qryEntity Query entity.
+     * @return Cache configuration.
+     */
+    protected CacheConfiguration cacheConfiguration(QueryEntity qryEntity) {
+        CacheConfiguration<?, ?> cache = defaultCacheConfiguration();
+
+        cache.setCacheMode(cacheMode());
+        cache.setAtomicityMode(atomicityMode());
+        cache.setBackups(1);
+        cache.setWriteSynchronizationMode(FULL_SYNC);
+
+        cache.setQueryEntities(Collections.singletonList(qryEntity));
+
+        return cache;
+    }
+
+    /** */
+    @NotNull protected CacheAtomicityMode atomicityMode() {
+        return ATOMIC;
+    }
+
+    /** */
+    @NotNull protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+
+    /** */
+    @SuppressWarnings("UnusedDeclaration")
+    private static class Organization implements Serializable {
+        /** Name. */
+        private final String name;
+
+        /**
+         * @param name Name.
+         */
+        private Organization(String name) {
+            this.name = name;
+        }
+    }
+
+    /** */
+    @SuppressWarnings("UnusedDeclaration")
+    private static class Address implements Serializable {
+        /** Name. */
+        private final String address;
+
+        /**
+         * @param address Address.
+         */
+        private Address(String address) {
+            this.address = address;
+        }
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedTransactionalColumnConstraintsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedTransactionalColumnConstraintsTest.java
new file mode 100644
index 0000000..cd5c979
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCachePartitionedTransactionalColumnConstraintsTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.sql;
+
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.jetbrains.annotations.NotNull;
+
+/** */
+public class IgniteCachePartitionedTransactionalColumnConstraintsTest 
+    extends IgniteCachePartitionedAtomicColumnConstraintsTest {
+    /** {@inheritDoc} */
+    @NotNull protected CacheAtomicityMode atomicityMode() {
+        return CacheAtomicityMode.TRANSACTIONAL;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedAtomicColumnConstraintsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedAtomicColumnConstraintsTest.java
new file mode 100644
index 0000000..eba9f64
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedAtomicColumnConstraintsTest.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.sql;
+
+import org.apache.ignite.cache.CacheMode;
+import org.jetbrains.annotations.NotNull;
+
+import static org.apache.ignite.cache.CacheMode.PARTITIONED;
+
+/** */
+public class IgniteCacheReplicatedAtomicColumnConstraintsTest 
+    extends IgniteCachePartitionedAtomicColumnConstraintsTest {
+    /** {@inheritDoc} */
+    @NotNull @Override protected CacheMode cacheMode() {
+        return PARTITIONED;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedTransactionalColumnConstraintsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedTransactionalColumnConstraintsTest.java
new file mode 100644
index 0000000..bddd69a
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteCacheReplicatedTransactionalColumnConstraintsTest.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.sql;
+
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.jetbrains.annotations.NotNull;
+
+/** */
+public class IgniteCacheReplicatedTransactionalColumnConstraintsTest 
+    extends IgniteCacheReplicatedAtomicColumnConstraintsTest {
+    /** {@inheritDoc} */
+    @NotNull @Override protected CacheAtomicityMode atomicityMode() {
+        return CacheAtomicityMode.TRANSACTIONAL;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteSQLColumnConstraintsTest.java b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteSQLColumnConstraintsTest.java
new file mode 100644
index 0000000..762743b
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/internal/processors/sql/IgniteSQLColumnConstraintsTest.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.sql;
+
+import java.util.List;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.processors.odbc.SqlStateCode;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+import org.apache.ignite.testframework.GridTestUtils;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+import static org.apache.ignite.internal.processors.odbc.SqlStateCode.CONSTRAINT_VIOLATION;
+
+/**
+ */
+public class IgniteSQLColumnConstraintsTest extends GridCommonAbstractTest {
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrid(0);
+
+        execSQL("CREATE TABLE varchar_table(id INT PRIMARY KEY, str VARCHAR(5))");
+
+        execSQL("INSERT INTO varchar_table VALUES(?, ?)", 1, "12345");
+
+        execSQL("CREATE TABLE char_table(id INT PRIMARY KEY, str CHAR(5))");
+
+        execSQL("INSERT INTO char_table VALUES(?, ?)", 1, "12345");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testCreateTableWithTooLongDefault() throws Exception {
+        checkSQLThrows("CREATE TABLE too_long_default(id INT PRIMARY KEY, str CHAR(5) DEFAULT '123456')");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInsertTooLongVarchar() throws Exception {
+        checkSQLThrows("INSERT INTO varchar_table VALUES(?, ?)", 2, "123456");
+
+        checkSQLThrows("UPDATE varchar_table SET str = ? WHERE id = ?", "123456", 1);
+
+        checkSQLThrows("MERGE INTO varchar_table(id, str) VALUES(?, ?)", 1, "123456");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testInsertTooLongChar() throws Exception {
+        checkSQLThrows("INSERT INTO char_table VALUES(?, ?)", 2, "123456");
+
+        checkSQLThrows("UPDATE char_table SET str = ? WHERE id = ?", "123456", 1);
+
+        checkSQLThrows("MERGE INTO char_table(id, str) VALUES(?, ?)", 1, "123456");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testConstraintsAfterAlterTable() throws Exception {
+        execSQL("CREATE TABLE char_table_2(id INT PRIMARY KEY, field INTEGER)");
+
+        execSQL("ALTER TABLE char_table_2 ADD COLUMN str CHAR(5) NOT NULL");
+        
+        execSQL("INSERT INTO char_table_2(id, str) VALUES(?, ?)", 1, "1");
+
+        checkSQLThrows("INSERT INTO char_table_2(id, str) VALUES(?, ?)", 2, "123456");
+
+        checkSQLThrows("UPDATE char_table_2 SET str = ? WHERE id = ?", "123456", 1);
+
+        checkSQLThrows("MERGE INTO char_table_2(id, str) VALUES(?, ?)", 1, "123456");
+    }
+
+    /**
+     * @throws Exception If failed.
+     */
+    public void testDropColumnWithConstraint() throws Exception {
+        execSQL("CREATE TABLE char_table_3(id INT PRIMARY KEY, field CHAR(5), field2 INTEGER)");
+
+        execSQL("INSERT INTO char_table_3(id, field, field2) VALUES(?, ?, ?)", 1, "12345", 1);
+
+        checkSQLThrows("INSERT INTO char_table_3(id, field, field2) VALUES(?, ?, ?)", 2, "123456", 1);
+
+        execSQL("ALTER TABLE char_table_3 DROP COLUMN field");
+
+        execSQL("INSERT INTO char_table_3(id, field2) VALUES(?, ?)", 3, 3);
+    }
+
+    public void testSqlState() throws Exception {
+        execSQL("CREATE TABLE char_table_4(id INT PRIMARY KEY, field CHAR(5))");
+
+        IgniteSQLException err = (IgniteSQLException)
+            checkSQLThrows("INSERT INTO char_table_4(id, field) VALUES(?, ?)", 1, "123456");
+
+        assertEquals(err.sqlState(), CONSTRAINT_VIOLATION);
+
+        execSQL("INSERT INTO char_table_4(id, field) VALUES(?, ?)", 2, "12345");
+
+        err = (IgniteSQLException)
+            checkSQLThrows("UPDATE char_table_4 SET field = ? WHERE id = ?", "123456", 2);
+
+        assertEquals(err.sqlState(), CONSTRAINT_VIOLATION);
+
+        err = (IgniteSQLException)
+            checkSQLThrows("MERGE INTO char_table_4(id, field) VALUES(?, ?)", 2, "123456");
+
+        assertEquals(err.sqlState(), CONSTRAINT_VIOLATION);
+    }
+
+    /** */
+    private Throwable checkSQLThrows(String sql, Object... args) {
+        return GridTestUtils.assertThrowsWithCause(() -> {
+            execSQL(sql, args);
+
+            return 0;
+        }, IgniteSQLException.class);
+    }
+
+    /** */
+    private List<?> execSQL(String sql, Object... args) {
+        SqlFieldsQuery qry = new SqlFieldsQuery(sql)
+            .setArgs(args);
+
+        return grid(0).context().query().querySqlFields(qry, true).getAll();
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java
deleted file mode 100644
index 015ec96..0000000
--- a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTestEntity.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.h2indexing;
-
-import java.util.Date;
-import org.apache.ignite.cache.query.annotations.QuerySqlField;
-
-/**
- * Test entity.
- */
-public class GridTestEntity {
-    /** */
-    @QuerySqlField(index = true)
-    private final String name;
-
-    /** */
-    @QuerySqlField(index = false)
-    private final Date date;
-
-    /**
-     * Constructor.
-     *
-     * @param name Name.
-     * @param date Date.
-     */
-    @SuppressWarnings("AssignmentToDateFieldFromParameter")
-    public GridTestEntity(String name, Date date) {
-        this.name = name;
-        this.date = date;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object o) {
-        if (this == o) return true;
-        if (o == null || getClass() != o.getClass()) return false;
-
-        GridTestEntity that = (GridTestEntity) o;
-
-        return !(date != null ? !date.equals(that.date) : that.date != null) &&
-            !(name != null ? !name.equals(that.name) : that.name != null);
-
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        int res = name != null ? name.hashCode() : 0;
-
-        res = 31 * res + (date != null ? date.hashCode() : 0);
-
-        return res;
-    }
-}
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java b/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java
deleted file mode 100644
index d9bc8ce..0000000
--- a/modules/indexing/src/test/java/org/apache/ignite/loadtests/h2indexing/GridTreeBenchmark.java
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.loadtests.h2indexing;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.ConcurrentNavigableMap;
-import java.util.concurrent.ConcurrentSkipListMap;
-import java.util.concurrent.CyclicBarrier;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.apache.ignite.internal.util.snaptree.SnapTreeMap;
-
-/**
- * NavigableMaps PUT benchmark.
- */
-public class GridTreeBenchmark {
-    /** */
-    private static final int PUTS = 8000000;
-
-    /** */
-    private static final int THREADS = 8;
-
-    /** */
-    private static final int ITERATIONS = PUTS / THREADS;
-
-    /**
-     * Main method.
-     *
-     * @param args Command line args (not used).
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    public static void main(String... args) throws BrokenBarrierException, InterruptedException {
-        doTestMaps();
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doTestAtomicInt() throws BrokenBarrierException, InterruptedException {
-        final AtomicInteger[] cnts = new AtomicInteger[8];
-
-        for (int i = 0; i < cnts.length; i++)
-            cnts[i] = new AtomicInteger();
-
-        final Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int idx = cnt.getAndIncrement();
-
-                    AtomicInteger x = cnts[idx % cnts.length];
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++)
-                        x.incrementAndGet();
-                }
-            });
-
-            ths[i].start();
-        }
-
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        System.out.println(time);
-
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doTestMaps() throws BrokenBarrierException, InterruptedException {
-        final UUID[] data = generate();
-
-        @SuppressWarnings("unchecked")
-        final Map<UUID, UUID>[] maps = new Map[4];
-
-        for (int i = 0; i < maps.length; i++)
-            maps[i] =
-                new SnapTreeMap<>();
-
-
-        final Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int idx = cnt.getAndIncrement();
-
-                    int off = idx * ITERATIONS;
-
-                    Map<UUID, UUID> map = maps[idx % maps.length];
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++) {
-                        UUID id = data[off + i];
-
-                        id = map.put(id, id);
-
-                        assert id == null;
-                    }
-                }
-            });
-
-            ths[i].start();
-        }
-
-        System.out.println("Sleep");
-        Thread.sleep(10000);
-
-        System.out.println("Go");
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        System.out.println(time);
-    }
-
-    /**
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static void doBenchmark() throws BrokenBarrierException, InterruptedException {
-        int attemts = 20;
-        int warmups = 10;
-
-        long snapTreeTime = 0;
-        long skipListTime = 0;
-
-        for (int i = 0; i < attemts; i++) {
-            ConcurrentNavigableMap<UUID, UUID> skipList = new ConcurrentSkipListMap<>();
-            ConcurrentNavigableMap<UUID, UUID> snapTree = new SnapTreeMap<>();
-
-            UUID[] ids = generate();
-
-            boolean warmup = i < warmups;
-
-            snapTreeTime += doTest(snapTree, ids, warmup);
-            skipListTime += doTest(skipList, ids, warmup);
-
-            assert skipList.size() == snapTree.size();
-
-            Iterator<UUID> snapIt = snapTree.keySet().iterator();
-            Iterator<UUID> listIt = skipList.keySet().iterator();
-
-            for (int x = 0, len = skipList.size(); x < len; x++)
-                assert snapIt.next() == listIt.next();
-
-            System.out.println(i + " ==================");
-        }
-
-        attemts -= warmups;
-
-        System.out.println("Avg for GridSnapTreeMap: " + (snapTreeTime / attemts) + " ms");
-        System.out.println("Avg for ConcurrentSkipListMap: " + (skipListTime / attemts) + " ms");
-     }
-
-    /**
-     * @return UUIDs.
-     */
-    private static UUID[] generate() {
-        UUID[] ids = new UUID[ITERATIONS * THREADS];
-
-        for (int i = 0; i < ids.length; i++)
-            ids[i] = UUID.randomUUID();
-
-        return ids;
-    }
-
-    /**
-     * @param tree Tree.
-     * @param data Data.
-     * @param warmup Warmup.
-     * @return Time.
-     * @throws BrokenBarrierException If failed.
-     * @throws InterruptedException If failed.
-     */
-    private static long doTest(final ConcurrentNavigableMap<UUID, UUID> tree, final UUID[] data, boolean warmup)
-        throws BrokenBarrierException, InterruptedException {
-        Thread[] ths = new Thread[THREADS];
-
-        final CyclicBarrier barrier = new CyclicBarrier(THREADS + 1);
-
-        final AtomicInteger cnt = new AtomicInteger();
-
-        for (int i = 0; i < ths.length; i++) {
-            ths[i] = new Thread(new Runnable() {
-                @Override public void run() {
-                    int off = cnt.getAndIncrement() * ITERATIONS;
-
-                    try {
-                        barrier.await();
-                    }
-                    catch (Exception e) {
-                        throw new IllegalStateException(e);
-                    }
-
-                    for (int i = 0; i < ITERATIONS; i++) {
-                        UUID id = data[off + i];
-
-                        id = tree.put(id, id);
-
-                        assert id == null;
-                    }
-                }
-            });
-
-            ths[i].start();
-        }
-
-        barrier.await();
-
-        long start = System.currentTimeMillis();
-
-        for (Thread t : ths)
-            t.join();
-
-        long time = System.currentTimeMillis() - start;
-
-        if (!warmup) {
-            System.out.println(tree.getClass().getSimpleName() + "  " + time + " ms");
-
-            return time;
-        }
-
-        return 0;
-    }
-}
\ No newline at end of file
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccSqlTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccSqlTestSuite.java
new file mode 100644
index 0000000..ec60596
--- /dev/null
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheMvccSqlTestSuite.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.testsuites;
+
+import junit.framework.TestSuite;
+import org.apache.ignite.internal.processors.cache.index.SqlTransactionsCommandsWithMvccEnabledSelfTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccBulkLoadTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccDmlSimpleTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccIteratorWithConcurrentJdbcTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedBackupsTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedSelectForUpdateQueryTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedSqlCoordinatorFailoverTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedSqlQueriesTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedSqlTxQueriesTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccPartitionedSqlTxQueriesWithReducerTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedBackupsTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedSelectForUpdateQueryTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedSqlCoordinatorFailoverTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedSqlQueriesTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedSqlTxQueriesTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccReplicatedSqlTxQueriesWithReducerTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccScanQueryWithConcurrentJdbcTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccSizeTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccSizeWithConcurrentJdbcTransactionTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccSqlConfigurationValidationTest;
+import org.apache.ignite.internal.processors.cache.mvcc.CacheMvccStreamingInsertTest;
+import org.apache.ignite.internal.processors.query.h2.GridIndexRebuildWithMvccEnabledSelfTest;
+
+/**
+ *
+ */
+public class IgniteCacheMvccSqlTestSuite extends TestSuite {
+    /**
+     * @return Test suite.
+     */
+    public static TestSuite suite() {
+        TestSuite suite = new TestSuite("IgniteCache SQL MVCC Test Suite");
+
+        // Simple tests.
+        suite.addTestSuite(CacheMvccSqlConfigurationValidationTest.class);
+        suite.addTestSuite(CacheMvccDmlSimpleTest.class);
+        suite.addTestSuite(SqlTransactionsCommandsWithMvccEnabledSelfTest.class);
+        suite.addTestSuite(CacheMvccSizeTest.class);
+
+        suite.addTestSuite(GridIndexRebuildWithMvccEnabledSelfTest.class);
+
+        // JDBC tests.
+        suite.addTestSuite(CacheMvccSizeWithConcurrentJdbcTransactionTest.class);
+        suite.addTestSuite(CacheMvccScanQueryWithConcurrentJdbcTransactionTest.class);
+        suite.addTestSuite(CacheMvccLocalEntriesWithConcurrentJdbcTransactionTest.class);
+        suite.addTestSuite(CacheMvccIteratorWithConcurrentJdbcTransactionTest.class);
+
+        // Load tests.
+        suite.addTestSuite(CacheMvccBulkLoadTest.class);
+        suite.addTestSuite(CacheMvccStreamingInsertTest.class);
+
+        suite.addTestSuite(CacheMvccPartitionedSqlQueriesTest.class);
+        suite.addTestSuite(CacheMvccReplicatedSqlQueriesTest.class);
+        suite.addTestSuite(CacheMvccPartitionedSqlTxQueriesTest.class);
+        suite.addTestSuite(CacheMvccReplicatedSqlTxQueriesTest.class);
+
+        suite.addTestSuite(CacheMvccPartitionedSqlTxQueriesWithReducerTest.class);
+        suite.addTestSuite(CacheMvccReplicatedSqlTxQueriesWithReducerTest.class);
+        suite.addTestSuite(CacheMvccPartitionedSelectForUpdateQueryTest.class);
+        suite.addTestSuite(CacheMvccReplicatedSelectForUpdateQueryTest.class);
+
+        // Failover tests.
+        suite.addTestSuite(CacheMvccPartitionedBackupsTest.class);
+        suite.addTestSuite(CacheMvccReplicatedBackupsTest.class);
+
+        suite.addTestSuite(CacheMvccPartitionedSqlCoordinatorFailoverTest.class);
+        suite.addTestSuite(CacheMvccReplicatedSqlCoordinatorFailoverTest.class);
+
+        return suite;
+    }
+}
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java
index d70e5c3..7f67b35 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite.java
@@ -21,14 +21,11 @@
 import org.apache.ignite.internal.processors.cache.CacheIteratorScanQueryTest;
 import org.apache.ignite.internal.processors.cache.CacheLocalQueryDetailMetricsSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheLocalQueryMetricsSelfTest;
-import org.apache.ignite.internal.processors.cache.CacheOffheapBatchIndexingBaseTest;
-import org.apache.ignite.internal.processors.cache.CacheOffheapBatchIndexingMultiTypeTest;
 import org.apache.ignite.internal.processors.cache.CacheOffheapBatchIndexingSingleTypeTest;
 import org.apache.ignite.internal.processors.cache.CachePartitionedQueryDetailMetricsDistributedSelfTest;
 import org.apache.ignite.internal.processors.cache.CachePartitionedQueryDetailMetricsLocalSelfTest;
 import org.apache.ignite.internal.processors.cache.CachePartitionedQueryMetricsDistributedSelfTest;
 import org.apache.ignite.internal.processors.cache.CachePartitionedQueryMetricsLocalSelfTest;
-import org.apache.ignite.internal.processors.cache.CacheQueryBuildValueTest;
 import org.apache.ignite.internal.processors.cache.CacheQueryEvictDataLostTest;
 import org.apache.ignite.internal.processors.cache.CacheQueryNewClientSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheReplicatedQueryDetailMetricsDistributedSelfTest;
@@ -36,6 +33,7 @@
 import org.apache.ignite.internal.processors.cache.CacheReplicatedQueryMetricsDistributedSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheReplicatedQueryMetricsLocalSelfTest;
 import org.apache.ignite.internal.processors.cache.CacheSqlQueryValueCopySelfTest;
+import org.apache.ignite.internal.processors.cache.DdlTransactionSelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheCrossCacheQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheFullTextQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.GridCacheLazyQueryPartitionsReleaseTest;
@@ -49,7 +47,6 @@
 import org.apache.ignite.internal.processors.cache.IgniteBinaryObjectQueryArgumentsTest;
 import org.apache.ignite.internal.processors.cache.IgniteBinaryWrappedObjectFieldsQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheCollocatedQuerySelfTest;
-import org.apache.ignite.internal.processors.cache.IgniteCacheConfigVariationsQueryTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheDeleteSqlQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCollocatedAndNotTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheDistributedJoinCustomAffinityMapper;
@@ -61,7 +58,6 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheFieldsQueryNoDataSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheFullTextQueryNodeJoiningSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheInsertSqlQuerySelfTest;
-import org.apache.ignite.internal.processors.cache.IgniteCacheJoinPartitionedAndReplicatedCollocationTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheJoinPartitionedAndReplicatedTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheJoinQueryWithAffinityKeyTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheLargeResultSelfTest;
@@ -76,14 +72,12 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheQueryIndexSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheQueryLoadSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheSqlQueryErrorSelfTest;
-import org.apache.ignite.internal.processors.cache.IgniteCacheUnionDuplicatesTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheUpdateSqlQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCheckClusterStateBeforeExecuteQueryTest;
-import org.apache.ignite.internal.processors.cache.IgniteClientReconnectCacheQueriesFailoverTest;
 import org.apache.ignite.internal.processors.cache.IgniteCrossCachesJoinsQueryTest;
 import org.apache.ignite.internal.processors.cache.IgniteDynamicSqlRestoreTest;
-import org.apache.ignite.internal.processors.cache.IgniteErrorOnRebalanceTest;
 import org.apache.ignite.internal.processors.cache.IncorrectQueryEntityTest;
+import org.apache.ignite.internal.processors.cache.IndexingCachePartitionLossPolicySelfTest;
 import org.apache.ignite.internal.processors.cache.QueryEntityCaseMismatchTest;
 import org.apache.ignite.internal.processors.cache.SqlFieldsQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.authentication.SqlUserCommandSelfTest;
@@ -117,12 +111,6 @@
 import org.apache.ignite.internal.processors.cache.index.DynamicIndexServerNodeFIlterBasicSelfTest;
 import org.apache.ignite.internal.processors.cache.index.DynamicIndexServerNodeFilterCoordinatorBasicSelfTest;
 import org.apache.ignite.internal.processors.cache.index.H2ConnectionLeaksSelfTest;
-import org.apache.ignite.internal.processors.cache.index.H2DynamicIndexingComplexClientAtomicPartitionedNoBackupsTest;
-import org.apache.ignite.internal.processors.cache.index.H2DynamicIndexingComplexClientTransactionalPartitionedNoBackupsTest;
-import org.apache.ignite.internal.processors.cache.index.H2DynamicIndexingComplexServerAtomicPartitionedNoBackupsTest;
-import org.apache.ignite.internal.processors.cache.index.H2DynamicIndexingComplexServerTransactionalPartitionedNoBackupsTest;
-import org.apache.ignite.internal.processors.cache.index.H2DynamicIndexingComplexAbstractTest;
-import org.apache.ignite.internal.processors.cache.index.IgniteDecimalSelfTest;
 import org.apache.ignite.internal.processors.cache.index.H2DynamicColumnsClientBasicSelfTest;
 import org.apache.ignite.internal.processors.cache.index.H2DynamicColumnsServerBasicSelfTest;
 import org.apache.ignite.internal.processors.cache.index.H2DynamicColumnsServerCoordinatorBasicSelfTest;
@@ -143,20 +131,20 @@
 import org.apache.ignite.internal.processors.cache.index.H2DynamicTableSelfTest;
 import org.apache.ignite.internal.processors.cache.index.H2RowCachePageEvictionTest;
 import org.apache.ignite.internal.processors.cache.index.H2RowCacheSelfTest;
+import org.apache.ignite.internal.processors.cache.index.IgniteDecimalSelfTest;
 import org.apache.ignite.internal.processors.cache.index.LongIndexNameTest;
 import org.apache.ignite.internal.processors.cache.index.OptimizedMarshallerIndexNameTest;
 import org.apache.ignite.internal.processors.cache.index.SchemaExchangeSelfTest;
+import org.apache.ignite.internal.processors.cache.index.SqlTransactionsComandsSelfTest;
 import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalAtomicQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalFieldsQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalQueryCancelOrTimeoutSelfTest;
 import org.apache.ignite.internal.processors.cache.local.IgniteCacheLocalQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.query.CacheScanQueryFailoverTest;
 import org.apache.ignite.internal.processors.cache.query.GridCacheQueryTransformerSelfTest;
-import org.apache.ignite.internal.processors.cache.query.GridCircularQueueTest;
 import org.apache.ignite.internal.processors.cache.query.IgniteCacheQueryCacheDestroySelfTest;
 import org.apache.ignite.internal.processors.cache.query.IndexingSpiQuerySelfTest;
 import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryTxSelfTest;
-import org.apache.ignite.internal.processors.cache.query.IndexingSpiQueryWithH2IndexingSelfTest;
 import org.apache.ignite.internal.processors.client.ClientConnectorConfigurationValidationSelfTest;
 import org.apache.ignite.internal.processors.database.baseline.IgniteStableBaselineBinObjFieldsQuerySelfTest;
 import org.apache.ignite.internal.processors.query.IgniteCachelessQueriesSelfTest;
@@ -176,22 +164,34 @@
 import org.apache.ignite.internal.processors.query.IgniteSqlSplitterSelfTest;
 import org.apache.ignite.internal.processors.query.LazyQuerySelfTest;
 import org.apache.ignite.internal.processors.query.MultipleStatementsSqlQuerySelfTest;
-import org.apache.ignite.internal.processors.query.SqlSystemViewsSelfTest;
+import org.apache.ignite.internal.processors.query.SqlIllegalSchemaSelfTest;
 import org.apache.ignite.internal.processors.query.SqlPushDownFunctionTest;
 import org.apache.ignite.internal.processors.query.SqlSchemaSelfTest;
+import org.apache.ignite.internal.processors.query.SqlSystemViewsSelfTest;
 import org.apache.ignite.internal.processors.query.h2.GridH2IndexingInMemSelfTest;
 import org.apache.ignite.internal.processors.query.h2.GridH2IndexingOffheapSelfTest;
+import org.apache.ignite.internal.processors.query.h2.GridIndexRebuildSelfTest;
+import org.apache.ignite.internal.processors.query.h2.H2ResultSetIteratorNullifyOnEndSelfTest;
+import org.apache.ignite.internal.processors.query.h2.H2StatementCacheSelfTest;
 import org.apache.ignite.internal.processors.query.h2.IgniteSqlBigIntegerKeyTest;
 import org.apache.ignite.internal.processors.query.h2.IgniteSqlQueryMinMaxTest;
+import org.apache.ignite.internal.processors.query.h2.PreparedStatementExSelfTest;
+import org.apache.ignite.internal.processors.query.h2.ThreadLocalObjectPoolSelfTest;
 import org.apache.ignite.internal.processors.query.h2.sql.BaseH2CompareQueryTest;
 import org.apache.ignite.internal.processors.query.h2.sql.GridQueryParsingTest;
 import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryDistributedJoinsTest;
 import org.apache.ignite.internal.processors.query.h2.sql.H2CompareBigQueryTest;
+import org.apache.ignite.internal.processors.sql.IgniteCachePartitionedAtomicColumnConstraintsTest;
+import org.apache.ignite.internal.processors.sql.IgniteCachePartitionedTransactionalColumnConstraintsTest;
+import org.apache.ignite.internal.processors.sql.IgniteCacheReplicatedAtomicColumnConstraintsTest;
+import org.apache.ignite.internal.processors.sql.IgniteCacheReplicatedTransactionalColumnConstraintsTest;
+import org.apache.ignite.internal.processors.sql.IgniteSQLColumnConstraintsTest;
 import org.apache.ignite.internal.processors.sql.SqlConnectorConfigurationValidationSelfTest;
 import org.apache.ignite.internal.sql.SqlParserBulkLoadSelfTest;
 import org.apache.ignite.internal.sql.SqlParserCreateIndexSelfTest;
 import org.apache.ignite.internal.sql.SqlParserDropIndexSelfTest;
 import org.apache.ignite.internal.sql.SqlParserSetStreamingSelfTest;
+import org.apache.ignite.internal.sql.SqlParserTransactionalKeywordsSelfTest;
 import org.apache.ignite.internal.sql.SqlParserUserSelfTest;
 import org.apache.ignite.spi.communication.tcp.GridOrderedMessageCancelSelfTest;
 import org.apache.ignite.sqltests.PartitionedSqlTest;
@@ -214,6 +214,7 @@
 
         suite.addTestSuite(SqlParserCreateIndexSelfTest.class);
         suite.addTestSuite(SqlParserDropIndexSelfTest.class);
+        suite.addTestSuite(SqlParserTransactionalKeywordsSelfTest.class);
         suite.addTestSuite(SqlParserBulkLoadSelfTest.class);
         suite.addTestSuite(SqlParserSetStreamingSelfTest.class);
 
@@ -221,6 +222,7 @@
         suite.addTestSuite(ClientConnectorConfigurationValidationSelfTest.class);
 
         suite.addTestSuite(SqlSchemaSelfTest.class);
+        suite.addTestSuite(SqlIllegalSchemaSelfTest.class);
         suite.addTestSuite(MultipleStatementsSqlQuerySelfTest.class);
 
         // Misc tests.
@@ -290,6 +292,7 @@
         suite.addTestSuite(IgniteCacheCollocatedQuerySelfTest.class);
         suite.addTestSuite(IgniteCacheLargeResultSelfTest.class);
         suite.addTestSuite(GridCacheQueryInternalKeysSelfTest.class);
+        suite.addTestSuite(H2ResultSetIteratorNullifyOnEndSelfTest.class);
         suite.addTestSuite(IgniteSqlBigIntegerKeyTest.class);
         suite.addTestSuite(IgniteCacheOffheapEvictQueryTest.class);
         suite.addTestSuite(IgniteCacheOffheapIndexScanTest.class);
@@ -360,6 +363,8 @@
         //suite.addTestSuite(H2DynamicIndexingComplexServerTransactionalPartitionedNoBackupsTest.class);
         suite.addTestSuite(H2DynamicIndexingComplexServerTransactionalReplicatedTest.class);
 
+        suite.addTestSuite(DdlTransactionSelfTest.class);
+
         // Fields queries.
         suite.addTestSuite(SqlFieldsQuerySelfTest.class);
         suite.addTestSuite(IgniteCacheLocalFieldsQuerySelfTest.class);
@@ -440,8 +445,18 @@
         suite.addTestSuite(OptimizedMarshallerIndexNameTest.class);
         suite.addTestSuite(SqlSystemViewsSelfTest.class);
 
+        suite.addTestSuite(GridIndexRebuildSelfTest.class);
+
+        suite.addTestSuite(SqlTransactionsComandsSelfTest.class);
+
         suite.addTestSuite(IgniteSqlDefaultValueTest.class);
         suite.addTestSuite(IgniteDecimalSelfTest.class);
+        suite.addTestSuite(IgniteSQLColumnConstraintsTest.class);
+
+        suite.addTestSuite(IgniteCachePartitionedAtomicColumnConstraintsTest.class);
+        suite.addTestSuite(IgniteCachePartitionedTransactionalColumnConstraintsTest.class);
+        suite.addTestSuite(IgniteCacheReplicatedAtomicColumnConstraintsTest.class);
+        suite.addTestSuite(IgniteCacheReplicatedTransactionalColumnConstraintsTest.class);
 
         // H2 Rows on-heap cache
         suite.addTestSuite(H2RowCacheSelfTest.class);
@@ -451,6 +466,13 @@
         suite.addTestSuite(SqlParserUserSelfTest.class);
         suite.addTestSuite(SqlUserCommandSelfTest.class);
 
+        suite.addTestSuite(ThreadLocalObjectPoolSelfTest.class);
+        suite.addTestSuite(H2StatementCacheSelfTest.class);
+        suite.addTestSuite(PreparedStatementExSelfTest.class);
+
+        // Partition loss.
+        suite.addTestSuite(IndexingCachePartitionLossPolicySelfTest.class);
+
         return suite;
     }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
index 1b76283..536834c 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite2.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.testsuites;
 
 import junit.framework.TestSuite;
-import org.apache.ignite.internal.processors.cache.QueryJoinWithDifferentNodeFiltersTest;
 import org.apache.ignite.internal.processors.cache.CacheScanPartitionQueryFallbackSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheCrossCacheJoinRandomTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheObjectKeyIndexingSelfTest;
@@ -26,6 +25,7 @@
 import org.apache.ignite.internal.processors.cache.IgniteCacheQueryEvictsMultiThreadedSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheQueryMultiThreadedSelfTest;
 import org.apache.ignite.internal.processors.cache.IgniteCacheSqlQueryMultiThreadedSelfTest;
+import org.apache.ignite.internal.processors.cache.QueryJoinWithDifferentNodeFiltersTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheClientQueryReplicatedNodeRestartSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheDistributedQueryStopOnCancelOrTimeoutSelfTest;
 import org.apache.ignite.internal.processors.cache.distributed.near.IgniteCacheQueryNodeFailTest;
@@ -50,6 +50,11 @@
 import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlDistributedJoinSelfTest;
 import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexMultiNodeSelfTest;
 import org.apache.ignite.internal.processors.query.IgniteCacheGroupsSqlSegmentedIndexSelfTest;
+import org.apache.ignite.internal.processors.query.h2.twostep.CacheQueryMemoryLeakTest;
+import org.apache.ignite.internal.processors.query.h2.twostep.DisappearedCacheCauseRetryMessageSelfTest;
+import org.apache.ignite.internal.processors.query.h2.twostep.DisappearedCacheWasNotFoundMessageSelfTest;
+import org.apache.ignite.internal.processors.query.h2.twostep.NonCollocatedRetryMessageSelfTest;
+import org.apache.ignite.internal.processors.query.h2.twostep.RetryCauseMessageSelfTest;
 import org.apache.ignite.testframework.IgniteTestSuite;
 
 /**
@@ -107,6 +112,13 @@
 
         suite.addTestSuite(QueryJoinWithDifferentNodeFiltersTest.class);
 
+        suite.addTestSuite(CacheQueryMemoryLeakTest.class);
+
+        suite.addTestSuite(NonCollocatedRetryMessageSelfTest.class);
+        suite.addTestSuite(RetryCauseMessageSelfTest.class);
+        suite.addTestSuite(DisappearedCacheCauseRetryMessageSelfTest.class);
+        suite.addTestSuite(DisappearedCacheWasNotFoundMessageSelfTest.class);
+
         return suite;
     }
 }
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
index e810d30..08511d9 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgniteCacheQuerySelfTestSuite3.java
@@ -47,6 +47,7 @@
 import org.apache.ignite.internal.processors.cache.query.continuous.CacheKeepBinaryIterationTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.CacheKeepBinaryIterationStoreEnabledTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.ClientReconnectContinuousQueryTest;
+import org.apache.ignite.internal.processors.cache.query.continuous.ContinuousQueryMarshallerTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.ContinuousQueryPeerClassLoadingTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.ContinuousQueryRemoteFilterMissingInClassPathSelfTest;
 import org.apache.ignite.internal.processors.cache.query.continuous.GridCacheContinuousQueryAtomicNearEnabledSelfTest;
@@ -132,6 +133,7 @@
         suite.addTestSuite(ClientReconnectContinuousQueryTest.class);
         suite.addTestSuite(ContinuousQueryPeerClassLoadingTest.class);
         suite.addTestSuite(ClientReconnectContinuousQueryTest.class);
+        suite.addTestSuite(ContinuousQueryMarshallerTest.class);
 
         suite.addTestSuite(CacheContinuousQueryConcurrentPartitionUpdateTest.class);
         suite.addTestSuite(CacheContinuousQueryEventBufferTest.class);
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java
index 491bab7..2989ccd 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingCoreTestSuite.java
@@ -72,7 +72,9 @@
         suite.addTestSuite(IgnitePdsTxHistoricalRebalancingTest.class);
 
         suite.addTestSuite(IgniteWalRecoveryPPCTest.class);
+
         suite.addTestSuite(IgnitePdsDiskErrorsRecoveringTest.class);
+
         suite.addTestSuite(IgnitePdsCacheDestroyDuringCheckpointTest.class);
 
         suite.addTestSuite(IgnitePdsBinaryMetadataOnClusterRestartTest.class);
diff --git a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java
index 033e5a9..67b9fad 100644
--- a/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java
+++ b/modules/indexing/src/test/java/org/apache/ignite/testsuites/IgnitePdsWithIndexingTestSuite.java
@@ -25,6 +25,7 @@
 import org.apache.ignite.internal.processors.database.IgniteDbSingleNodeWithIndexingWalRestoreTest;
 import org.apache.ignite.internal.processors.database.IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest;
 import org.apache.ignite.internal.processors.database.IgnitePersistentStoreSchemaLoadTest;
+import org.apache.ignite.internal.processors.database.IgniteTwoRegionsRebuildIndexTest;
 
 /**
  *
@@ -44,6 +45,7 @@
         suite.addTestSuite(IgnitePdsSingleNodeWithIndexingAndGroupPutGetPersistenceSelfTest.class);
         suite.addTestSuite(IgnitePersistentStoreSchemaLoadTest.class);
         suite.addTestSuite(IgnitePersistentStoreQueryWithMultipleClassesPerCacheTest.class);
+        suite.addTestSuite(IgniteTwoRegionsRebuildIndexTest.class);
 
         return suite;
     }
diff --git a/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person.csv b/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person.csv
new file mode 100644
index 0000000..ef7a087
--- /dev/null
+++ b/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person.csv
@@ -0,0 +1,2 @@
+1,John
+2,Jack
diff --git a/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person_broken.csv b/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person_broken.csv
new file mode 100644
index 0000000..b5c2b3f
--- /dev/null
+++ b/modules/indexing/src/test/resources/org/apache/ignite/internal/processors/cache/mvcc/mvcc_person_broken.csv
@@ -0,0 +1,2 @@
+1,John
+2
diff --git a/modules/jta/src/main/java/org/apache/ignite/internal/processors/cache/jta/CacheJtaManager.java b/modules/jta/src/main/java/org/apache/ignite/internal/processors/cache/jta/CacheJtaManager.java
index 8dcdd57..88c86b2 100644
--- a/modules/jta/src/main/java/org/apache/ignite/internal/processors/cache/jta/CacheJtaManager.java
+++ b/modules/jta/src/main/java/org/apache/ignite/internal/processors/cache/jta/CacheJtaManager.java
@@ -167,6 +167,7 @@
                                 tCfg.getDefaultTxIsolation(),
                                 tCfg.getDefaultTxTimeout(),
                                 /*store enabled*/true,
+                                /*sql*/false,
                                 /*tx size*/0,
                                 null
                             );
diff --git a/modules/kubernetes/config/az/ignite-account-role.yaml b/modules/kubernetes/config/az/ignite-account-role.yaml
new file mode 100644
index 0000000..0071e43
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-account-role.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: ignite
+  namespace: ignite
+rules:
+- apiGroups:
+  - ""
+  resources: # Here are resources you can access
+  - pods
+  - endpoints
+  verbs: # That is what you can do with them
+  - get
+  - list
+  - watch
diff --git a/modules/kubernetes/config/az/ignite-deployment.yaml b/modules/kubernetes/config/az/ignite-deployment.yaml
new file mode 100644
index 0000000..e1f6c22
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-deployment.yaml
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# An example of a Kubernetes configuration for Ignite pods deployment.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  # Custom Ignite cluster's name.
+  name: ignite-cluster
+spec:
+  # A number of Ignite pods to be started by Kubernetes initially.
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: ignite
+    spec:
+      containers:
+        # Custom Ignite pod name.
+      - name: ignite-node
+        image: apacheignite/ignite:2.5.0
+        env:
+        - name: OPTION_LIBS
+          value: ignite-kubernetes
+        - name: CONFIG_URI
+          value: https://raw.githubusercontent.com/apache/ignite/master/modules/kubernetes/config/example-kube-persistence.xml
+        ports:
+        # Ports to open.
+        # Might be optional depending on your Kubernetes environment.
+        - containerPort: 11211 # REST port number.
+        - containerPort: 47100 # communication SPI port number.
+        - containerPort: 47500 # discovery SPI port number.
+        - containerPort: 49112 # JMX port number.
+        - containerPort: 10800 # SQL port number.
+        - containerPort: 10900 # Thin clients port number.
diff --git a/modules/kubernetes/config/az/ignite-namespace.yaml b/modules/kubernetes/config/az/ignite-namespace.yaml
new file mode 100644
index 0000000..e0154d4
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-namespace.yaml
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ignite
diff --git a/modules/kubernetes/config/az/ignite-persistence-storage-class.yaml b/modules/kubernetes/config/az/ignite-persistence-storage-class.yaml
new file mode 100644
index 0000000..7130a48
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-persistence-storage-class.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Microsoft Azure Configuration
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: ignite-persistence-storage-class  #StorageClass name
+  namespace: ignite #Ignite namespace
+provisioner: kubernetes.io/azure-disk
+parameters:
+  storageaccounttype: Standard_LRS
+  kind: managed
diff --git a/modules/kubernetes/config/az/ignite-role-binding.yaml b/modules/kubernetes/config/az/ignite-role-binding.yaml
new file mode 100644
index 0000000..fdef0f0
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-role-binding.yaml
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: ignite
+roleRef:
+  kind: ClusterRole
+  name: ignite
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: ignite
+  namespace: ignite
diff --git a/modules/kubernetes/config/az/ignite-service-account.yaml b/modules/kubernetes/config/az/ignite-service-account.yaml
new file mode 100644
index 0000000..ef207af
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-service-account.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ignite
+  namespace: ignite
diff --git a/modules/kubernetes/config/az/ignite-service.yaml b/modules/kubernetes/config/az/ignite-service.yaml
new file mode 100644
index 0000000..1caacff
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-service.yaml
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata: 
+  # The name must be equal to TcpDiscoveryKubernetesIpFinder.serviceName
+  name: ignite
+  # The name must be equal to TcpDiscoveryKubernetesIpFinder.namespaceName
+  namespace: ignite
+spec:
+  type: LoadBalancer
+  ports:
+    - name: rest
+      port: 8080
+      targetPort: 8080
+    - name: sql
+      port: 10800
+      targetPort: 10800
+    - name: thinclients
+      port: 10900
+      targetPort: 10900
+  # Optional - remove 'sessionAffinity' property if the Ignite cluster
+  # and applications deployed within Kubernetes
+  sessionAffinity: ClientIP    
+  selector:
+    # Must be equal to the label set for Ignite pods.
+    app: ignite
diff --git a/modules/kubernetes/config/az/ignite-stateful-set.yaml b/modules/kubernetes/config/az/ignite-stateful-set.yaml
new file mode 100644
index 0000000..3ab039c
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-stateful-set.yaml
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+  name: ignite
+  namespace: ignite
+spec:
+  selector:
+    matchLabels:
+      app: ignite
+  serviceName: ignite
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: ignite
+    spec:
+      serviceAccountName: ignite
+      containers:
+      - name: ignite
+        image: apacheignite/ignite:2.6.0
+        env:
+        - name: OPTION_LIBS
+          value: ignite-kubernetes,ignite-rest-http
+        - name: CONFIG_URI
+          value: https://raw.githubusercontent.com/apache/ignite/master/modules/kubernetes/config/example-kube-persistence-and-wal.xml
+        - name: IGNITE_QUIET
+          value: "false"
+        - name: JVM_OPTS
+          value: "-Djava.net.preferIPv4Stack=true"
+        ports:
+        - containerPort: 11211 # JDBC port number.
+        - containerPort: 47100 # communication SPI port number.
+        - containerPort: 47500 # discovery SPI port number.
+        - containerPort: 49112 # JMX port number.
+        - containerPort: 10800 # SQL port number.
+        - containerPort: 8080 # REST port number.
+        - containerPort: 10900 #Thin clients port number.
+        volumeMounts:
+        - mountPath: "/wal"
+          name: ignite-wal
+        - mountPath: "/persistence"
+          name: ignite-persistence
+  volumeClaimTemplates:
+  - metadata:
+      name: ignite-persistence
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "ignite-persistence-storage-class"
+      resources:
+        requests:
+          storage: "1Gi"
+  - metadata:
+      name: ignite-wal
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "ignite-wal-storage-class"
+      resources:
+        requests:
+          storage: "1Gi"
diff --git a/modules/kubernetes/config/az/ignite-wal-storage-class.yaml b/modules/kubernetes/config/az/ignite-wal-storage-class.yaml
new file mode 100644
index 0000000..0e4874e
--- /dev/null
+++ b/modules/kubernetes/config/az/ignite-wal-storage-class.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Microsoft Azure Configuration
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: ignite-wal-storage-class  #StorageClass name
+  namespace: ignite #Ignite namespace
+provisioner: kubernetes.io/azure-disk
+parameters:
+  storageaccounttype: Standard_LRS
+  kind: managed
diff --git a/modules/kubernetes/config/example-kube-persistence-and-wal.xml b/modules/kubernetes/config/example-kube-persistence-and-wal.xml
new file mode 100644
index 0000000..8f27b9b
--- /dev/null
+++ b/modules/kubernetes/config/example-kube-persistence-and-wal.xml
@@ -0,0 +1,76 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    Configuration example with Kubernetes IP finder and Ignite persistence enabled.
+    WAL files and database files are stored in separate disk drives.
+-->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="
+        http://www.springframework.org/schema/beans
+        http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <bean class="org.apache.ignite.configuration.IgniteConfiguration">
+
+        <!-- Enabling Apache Ignite Persistent Store. -->
+        <property name="dataStorageConfiguration">
+            <bean class="org.apache.ignite.configuration.DataStorageConfiguration">
+
+                <property name="defaultDataRegionConfiguration">
+                    <bean class="org.apache.ignite.configuration.DataRegionConfiguration">
+                        <property name="persistenceEnabled" value="true"/>
+                    </bean>
+                </property>
+
+                <!--
+                   Sets a path to the root directory where data and indexes are
+                   to be persisted. It's assumed the directory is on a dedicated disk.
+                -->
+                <property name="storagePath" value="/persistence"/>
+
+                <!--
+                    Sets a path to the directory where WAL is stored.
+                    It's assumed the directory is on a dedicated disk.
+                -->
+                <property name="walPath" value="/wal"/>
+
+                <!--
+                    Sets a path to the directory where WAL archive is stored.
+                    It's assumed the directory is on the same drive with the WAL files.
+                -->
+                <property name="walArchivePath" value="/wal/archive"/>
+            </bean>
+        </property>
+
+        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
+        <property name="discoverySpi">
+            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
+                <property name="ipFinder">
+                    <!--
+                        Enables Kubernetes IP finder and setting custom namespace and service names.
+                    -->
+                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder">
+                        <property name="namespace" value="ignite"/>
+                    </bean>
+                </property>
+            </bean>
+        </property>
+    </bean>
+</beans>
\ No newline at end of file
diff --git a/modules/kubernetes/config/example-kube-rbac.xml b/modules/kubernetes/config/example-kube-rbac.xml
new file mode 100644
index 0000000..7ab94da
--- /dev/null
+++ b/modules/kubernetes/config/example-kube-rbac.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<!--
+    Configuration example with Kubernetes IP finder enabled.
+-->
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xsi:schemaLocation="
+        http://www.springframework.org/schema/beans
+        http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <bean class="org.apache.ignite.configuration.IgniteConfiguration">
+
+        <!-- Explicitly configure TCP discovery SPI to provide list of initial nodes. -->
+        <property name="discoverySpi">
+            <bean class="org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi">
+                <property name="ipFinder">
+                    <!--
+                        Enables Kubernetes IP finder with default settings.
+                    -->
+                    <bean class="org.apache.ignite.spi.discovery.tcp.ipfinder.kubernetes.TcpDiscoveryKubernetesIpFinder">
+                        <!-- Assumed that you have RBAC configured and `ignite` namespace created for it. -->
+                        <property name="namespace" value="ignite"/>
+                    </bean>
+                </property>
+            </bean>
+        </property>
+    </bean>
+</beans>
\ No newline at end of file
diff --git a/modules/kubernetes/config/gce/ignite-account-role.yaml b/modules/kubernetes/config/gce/ignite-account-role.yaml
new file mode 100644
index 0000000..0071e43
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-account-role.yaml
@@ -0,0 +1,30 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: ignite
+  namespace: ignite
+rules:
+- apiGroups:
+  - ""
+  resources: # Here are resources you can access
+  - pods
+  - endpoints
+  verbs: # That is what you can do with them
+  - get
+  - list
+  - watch
diff --git a/modules/kubernetes/config/gce/ignite-deployment.yaml b/modules/kubernetes/config/gce/ignite-deployment.yaml
new file mode 100644
index 0000000..e1f6c22
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-deployment.yaml
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# An example of a Kubernetes configuration for Ignite pods deployment.
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  # Custom Ignite cluster's name.
+  name: ignite-cluster
+spec:
+  # A number of Ignite pods to be started by Kubernetes initially.
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: ignite
+    spec:
+      containers:
+        # Custom Ignite pod name.
+      - name: ignite-node
+        image: apacheignite/ignite:2.5.0
+        env:
+        - name: OPTION_LIBS
+          value: ignite-kubernetes
+        - name: CONFIG_URI
+          value: https://raw.githubusercontent.com/apache/ignite/master/modules/kubernetes/config/example-kube-persistence.xml
+        ports:
+        # Ports to open.
+        # Might be optional depending on your Kubernetes environment.
+        - containerPort: 11211 # REST port number.
+        - containerPort: 47100 # communication SPI port number.
+        - containerPort: 47500 # discovery SPI port number.
+        - containerPort: 49112 # JMX port number.
+        - containerPort: 10800 # SQL port number.
+        - containerPort: 10900 # Thin clients port number.
diff --git a/modules/kubernetes/config/gce/ignite-namespace.yaml b/modules/kubernetes/config/gce/ignite-namespace.yaml
new file mode 100644
index 0000000..e0154d4
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-namespace.yaml
@@ -0,0 +1,19 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: ignite
diff --git a/modules/kubernetes/config/gce/ignite-persistence-storage-class.yaml b/modules/kubernetes/config/gce/ignite-persistence-storage-class.yaml
new file mode 100644
index 0000000..9148bda
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-persistence-storage-class.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Google Compute Engine configuration
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: ignite-persistence-storage-class  #StorageClass Name
+  namespace: ignite         #Ignite namespace
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-standard #Type pd-standard or pd-ssd. Default: pd-standard
+  zones: europe-west1-b	
+  replication-type: none
diff --git a/modules/kubernetes/config/gce/ignite-role-binding.yaml b/modules/kubernetes/config/gce/ignite-role-binding.yaml
new file mode 100644
index 0000000..fdef0f0
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-role-binding.yaml
@@ -0,0 +1,27 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1beta1
+metadata:
+  name: ignite
+roleRef:
+  kind: ClusterRole
+  name: ignite
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+- kind: ServiceAccount
+  name: ignite
+  namespace: ignite
diff --git a/modules/kubernetes/config/gce/ignite-service-account.yaml b/modules/kubernetes/config/gce/ignite-service-account.yaml
new file mode 100644
index 0000000..ef207af
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-service-account.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: ignite
+  namespace: ignite
diff --git a/modules/kubernetes/config/gce/ignite-service.yaml b/modules/kubernetes/config/gce/ignite-service.yaml
new file mode 100644
index 0000000..1caacff
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-service.yaml
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: Service
+metadata: 
+  # The name must be equal to TcpDiscoveryKubernetesIpFinder.serviceName
+  name: ignite
+  # The name must be equal to TcpDiscoveryKubernetesIpFinder.namespaceName
+  namespace: ignite
+spec:
+  type: LoadBalancer
+  ports:
+    - name: rest
+      port: 8080
+      targetPort: 8080
+    - name: sql
+      port: 10800
+      targetPort: 10800
+    - name: thinclients
+      port: 10900
+      targetPort: 10900
+  # Optional - remove 'sessionAffinity' property if the Ignite cluster
+  # and applications deployed within Kubernetes
+  sessionAffinity: ClientIP    
+  selector:
+    # Must be equal to the label set for Ignite pods.
+    app: ignite
diff --git a/modules/kubernetes/config/gce/ignite-stateful-set.yaml b/modules/kubernetes/config/gce/ignite-stateful-set.yaml
new file mode 100644
index 0000000..3ab039c
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-stateful-set.yaml
@@ -0,0 +1,74 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: apps/v1beta2
+kind: StatefulSet
+metadata:
+  name: ignite
+  namespace: ignite
+spec:
+  selector:
+    matchLabels:
+      app: ignite
+  serviceName: ignite
+  replicas: 2
+  template:
+    metadata:
+      labels:
+        app: ignite
+    spec:
+      serviceAccountName: ignite
+      containers:
+      - name: ignite
+        image: apacheignite/ignite:2.6.0
+        env:
+        - name: OPTION_LIBS
+          value: ignite-kubernetes,ignite-rest-http
+        - name: CONFIG_URI
+          value: https://raw.githubusercontent.com/apache/ignite/master/modules/kubernetes/config/example-kube-persistence-and-wal.xml
+        - name: IGNITE_QUIET
+          value: "false"
+        - name: JVM_OPTS
+          value: "-Djava.net.preferIPv4Stack=true"
+        ports:
+        - containerPort: 11211 # JDBC port number.
+        - containerPort: 47100 # communication SPI port number.
+        - containerPort: 47500 # discovery SPI port number.
+        - containerPort: 49112 # JMX port number.
+        - containerPort: 10800 # SQL port number.
+        - containerPort: 8080 # REST port number.
+        - containerPort: 10900 #Thin clients port number.
+        volumeMounts:
+        - mountPath: "/wal"
+          name: ignite-wal
+        - mountPath: "/persistence"
+          name: ignite-persistence
+  volumeClaimTemplates:
+  - metadata:
+      name: ignite-persistence
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "ignite-persistence-storage-class"
+      resources:
+        requests:
+          storage: "1Gi"
+  - metadata:
+      name: ignite-wal
+    spec:
+      accessModes: [ "ReadWriteOnce" ]
+      storageClassName: "ignite-wal-storage-class"
+      resources:
+        requests:
+          storage: "1Gi"
diff --git a/modules/kubernetes/config/gce/ignite-wal-storage-class.yaml b/modules/kubernetes/config/gce/ignite-wal-storage-class.yaml
new file mode 100644
index 0000000..221b873
--- /dev/null
+++ b/modules/kubernetes/config/gce/ignite-wal-storage-class.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#Google Compute Engine Configuration
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+  name: ignite-wal-storage-class #StorageClass Name
+  namespace: ignite #Ignite namespace
+provisioner: kubernetes.io/gce-pd
+parameters:
+  type: pd-standard #Volume type pd-standard or pd-ssd. Default: pd-standard
+  zones: europe-west1-b	
+  replication-type: none
diff --git a/modules/mesos/pom.xml b/modules/mesos/pom.xml
index d3d50ab..1841650 100644
--- a/modules/mesos/pom.xml
+++ b/modules/mesos/pom.xml
@@ -35,9 +35,10 @@
     <url>http://ignite.apache.org</url>
 
     <properties>
-        <mesos.version>0.22.0</mesos.version>
-        <ignite.latest.url>http://ignite.run/download_ignite.php</ignite.latest.url>
-        <ignite.direct.url>https://archive.apache.org/dist/ignite/%s/apache-ignite-%s-bin.zip</ignite.direct.url>
+        <mesos.version>1.5.0</mesos.version>
+        <ignite.version.url>https://ignite.apache.org/latest</ignite.version.url>
+        <ignite.path>/ignite/%s/apache-ignite-%s-bin.zip</ignite.path>
+        <apache.mirror.url>https://www.apache.org/dyn/closer.cgi?as_json=1</apache.mirror.url>
     </properties>
 
     <dependencies>
@@ -54,6 +55,12 @@
         </dependency>
 
         <dependency>
+            <groupId>com.fasterxml.jackson.core</groupId>
+            <artifactId>jackson-databind</artifactId>
+            <version>${jackson.version}</version>
+        </dependency>
+
+        <dependency>
             <groupId>junit</groupId>
             <artifactId>junit</artifactId>
             <version>4.11</version>
@@ -76,23 +83,30 @@
                         <phase>generate-sources</phase>
                         <configuration>
                             <target>
-                                <echo message="Update download url in mesos module." />
-                                <echo message="Direct link ${ignite.direct.url}." />
-                                <echo message="Latest link ${ignite.latest.url}." />
+                                <echo message="Update download url in mesos module."/>
+                                <echo message="Latest version ${ignite.version.url}."/>
 
                                 <replaceregexp byline="true" encoding="UTF-8">
-                                    <regexp pattern="(.*DOWNLOAD_LINK = &quot;)(.*)(&quot;.*)" />
-                                    <substitution expression="\1${ignite.latest.url}\3" />
+                                    <regexp pattern="(.*IGNITE_LATEST_VERSION_URL = &quot;)(.*)(&quot;.*)"/>
+                                    <substitution expression="\1${ignite.version.url}\3"/>
                                     <fileset dir="${basedir}/">
-                                        <include name="**/IgniteProvider.java" />
+                                        <include name="**/IgniteProvider.java"/>
                                     </fileset>
                                 </replaceregexp>
 
                                 <replaceregexp byline="true" encoding="UTF-8">
-                                    <regexp pattern="(.*DOWNLOAD_URL_PATTERN = &quot;)(.*)(&quot;.*)" />
-                                    <substitution expression="\1${ignite.direct.url}\3" />
+                                    <regexp pattern="(.*APACHE_MIRROR_URL = &quot;)(.*)(&quot;.*)"/>
+                                    <substitution expression="\1${apache.mirror.url}\3"/>
                                     <fileset dir="${basedir}/">
-                                        <include name="**/IgniteProvider.java" />
+                                        <include name="**/IgniteProvider.java"/>
+                                    </fileset>
+                                </replaceregexp>
+
+                                <replaceregexp byline="true" encoding="UTF-8">
+                                    <regexp pattern="(.*IGNITE_PATH = &quot;)(.*)(&quot;.*)"/>
+                                    <substitution expression="\1${ignite.path}\3"/>
+                                    <fileset dir="${basedir}/">
+                                        <include name="**/IgniteProvider.java"/>
                                     </fileset>
                                 </replaceregexp>
                             </target>
diff --git a/modules/mesos/src/main/java/org/apache/ignite/mesos/IgniteFramework.java b/modules/mesos/src/main/java/org/apache/ignite/mesos/IgniteFramework.java
index eea510a..addd3e0 100644
--- a/modules/mesos/src/main/java/org/apache/ignite/mesos/IgniteFramework.java
+++ b/modules/mesos/src/main/java/org/apache/ignite/mesos/IgniteFramework.java
@@ -17,7 +17,6 @@
 
 package org.apache.ignite.mesos;
 
-import com.google.protobuf.ByteString;
 import java.util.logging.Level;
 import java.util.logging.Logger;
 import org.apache.ignite.mesos.resource.IgniteProvider;
@@ -105,7 +104,7 @@
 
             Protos.Credential cred = Protos.Credential.newBuilder()
                 .setPrincipal(System.getenv(DEFAULT_PRINCIPAL))
-                .setSecret(ByteString.copyFrom(System.getenv(DEFAULT_SECRET).getBytes()))
+                .setSecret(System.getenv(DEFAULT_SECRET))
                 .build();
 
             driver = new MesosSchedulerDriver(scheduler, igniteFramework.getFrameworkInfo(), clusterProps.masterUrl(),
diff --git a/modules/mesos/src/main/java/org/apache/ignite/mesos/resource/IgniteProvider.java b/modules/mesos/src/main/java/org/apache/ignite/mesos/resource/IgniteProvider.java
index 551e46a..cdfd621 100644
--- a/modules/mesos/src/main/java/org/apache/ignite/mesos/resource/IgniteProvider.java
+++ b/modules/mesos/src/main/java/org/apache/ignite/mesos/resource/IgniteProvider.java
@@ -17,9 +17,13 @@
 
 package org.apache.ignite.mesos.resource;
 
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.nio.channels.Channels;
@@ -27,19 +31,38 @@
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.nio.file.StandardCopyOption;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.stream.Collectors;
 import org.apache.ignite.mesos.ClusterProperties;
 
+import static org.apache.ignite.mesos.ClusterProperties.IGNITE_VERSION;
+
 /**
  * Class downloads and stores Ignite.
  */
 public class IgniteProvider {
+    /** Logger. */
+    private static final Logger log = Logger.getLogger(IgniteProvider.class.getSimpleName());
+
     // This constants are set by maven-ant-plugin.
     /** */
-    private static final String DOWNLOAD_LINK = "http://ignite.run/download_ignite.php";
-
-    /** */
     private static final String DOWNLOAD_URL_PATTERN = "https://archive.apache.org/dist/ignite/%s/apache-ignite-%s-bin.zip";
 
+    /** URL for request Ignite latest version. */
+    private final static String IGNITE_LATEST_VERSION_URL = "";
+
+    /** Mirrors. */
+    private static final String APACHE_MIRROR_URL = "";
+
+    /** Ignite on Apache URL path. */
+    private static final String IGNITE_PATH = "";
+
+    /** Version pattern. */
+    private static final Pattern VERSION_PATTERN = Pattern.compile("(?<=version=).*\\S+");
+
     /** */
     private String downloadFolder;
 
@@ -52,8 +75,8 @@
 
     /**
      * @param ver Ignite version.
-     * @throws IOException If downloading failed.
      * @return Path to latest ignite.
+     * @throws IOException If downloading failed.
      */
     public String getIgnite(String ver) throws IOException {
         return downloadIgnite(ver);
@@ -61,34 +84,93 @@
 
     /**
      * @param ver Ignite version which will be downloaded. If {@code null} will download the latest ignite version.
-     * @throws IOException If downloading failed.
      * @return Ignite archive.
+     * @throws IOException If downloading failed.
      */
     public String downloadIgnite(String ver) throws IOException {
         assert ver != null;
 
         URL url;
 
+        // get the latest version.
         if (ver.equals(ClusterProperties.DEFAULT_IGNITE_VERSION)) {
-            URL updateUrl = new URL(DOWNLOAD_LINK);
+            try {
+                ver = findLatestVersion();
 
-            HttpURLConnection conn = (HttpURLConnection)updateUrl.openConnection();
-
-            int code = conn.getResponseCode();
-
-            if (code == 200)
-                url = conn.getURL();
-            else
-                throw new RuntimeException("Failed to download ignite distributive. Maybe set incorrect version? " +
-                    "[resCode:" + code + ", ver: " + ver + "]");
+                // and try to retrieve from a mirror.
+                url = new URL(String.format(findMirror() + IGNITE_PATH, ver, ver));
+            }
+            catch (Exception e) {
+                // fallback to archive.
+                url = new URL(String.format(DOWNLOAD_URL_PATTERN, ver, ver));
+            }
         }
-        else
-            url = new URL(String.format(DOWNLOAD_URL_PATTERN, ver.replace("-incubating", ""), ver));
+        else {
+            // or from archive.
+            url = new URL(String.format(DOWNLOAD_URL_PATTERN, ver, ver));
+        }
 
         return downloadIgnite(url);
     }
 
     /**
+     * Attempts to retrieve the preferred mirror.
+     *
+     * @return Mirror url.
+     * @throws IOException If failed.
+     */
+    private String findMirror() throws IOException {
+        String response = getHttpContents(new URL(APACHE_MIRROR_URL));
+
+        if (response == null)
+            throw new RuntimeException("Failed to retrieve mirrors");
+
+        ObjectMapper mapper = new ObjectMapper();
+        JsonNode mirrorUrl = mapper.readTree(response).get("preferred");
+
+        if (mirrorUrl == null)
+            throw new RuntimeException("Failed to find the preferred mirror");
+
+        return mirrorUrl.asText();
+    }
+
+    /**
+     * Attempts to obtain the latest version.
+     *
+     * @return Latest version.
+     * @throws IOException If failed.
+     */
+    private String findLatestVersion() throws IOException {
+        String response = getHttpContents(new URL(IGNITE_LATEST_VERSION_URL));
+
+        if (response == null)
+            throw new RuntimeException("Failed to identify the latest version. Specify it with " + IGNITE_VERSION);
+
+        Matcher m = VERSION_PATTERN.matcher(response);
+        if (m.find())
+            return m.group();
+        else
+            throw new RuntimeException("Failed to retrieve the latest version. Specify it with " + IGNITE_VERSION);
+    }
+
+    /**
+     * @param url Url.
+     * @return Contents.
+     * @throws IOException If failed.
+     */
+    private String getHttpContents(URL url) throws IOException {
+        HttpURLConnection conn = (HttpURLConnection)url.openConnection();
+
+        int code = conn.getResponseCode();
+
+        if (code != 200)
+            throw null;
+
+        BufferedReader rd = new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"));
+        return rd.lines().collect(Collectors.joining());
+    }
+
+    /**
      * Downloads ignite by URL if this version wasn't downloaded before.
      *
      * @param url URL to Ignite.
@@ -110,7 +192,9 @@
                 if (fileExist(fileName))
                     return fileName;
 
-                FileOutputStream outFile = new FileOutputStream(downloadFolder + fileName);
+                log.log(Level.INFO, "Downloading from {0}", url.toString());
+
+                FileOutputStream outFile = new FileOutputStream(Paths.get(downloadFolder, fileName).toFile());
 
                 outFile.getChannel().transferFrom(Channels.newChannel(conn.getInputStream()), 0, Long.MAX_VALUE);
 
@@ -119,7 +203,7 @@
                 return fileName;
             }
             else
-                throw new RuntimeException("Got unexpected response code. Response code: " + code);
+                throw new RuntimeException("Got unexpected response code. Response code: " + code + " from " + url);
         }
         catch (IOException e) {
             throw new RuntimeException("Failed to download Ignite.", e);
diff --git a/modules/mesos/src/test/java/org/apache/ignite/mesos/IgniteSchedulerSelfTest.java b/modules/mesos/src/test/java/org/apache/ignite/mesos/IgniteSchedulerSelfTest.java
index 099daa2..4e48569 100644
--- a/modules/mesos/src/test/java/org/apache/ignite/mesos/IgniteSchedulerSelfTest.java
+++ b/modules/mesos/src/test/java/org/apache/ignite/mesos/IgniteSchedulerSelfTest.java
@@ -466,6 +466,12 @@
         }
 
         /** {@inheritDoc} */
+        @Override public Protos.Status acceptOffers(Collection<Protos.OfferID> collection,
+            Collection<Protos.Offer.Operation> collection1, Protos.Filters filters) {
+            return null;
+        }
+
+        /** {@inheritDoc} */
         @Override public Protos.Status declineOffer(Protos.OfferID offerId, Protos.Filters filters) {
             declinedOffer = offerId;
 
@@ -485,6 +491,11 @@
         }
 
         /** {@inheritDoc} */
+        @Override public Protos.Status suppressOffers() {
+            return null;
+        }
+
+        /** {@inheritDoc} */
         @Override public Protos.Status acknowledgeStatusUpdate(Protos.TaskStatus status) {
             return null;
         }
diff --git a/modules/ml/pom.xml b/modules/ml/pom.xml
index fac5ab4..ad31da2 100644
--- a/modules/ml/pom.xml
+++ b/modules/ml/pom.xml
@@ -21,6 +21,9 @@
 -->
 <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>
+    <properties>
+        <commons.math.version>3.6.1</commons.math.version>
+    </properties>
 
     <parent>
         <artifactId>ignite-parent</artifactId>
@@ -47,7 +50,7 @@
             <type>test-jar</type>
             <scope>test</scope>
         </dependency>
-        
+
         <dependency>
 	    <groupId>org.apache.ignite</groupId>
 	    <artifactId>ignite-indexing</artifactId>
@@ -60,7 +63,7 @@
 	    <artifactId>ignite-spring</artifactId>
 	    <version>${project.version}</version>
         </dependency>
-       
+
         <dependency>
             <groupId>it.unimi.dsi</groupId>
             <artifactId>fastutil</artifactId>
@@ -118,6 +121,12 @@
             <version>${mockito.version}</version>
             <scope>test</scope>
         </dependency>
+
+        <dependency>
+            <groupId>org.apache.commons</groupId>
+            <artifactId>commons-math3</artifactId>
+            <version>${commons.math.version}</version>
+        </dependency>
     </dependencies>
 
     <profiles>
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java
index 474a463..43e1899 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/ClusterizationModel.java
@@ -22,8 +22,8 @@
 /** Base interface for all clusterization models. */
 public interface ClusterizationModel<P, V> extends Model<P, V> {
     /** Gets the clusters count. */
-    public int amountOfClusters();
+    public int getAmountOfClusters();
 
     /** Get cluster centers. */
-    public P[] centers();
+    public P[] getCenters();
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java
index bdfa1b6..e07f4f0 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansModel.java
@@ -54,12 +54,12 @@
     }
 
     /** Amount of centers in clusterization. */
-    @Override public int amountOfClusters() {
+    @Override public int getAmountOfClusters() {
         return centers.length;
     }
 
     /** Get centers of clusters. */
-    @Override public Vector[] centers() {
+    @Override public Vector[] getCenters() {
         return Arrays.copyOf(centers, centers.length);
     }
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
index 7dbc78a..a20d5da 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/clustering/kmeans/KMeansTrainer.java
@@ -18,8 +18,12 @@
 package org.apache.ignite.ml.clustering.kmeans;
 
 import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Random;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -35,8 +39,8 @@
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.math.util.MapUtil;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.structures.partition.LabeledDatasetPartitionDataBuilderOnHeap;
 import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer;
 
@@ -69,21 +73,41 @@
      */
     @Override public <K, V> KMeansModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> KMeansModel updateModel(KMeansModel mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
         assert datasetBuilder != null;
 
-        PartitionDataBuilder<K, V, EmptyContext, LabeledDataset<Double, LabeledVector>> partDataBuilder = new LabeledDatasetPartitionDataBuilderOnHeap<>(
+        PartitionDataBuilder<K, V, EmptyContext, LabeledVectorSet<Double, LabeledVector>> partDataBuilder = new LabeledDatasetPartitionDataBuilderOnHeap<>(
             featureExtractor,
             lbExtractor
         );
 
         Vector[] centers;
 
-        try (Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset = datasetBuilder.build(
+        try (Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset = datasetBuilder.build(
             (upstream, upstreamSize) -> new EmptyContext(),
             partDataBuilder
         )) {
-            final int cols = dataset.compute(org.apache.ignite.ml.structures.Dataset::colSize, (a, b) -> a == null ? b : a);
-            centers = initClusterCentersRandomly(dataset, k);
+            final Integer cols = dataset.compute(org.apache.ignite.ml.structures.Dataset::colSize, (a, b) -> {
+                if (a == null)
+                    return b == null ? 0 : b;
+                if (b == null)
+                    return a;
+                return b;
+            });
+
+            if (cols == null)
+                return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
+
+            centers = Optional.ofNullable(mdl)
+                .map(KMeansModel::getCenters)
+                .orElseGet(() -> initClusterCentersRandomly(dataset, k));
 
             boolean converged = false;
             int iteration = 0;
@@ -105,7 +129,10 @@
                 }
 
                 iteration++;
-                centers = newCentroids;
+                for (int i = 0; i < centers.length; i++) {
+                    if (newCentroids[i] != null)
+                        centers[i] = newCentroids[i];
+                }
             }
         }
         catch (Exception e) {
@@ -114,6 +141,11 @@
         return new KMeansModel(centers, distance);
     }
 
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(KMeansModel mdl) {
+        return mdl.getCenters().length == k && mdl.distanceMeasure().equals(distance);
+    }
+
     /**
      * Prepares the data to define new centroids on current iteration.
      *
@@ -123,11 +155,10 @@
      * @return Helper data to calculate the new centroids.
      */
     private TotalCostAndCounts calcDataForNewCentroids(Vector[] centers,
-        Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset, int cols) {
+        Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset, int cols) {
         final Vector[] finalCenters = centers;
 
         return dataset.compute(data -> {
-
             TotalCostAndCounts res = new TotalCostAndCounts();
 
             for (int i = 0; i < data.rowSize(); i++) {
@@ -142,13 +173,22 @@
 
                 int finalI = i;
                 res.sums.compute(centroidIdx,
-                    (IgniteBiFunction<Integer, Vector, Vector>)(ind, v) -> v.plus(data.getRow(finalI).features()));
+                    (IgniteBiFunction<Integer, Vector, Vector>)(ind, v) -> {
+                        Vector features = data.getRow(finalI).features();
+                        return v == null ? features : v.plus(features);
+                    });
 
                 res.counts.merge(centroidIdx, 1,
                     (IgniteBiFunction<Integer, Integer, Integer>)(i1, i2) -> i1 + i2);
             }
             return res;
-        }, (a, b) -> a == null ? b : a.merge(b));
+        }, (a, b) -> {
+            if (a == null)
+                return b == null ? new TotalCostAndCounts() : b;
+            if (b == null)
+                return a;
+            return a.merge(b);
+        });
     }
 
     /**
@@ -178,28 +218,66 @@
      * @param k Amount of clusters.
      * @return K cluster centers.
      */
-    private Vector[] initClusterCentersRandomly(Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset,
+    private Vector[] initClusterCentersRandomly(Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset,
         int k) {
-
         Vector[] initCenters = new DenseVector[k];
 
+        // Gets k or less vectors from each partition.
         List<LabeledVector> rndPnts = dataset.compute(data -> {
             List<LabeledVector> rndPnt = new ArrayList<>();
-            rndPnt.add(data.getRow(new Random(seed).nextInt(data.rowSize())));
-            return rndPnt;
-        }, (a, b) -> a == null ? b : Stream.concat(a.stream(), b.stream()).collect(Collectors.toList()));
 
-        for (int i = 0; i < k; i++) {
-            final LabeledVector rndPnt = rndPnts.get(new Random(seed).nextInt(rndPnts.size()));
-            rndPnts.remove(rndPnt);
-            initCenters[i] = rndPnt.features();
+            if (data.rowSize() != 0) {
+                if (data.rowSize() > k) { // If it's enough rows in partition to pick k vectors.
+                    final Random random = new Random(seed);
+
+                    for (int i = 0; i < k; i++) {
+                        Set<Integer> uniqueIndices = new HashSet<>();
+                        int nextIdx = random.nextInt(data.rowSize());
+                        int maxRandomSearch = k; // It required to make the next cycle is finite.
+                        int cntr = 0;
+
+                        // Repeat nextIdx generation if it was picked earlier.
+                        while (uniqueIndices.contains(nextIdx) && cntr < maxRandomSearch) {
+                            nextIdx = random.nextInt(data.rowSize());
+                            cntr++;
+                        }
+                        uniqueIndices.add(nextIdx);
+
+                        rndPnt.add(data.getRow(nextIdx));
+                    }
+                }
+                else // If it's not enough vectors to pick k vectors.
+                    for (int i = 0; i < data.rowSize(); i++)
+                        rndPnt.add(data.getRow(i));
+            }
+            return rndPnt;
+        }, (a, b) -> {
+            if (a == null)
+                return b == null ? new ArrayList<>() : b;
+            if (b == null)
+                return a;
+            return Stream.concat(a.stream(), b.stream()).collect(Collectors.toList());
+        });
+
+        // Shuffle them.
+        Collections.shuffle(rndPnts);
+
+        // Pick k vectors randomly.
+        if (rndPnts.size() >= k) {
+            for (int i = 0; i < k; i++) {
+                final LabeledVector rndPnt = rndPnts.get(new Random(seed).nextInt(rndPnts.size()));
+                rndPnts.remove(rndPnt);
+                initCenters[i] = rndPnt.features();
+            }
         }
+        else
+            throw new RuntimeException("The KMeans Trainer required more than " + k + " vectors to find " + k + " clusters");
 
         return initCenters;
     }
 
     /** Service class used for statistics. */
-    private static class TotalCostAndCounts {
+    public static class TotalCostAndCounts {
         /** */
         double totalCost;
 
@@ -209,13 +287,25 @@
         /** Count of points closest to the center with a given index. */
         ConcurrentHashMap<Integer, Integer> counts = new ConcurrentHashMap<>();
 
+        /** Count of points closest to the center with a given index. */
+        ConcurrentHashMap<Integer, ConcurrentHashMap<Double, Integer>> centroidStat = new ConcurrentHashMap<>();
+
         /** Merge current */
         TotalCostAndCounts merge(TotalCostAndCounts other) {
             this.totalCost += totalCost;
             this.sums = MapUtil.mergeMaps(sums, other.sums, Vector::plus, ConcurrentHashMap::new);
             this.counts = MapUtil.mergeMaps(counts, other.counts, (i1, i2) -> i1 + i2, ConcurrentHashMap::new);
+            this.centroidStat = MapUtil.mergeMaps(centroidStat, other.centroidStat, (m1, m2) ->
+                MapUtil.mergeMaps(m1, m2, (i1, i2) -> i1 + i2, ConcurrentHashMap::new), ConcurrentHashMap::new);
             return this;
         }
+
+        /**
+         * @return centroid statistics.
+         */
+        public ConcurrentHashMap<Integer, ConcurrentHashMap<Double, Integer>> getCentroidStat() {
+            return centroidStat;
+        }
     }
 
     /**
@@ -223,7 +313,7 @@
      *
      * @return The parameter value.
      */
-    public int getK() {
+    public int getAmountOfClusters() {
         return k;
     }
 
@@ -233,7 +323,7 @@
      * @param k The parameter value.
      * @return Model with new amount of clusters parameter value.
      */
-    public KMeansTrainer withK(int k) {
+    public KMeansTrainer withAmountOfClusters(int k) {
         this.k = k;
         return this;
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/BaggingModelTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/BaggingModelTrainer.java
index f439789..493c1da 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/BaggingModelTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/BaggingModelTrainer.java
@@ -177,4 +177,24 @@
             return VectorUtils.of(newFeaturesValues);
         });
     }
+
+    /**
+     * Learn new models on dataset and create new Compositions over them and already learned models.
+     *
+     * @param mdl Learned model.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return New models composition.
+     */
+    @Override public <K, V> ModelsComposition updateModel(ModelsComposition mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        ArrayList<Model<Vector, Double>> newModels = new ArrayList<>(mdl.getModels());
+        newModels.addAll(fit(datasetBuilder, featureExtractor, lbExtractor).getModels());
+
+        return new ModelsComposition(newModels, predictionsAggregator);
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java
index e14fa6d..36ee626 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsComposition.java
@@ -19,6 +19,8 @@
 
 import java.util.Collections;
 import java.util.List;
+import org.apache.ignite.ml.Exportable;
+import org.apache.ignite.ml.Exporter;
 import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
@@ -27,7 +29,7 @@
 /**
  * Model consisting of several models and prediction aggregation strategy.
  */
-public class ModelsComposition implements Model<Vector, Double> {
+public class ModelsComposition implements Model<Vector, Double>, Exportable<ModelsCompositionFormat> {
     /**
      * Predictions aggregator.
      */
@@ -78,6 +80,12 @@
     }
 
     /** {@inheritDoc} */
+    @Override public <P> void saveModel(Exporter<ModelsCompositionFormat, P> exporter, P path) {
+        ModelsCompositionFormat format = new ModelsCompositionFormat(models, predictionsAggregator);
+        exporter.save(format, path);
+    }
+
+    /** {@inheritDoc} */
     @Override public String toString() {
         return toString(false);
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java
new file mode 100644
index 0000000..68af0a9
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/ModelsCompositionFormat.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition;
+
+import java.io.Serializable;
+import java.util.List;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * ModelsComposition representation.
+ *
+ * @see ModelsComposition
+ */
+public class ModelsCompositionFormat implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 9115341364082681837L;
+
+    /** Models. */
+    private List<Model<Vector, Double>> models;
+
+    /** Predictions aggregator. */
+    private PredictionsAggregator predictionsAggregator;
+
+    /**
+     * Creates an instance of ModelsCompositionFormat.
+     *
+     * @param models Models.
+     * @param predictionsAggregator Predictions aggregator.
+     */
+    public ModelsCompositionFormat(List<Model<Vector, Double>> models,PredictionsAggregator predictionsAggregator) {
+        this.models = models;
+        this.predictionsAggregator = predictionsAggregator;
+    }
+
+    /** */
+    public List<Model<Vector, Double>> models() {
+        return models;
+    }
+
+    /** */
+    public PredictionsAggregator predictionsAggregator() {
+        return predictionsAggregator;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBBinaryClassifierTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBBinaryClassifierTrainer.java
index 53a6219..f6ddfed 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBBinaryClassifierTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBBinaryClassifierTrainer.java
@@ -19,24 +19,23 @@
 
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
 import java.util.Set;
 import java.util.stream.Collectors;
-import org.apache.ignite.internal.util.typedef.internal.A;
+import org.apache.ignite.ml.composition.boosting.loss.LogLoss;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.functions.IgniteFunction;
-import org.apache.ignite.ml.math.functions.IgniteTriFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.structures.partition.LabeledDatasetPartitionDataBuilderOnHeap;
 
 /**
- * Trainer for binary classifier using Gradient Boosting.
- * As preparing stage this algorithm learn labels in dataset and create mapping dataset labels to 0 and 1.
- * This algorithm uses gradient of Logarithmic Loss metric [LogLoss] by default in each step of learning.
+ * Trainer for binary classifier using Gradient Boosting. As preparing stage this algorithm learn labels in dataset and
+ * create mapping dataset labels to 0 and 1. This algorithm uses gradient of Logarithmic Loss metric [LogLoss] by
+ * default in each step of learning.
  */
 public abstract class GDBBinaryClassifierTrainer extends GDBTrainer {
     /** External representation of first class. */
@@ -51,9 +50,7 @@
      * @param cntOfIterations Count of learning iterations.
      */
     public GDBBinaryClassifierTrainer(double gradStepSize, Integer cntOfIterations) {
-        super(gradStepSize,
-            cntOfIterations,
-            LossGradientPerPredictionFunctions.LOG_LOSS);
+        super(gradStepSize, cntOfIterations, new LogLoss());
     }
 
     /**
@@ -61,35 +58,37 @@
      *
      * @param gradStepSize Grad step size.
      * @param cntOfIterations Count of learning iterations.
-     * @param lossGradient Gradient of loss function. First argument is sample size, second argument is valid answer, third argument is current model prediction.
+     * @param loss Loss function.
      */
-    public GDBBinaryClassifierTrainer(double gradStepSize,
-        Integer cntOfIterations,
-        IgniteTriFunction<Long, Double, Double, Double> lossGradient) {
-
-        super(gradStepSize, cntOfIterations, lossGradient);
+    public GDBBinaryClassifierTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) {
+        super(gradStepSize, cntOfIterations, loss);
     }
 
     /** {@inheritDoc} */
-    @Override protected <V, K> void learnLabels(DatasetBuilder<K, V> builder, IgniteBiFunction<K, V, Vector> featureExtractor,
+    @Override protected <V, K> boolean learnLabels(DatasetBuilder<K, V> builder,
+        IgniteBiFunction<K, V, Vector> featureExtractor,
         IgniteBiFunction<K, V, Double> lExtractor) {
 
-        List<Double> uniqLabels = new ArrayList<Double>(
-            builder.build(new EmptyContextBuilder<>(), new LabeledDatasetPartitionDataBuilderOnHeap<>(featureExtractor, lExtractor))
-                .compute((IgniteFunction<LabeledDataset<Double,LabeledVector>, Set<Double>>) x ->
+        Set<Double> uniqLabels = builder.build(new EmptyContextBuilder<>(), new LabeledDatasetPartitionDataBuilderOnHeap<>(featureExtractor, lExtractor))
+            .compute((IgniteFunction<LabeledVectorSet<Double, LabeledVector>, Set<Double>>)x ->
                     Arrays.stream(x.labels()).boxed().collect(Collectors.toSet()), (a, b) -> {
-                        if (a == null)
-                            return b;
-                        if (b == null)
-                            return a;
-                        a.addAll(b);
+                    if (a == null)
+                        return b;
+                    if (b == null)
                         return a;
-                    }
-                ));
+                    a.addAll(b);
+                    return a;
+                }
+            );
 
-        A.ensure(uniqLabels.size() == 2, "Binary classifier expects two types of labels in learning dataset");
-        externalFirstCls = uniqLabels.get(0);
-        externalSecondCls = uniqLabels.get(1);
+        if (uniqLabels != null && uniqLabels.size() == 2) {
+            ArrayList<Double> lblsArray = new ArrayList<>(uniqLabels);
+            externalFirstCls = lblsArray.get(0);
+            externalSecondCls = lblsArray.get(1);
+            return true;
+        } else {
+            return false;
+        }
     }
 
     /** {@inheritDoc} */
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java
index 375748a..737495e2 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBLearningStrategy.java
@@ -22,6 +22,10 @@
 import java.util.List;
 import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
 import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.environment.LearningEnvironment;
@@ -29,9 +33,9 @@
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.functions.IgniteFunction;
 import org.apache.ignite.ml.math.functions.IgniteSupplier;
-import org.apache.ignite.ml.math.functions.IgniteTriFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.trainers.DatasetTrainer;
+import org.jetbrains.annotations.NotNull;
 
 /**
  * Learning strategy for gradient boosting.
@@ -44,7 +48,7 @@
     protected int cntOfIterations;
 
     /** Loss of gradient. */
-    protected IgniteTriFunction<Long, Double, Double, Double> lossGradient;
+    protected Loss loss;
 
     /** External label to internal mapping. */
     protected IgniteFunction<Double, Double> externalLbToInternalMapping;
@@ -61,9 +65,15 @@
     /** Composition weights. */
     protected double[] compositionWeights;
 
+    /** Check convergence strategy factory. */
+    protected ConvergenceCheckerFactory checkConvergenceStgyFactory = new MeanAbsValueConvergenceCheckerFactory(0.001);
+
+    /** Default gradient step size. */
+    private double defaultGradStepSize;
+
     /**
-     * Implementation of gradient boosting iterations. At each step of iterations this algorithm
-     * build a regression model based on gradient of loss-function for current models composition.
+     * Implementation of gradient boosting iterations. At each step of iterations this algorithm build a regression
+     * model based on gradient of loss-function for current models composition.
      *
      * @param datasetBuilder Dataset builder.
      * @param featureExtractor Feature extractor.
@@ -73,18 +83,43 @@
     public <K, V> List<Model<Vector, Double>> learnModels(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
 
-        List<Model<Vector, Double>> models = new ArrayList<>();
+        return update(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /**
+     * Gets state of model in arguments, compare it with training parameters of trainer and if they are fit then
+     * trainer updates model in according to new data and return new model. In other case trains new model.
+     *
+     * @param mdlToUpdate Learned model.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated models list.
+     */
+    public <K,V> List<Model<Vector, Double>> update(GDBTrainer.GDBModel mdlToUpdate,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        List<Model<Vector, Double>> models = initLearningState(mdlToUpdate);
+
+        ConvergenceChecker<K, V> convCheck = checkConvergenceStgyFactory.create(sampleSize,
+            externalLbToInternalMapping, loss, datasetBuilder, featureExtractor, lbExtractor);
+
         DatasetTrainer<? extends Model<Vector, Double>, Double> trainer = baseMdlTrainerBuilder.get();
         for (int i = 0; i < cntOfIterations; i++) {
-            double[] weights = Arrays.copyOf(compositionWeights, i);
+            double[] weights = Arrays.copyOf(compositionWeights, models.size());
 
             WeightedPredictionsAggregator aggregator = new WeightedPredictionsAggregator(weights, meanLabelValue);
-            Model<Vector, Double> currComposition = new ModelsComposition(models, aggregator);
+            ModelsComposition currComposition = new ModelsComposition(models, aggregator);
+            if (convCheck.isConverged(datasetBuilder, currComposition))
+                break;
 
             IgniteBiFunction<K, V, Double> lbExtractorWrap = (k, v) -> {
                 Double realAnswer = externalLbToInternalMapping.apply(lbExtractor.apply(k, v));
                 Double mdlAnswer = currComposition.apply(featureExtractor.apply(k, v));
-                return -lossGradient.apply(sampleSize, realAnswer, mdlAnswer);
+                return -loss.gradient(sampleSize, realAnswer, mdlAnswer);
             };
 
             long startTs = System.currentTimeMillis();
@@ -97,6 +132,29 @@
     }
 
     /**
+     * Restores state of already learned model if can and sets learning parameters according to this state.
+     *
+     * @param mdlToUpdate Model to update.
+     * @return list of already learned models.
+     */
+    @NotNull protected List<Model<Vector, Double>> initLearningState(GDBTrainer.GDBModel mdlToUpdate) {
+        List<Model<Vector, Double>> models = new ArrayList<>();
+        if(mdlToUpdate != null) {
+            models.addAll(mdlToUpdate.getModels());
+            WeightedPredictionsAggregator aggregator = (WeightedPredictionsAggregator) mdlToUpdate.getPredictionsAggregator();
+            meanLabelValue = aggregator.getBias();
+            compositionWeights = new double[models.size() + cntOfIterations];
+            for(int i = 0; i < models.size(); i++)
+                compositionWeights[i] = aggregator.getWeights()[i];
+        } else {
+            compositionWeights = new double[cntOfIterations];
+        }
+
+        Arrays.fill(compositionWeights, models.size(), compositionWeights.length, defaultGradStepSize);
+        return models;
+    }
+
+    /**
      * Sets learning environment.
      *
      * @param environment Learning Environment.
@@ -117,12 +175,12 @@
     }
 
     /**
-     * Sets gradient of loss function.
+     * Loss function.
      *
-     * @param lossGradient Loss gradient.
+     * @param loss Loss function.
      */
-    public GDBLearningStrategy withLossGradient(IgniteTriFunction<Long, Double, Double, Double> lossGradient) {
-        this.lossGradient = lossGradient;
+    public GDBLearningStrategy withLossGradient(Loss loss) {
+        this.loss = loss;
         return this;
     }
 
@@ -141,7 +199,8 @@
      *
      * @param buildBaseMdlTrainer Build base model trainer.
      */
-    public GDBLearningStrategy withBaseModelTrainerBuilder(IgniteSupplier<DatasetTrainer<? extends Model<Vector, Double>, Double>> buildBaseMdlTrainer) {
+    public GDBLearningStrategy withBaseModelTrainerBuilder(
+        IgniteSupplier<DatasetTrainer<? extends Model<Vector, Double>, Double>> buildBaseMdlTrainer) {
         this.baseMdlTrainerBuilder = buildBaseMdlTrainer;
         return this;
     }
@@ -175,4 +234,34 @@
         this.compositionWeights = compositionWeights;
         return this;
     }
+
+    /**
+     * Sets CheckConvergenceStgyFactory.
+     *
+     * @param factory Factory.
+     */
+    public GDBLearningStrategy withCheckConvergenceStgyFactory(ConvergenceCheckerFactory factory) {
+        this.checkConvergenceStgyFactory = factory;
+        return this;
+    }
+
+    /**
+     * Sets default gradient step size.
+     *
+     * @param defaultGradStepSize Default gradient step size.
+     */
+    public GDBLearningStrategy withDefaultGradStepSize(double defaultGradStepSize) {
+        this.defaultGradStepSize = defaultGradStepSize;
+        return this;
+    }
+
+    /** */
+    public double[] getCompositionWeights() {
+        return compositionWeights;
+    }
+
+    /** */
+    public double getMeanValue() {
+        return meanLabelValue;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBRegressionTrainer.java
index 201586e..8c1afd7 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBRegressionTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBRegressionTrainer.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.composition.boosting;
 
+import org.apache.ignite.ml.composition.boosting.loss.SquaredError;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
@@ -33,15 +34,14 @@
      * @param cntOfIterations Count of learning iterations.
      */
     public GDBRegressionTrainer(double gradStepSize, Integer cntOfIterations) {
-        super(gradStepSize,
-            cntOfIterations,
-            LossGradientPerPredictionFunctions.MSE);
+        super(gradStepSize, cntOfIterations, new SquaredError());
     }
 
     /** {@inheritDoc} */
-    @Override protected <V, K> void learnLabels(DatasetBuilder<K, V> builder, IgniteBiFunction<K, V, Vector> featureExtractor,
+    @Override protected <V, K> boolean learnLabels(DatasetBuilder<K, V> builder, IgniteBiFunction<K, V, Vector> featureExtractor,
         IgniteBiFunction<K, V, Double> lExtractor) {
 
+        return true;
     }
 
     /** {@inheritDoc} */
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java
index 5a0f52a..89cc6b1 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/GDBTrainer.java
@@ -22,6 +22,9 @@
 import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
 import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
@@ -30,7 +33,7 @@
 import org.apache.ignite.ml.environment.logging.MLLogger;
 import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
-import org.apache.ignite.ml.math.functions.IgniteTriFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer;
 import org.apache.ignite.ml.regressions.linear.LinearRegressionSGDTrainer;
@@ -52,7 +55,7 @@
  *
  * But in practice Decision Trees is most used regressors (see: {@link DecisionTreeRegressionTrainer}).
  */
-public abstract class GDBTrainer extends DatasetTrainer<Model<Vector, Double>, Double> {
+public abstract class GDBTrainer extends DatasetTrainer<ModelsComposition, Double> {
     /** Gradient step. */
     private final double gradientStep;
 
@@ -60,63 +63,81 @@
     private final int cntOfIterations;
 
     /**
-     * Gradient of loss function. First argument is sample size, second argument is valid answer, third argument is
-     * current model prediction.
+     * Loss function.
      */
-    protected final IgniteTriFunction<Long, Double, Double, Double> lossGradient;
+    protected final Loss loss;
+
+    /** Check convergence strategy factory. */
+    protected ConvergenceCheckerFactory checkConvergenceStgyFactory = new MeanAbsValueConvergenceCheckerFactory(0.001);
 
     /**
      * Constructs GDBTrainer instance.
      *
      * @param gradStepSize Grad step size.
      * @param cntOfIterations Count of learning iterations.
-     * @param lossGradient Gradient of loss function. First argument is sample size, second argument is valid answer
+     * @param loss Gradient of loss function. First argument is sample size, second argument is valid answer
      * third argument is current model prediction.
      */
-    public GDBTrainer(double gradStepSize, Integer cntOfIterations,
-        IgniteTriFunction<Long, Double, Double, Double> lossGradient) {
+    public GDBTrainer(double gradStepSize, Integer cntOfIterations, Loss loss) {
         gradientStep = gradStepSize;
         this.cntOfIterations = cntOfIterations;
-        this.lossGradient = lossGradient;
+        this.loss = loss;
     }
 
     /** {@inheritDoc} */
-    @Override public <K, V> Model<Vector, Double> fit(DatasetBuilder<K, V> datasetBuilder,
+    @Override public <K, V> ModelsComposition fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor,
         IgniteBiFunction<K, V, Double> lbExtractor) {
 
-        learnLabels(datasetBuilder, featureExtractor, lbExtractor);
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
 
-        IgniteBiTuple<Double, Long> initAndSampleSize = computeInitialValue(datasetBuilder,
-            featureExtractor, lbExtractor);
+    /** {@inheritDoc} */
+    @Override protected <K, V> ModelsComposition updateModel(ModelsComposition mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        if (!learnLabels(datasetBuilder, featureExtractor, lbExtractor))
+            return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
+
+        IgniteBiTuple<Double, Long> initAndSampleSize = computeInitialValue(datasetBuilder, featureExtractor, lbExtractor);
+        if(initAndSampleSize == null)
+            return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
+
         Double mean = initAndSampleSize.get1();
         Long sampleSize = initAndSampleSize.get2();
 
-        double[] compositionWeights = new double[cntOfIterations];
-        Arrays.fill(compositionWeights, gradientStep);
-        WeightedPredictionsAggregator resAggregator = new WeightedPredictionsAggregator(compositionWeights, mean);
-
         long learningStartTs = System.currentTimeMillis();
 
-        List<Model<Vector, Double>> models = getLearningStrategy()
+        GDBLearningStrategy stgy = getLearningStrategy()
             .withBaseModelTrainerBuilder(this::buildBaseModelTrainer)
             .withExternalLabelToInternal(this::externalLabelToInternal)
             .withCntOfIterations(cntOfIterations)
-            .withCompositionWeights(compositionWeights)
             .withEnvironment(environment)
-            .withLossGradient(lossGradient)
+            .withLossGradient(loss)
             .withSampleSize(sampleSize)
             .withMeanLabelValue(mean)
-            .learnModels(datasetBuilder, featureExtractor, lbExtractor);
+            .withDefaultGradStepSize(gradientStep)
+            .withCheckConvergenceStgyFactory(checkConvergenceStgyFactory);
+
+        List<Model<Vector, Double>> models;
+        if (mdl != null)
+            models = stgy.update((GDBModel)mdl, datasetBuilder, featureExtractor, lbExtractor);
+        else
+            models = stgy.learnModels(datasetBuilder, featureExtractor, lbExtractor);
 
         double learningTime = (double)(System.currentTimeMillis() - learningStartTs) / 1000.0;
         environment.logger(getClass()).log(MLLogger.VerboseLevel.LOW, "The training time was %.2fs", learningTime);
 
-        return new ModelsComposition(models, resAggregator) {
-            @Override public Double apply(Vector features) {
-                return internalLabelToExternal(super.apply(features));
-            }
-        };
+        WeightedPredictionsAggregator resAggregator = new WeightedPredictionsAggregator(
+            stgy.getCompositionWeights(),
+            stgy.getMeanValue()
+        );
+        return new GDBModel(models, resAggregator, this::internalLabelToExternal);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(ModelsComposition mdl) {
+        return mdl instanceof GDBModel;
     }
 
     /**
@@ -125,8 +146,9 @@
      * @param builder Dataset builder.
      * @param featureExtractor Feature extractor.
      * @param lExtractor Labels extractor.
+     * @return true if labels learning was successful.
      */
-    protected abstract <V, K> void learnLabels(DatasetBuilder<K, V> builder,
+    protected abstract <V, K> boolean learnLabels(DatasetBuilder<K, V> builder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lExtractor);
 
     /**
@@ -181,7 +203,8 @@
                 }
             );
 
-            meanTuple.set1(meanTuple.get1() / meanTuple.get2());
+            if (meanTuple != null)
+                meanTuple.set1(meanTuple.get1() / meanTuple.get2());
             return meanTuple;
         }
         catch (Exception e) {
@@ -190,6 +213,17 @@
     }
 
     /**
+     * Sets CheckConvergenceStgyFactory.
+     *
+     * @param factory Factory.
+     * @return trainer.
+     */
+    public GDBTrainer withCheckConvergenceStgyFactory(ConvergenceCheckerFactory factory) {
+        this.checkConvergenceStgyFactory = factory;
+        return this;
+    }
+
+    /**
      * Returns learning strategy.
      *
      * @return learning strategy.
@@ -197,4 +231,35 @@
     protected GDBLearningStrategy getLearningStrategy() {
         return new GDBLearningStrategy();
     }
+
+    /**
+     * GDB model.
+     */
+    public static class GDBModel extends ModelsComposition {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 3476661240155508004L;
+
+        /** Internal to external lbl mapping. */
+        private final IgniteFunction<Double, Double> internalToExternalLblMapping;
+
+        /**
+         * Creates an instance of GDBModel.
+         *
+         * @param models Models.
+         * @param predictionsAggregator Predictions aggregator.
+         * @param internalToExternalLblMapping Internal to external lbl mapping.
+         */
+        public GDBModel(List<? extends Model<Vector, Double>> models,
+            WeightedPredictionsAggregator predictionsAggregator,
+            IgniteFunction<Double, Double> internalToExternalLblMapping) {
+
+            super(models, predictionsAggregator);
+            this.internalToExternalLblMapping = internalToExternalLblMapping;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Double apply(Vector features) {
+            return internalToExternalLblMapping.apply(super.apply(features));
+        }
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/LossGradientPerPredictionFunctions.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/LossGradientPerPredictionFunctions.java
deleted file mode 100644
index 488c0e3..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/LossGradientPerPredictionFunctions.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.composition.boosting;
-
-import org.apache.ignite.ml.math.functions.IgniteTriFunction;
-
-/**
- * Contains implementations of per-prediction loss functions for gradient boosting algorithm.
- */
-public class LossGradientPerPredictionFunctions {
-    /** Mean squared error loss for regression. */
-    public static IgniteTriFunction<Long, Double, Double, Double> MSE =
-        (sampleSize, answer, prediction) -> (2.0 / sampleSize) * (prediction - answer);
-
-    /** Logarithmic loss for binary classification. */
-    public static IgniteTriFunction<Long, Double, Double, Double> LOG_LOSS =
-        (sampleSize, answer, prediction) -> (prediction - answer) / (prediction * (1.0 - prediction));
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceChecker.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceChecker.java
new file mode 100644
index 0000000..3f6e8ca
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceChecker.java
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence;
+
+import java.io.Serializable;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapDataBuilder;
+import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Contains logic of error computing and convergence checking for Gradient Boosting algorithms.
+ *
+ * @param <K> Type of a key in upstream data.
+ * @param <V> Type of a value in upstream data.
+ */
+public abstract class ConvergenceChecker<K, V> implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 710762134746674105L;
+
+    /** Sample size. */
+    private long sampleSize;
+
+    /** External label to internal mapping. */
+    private IgniteFunction<Double, Double> externalLbToInternalMapping;
+
+    /** Loss function. */
+    private Loss loss;
+
+    /** Feature extractor. */
+    private IgniteBiFunction<K, V, Vector> featureExtractor;
+
+    /** Label extractor. */
+    private IgniteBiFunction<K, V, Double> lbExtractor;
+
+    /** Precision of convergence check. */
+    private double precision;
+
+    /**
+     * Constructs an instance of ConvergenceChecker.
+     *
+     * @param sampleSize Sample size.
+     * @param externalLbToInternalMapping External label to internal mapping.
+     * @param loss Loss gradient.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param precision
+     */
+    public ConvergenceChecker(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor,
+        double precision) {
+
+        assert precision < 1 && precision >= 0;
+
+        this.sampleSize = sampleSize;
+        this.externalLbToInternalMapping = externalLbToInternalMapping;
+        this.loss = loss;
+        this.featureExtractor = featureExtractor;
+        this.lbExtractor = lbExtractor;
+        this.precision = precision;
+    }
+
+    /**
+     * Checks convergency on dataset.
+     *
+     * @param currMdl Current model.
+     * @return true if GDB is converged.
+     */
+    public boolean isConverged(DatasetBuilder<K, V> datasetBuilder, ModelsComposition currMdl) {
+        try (Dataset<EmptyContext, FeatureMatrixWithLabelsOnHeapData> dataset = datasetBuilder.build(
+            new EmptyContextBuilder<>(),
+            new FeatureMatrixWithLabelsOnHeapDataBuilder<>(featureExtractor, lbExtractor)
+        )) {
+            return isConverged(dataset, currMdl);
+        }
+        catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Checks convergency on dataset.
+     *
+     * @param dataset Dataset.
+     * @param currMdl Current model.
+     * @return true if GDB is converged.
+     */
+    public boolean isConverged(Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset, ModelsComposition currMdl) {
+        Double error = computeMeanErrorOnDataset(dataset, currMdl);
+        return error < precision || error.isNaN();
+    }
+
+    /**
+     * Compute error for given model on learning dataset.
+     *
+     * @param dataset Learning dataset.
+     * @param mdl Model.
+     * @return error mean value.
+     */
+    public abstract Double computeMeanErrorOnDataset(
+        Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset,
+        ModelsComposition mdl);
+
+    /**
+     * Compute error for the specific vector of dataset.
+     *
+     * @param currMdl Current model.
+     * @return error.
+     */
+    public double computeError(Vector features, Double answer, ModelsComposition currMdl) {
+        Double realAnswer = externalLbToInternalMapping.apply(answer);
+        Double mdlAnswer = currMdl.apply(features);
+        return -loss.gradient(sampleSize, realAnswer, mdlAnswer);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerFactory.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerFactory.java
new file mode 100644
index 0000000..7592f50
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerFactory.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence;
+
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Factory for ConvergenceChecker.
+ */
+public abstract class ConvergenceCheckerFactory {
+    /** Precision of error checking. If error <= precision then it is equated to 0.0*/
+    protected double precision;
+
+    /**
+     * Creates an instance of ConvergenceCheckerFactory.
+     *
+     * @param precision Precision [0 <= precision < 1].
+     */
+    public ConvergenceCheckerFactory(double precision) {
+        this.precision = precision;
+    }
+
+    /**
+     * Create an instance of ConvergenceChecker.
+     *
+     * @param sampleSize Sample size.
+     * @param externalLbToInternalMapping External label to internal mapping.
+     * @param loss Loss function.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @return ConvergenceCheckerFactory instance.
+     */
+    public abstract <K,V> ConvergenceChecker<K,V> create(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss,
+        DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor);
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceChecker.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceChecker.java
new file mode 100644
index 0000000..7340bfa
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceChecker.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.mean;
+
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+
+/**
+ * Use mean value of errors for estimating error on dataset.
+ *
+ * @param <K> Type of a key in upstream data.
+ * @param <V> Type of a value in upstream data.
+ */
+public class MeanAbsValueConvergenceChecker<K,V> extends ConvergenceChecker<K,V> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 8534776439755210864L;
+
+    /**
+     * Creates an intance of MeanAbsValueConvergenceChecker.
+     *
+     * @param sampleSize Sample size.
+     * @param externalLbToInternalMapping External label to internal mapping.
+     * @param loss Loss.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     */
+    public MeanAbsValueConvergenceChecker(long sampleSize, IgniteFunction<Double, Double> externalLbToInternalMapping,
+        Loss loss, DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor,
+        double precision) {
+
+        super(sampleSize, externalLbToInternalMapping, loss, datasetBuilder, featureExtractor, lbExtractor, precision);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double computeMeanErrorOnDataset(Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset,
+        ModelsComposition mdl) {
+
+        IgniteBiTuple<Double, Long> sumAndCnt = dataset.compute(
+            partition -> computeStatisticOnPartition(mdl, partition),
+            this::reduce
+        );
+
+        if(sumAndCnt == null || sumAndCnt.getValue() == 0)
+            return Double.NaN;
+        return sumAndCnt.getKey() / sumAndCnt.getValue();
+    }
+
+    /**
+     * Compute sum of absolute value of errors and count of rows in partition.
+     *
+     * @param mdl Model.
+     * @param part Partition.
+     * @return Tuple (sum of errors, count of rows)
+     */
+    private IgniteBiTuple<Double, Long> computeStatisticOnPartition(ModelsComposition mdl, FeatureMatrixWithLabelsOnHeapData part) {
+        Double sum = 0.0;
+
+        for(int i = 0; i < part.getFeatures().length; i++) {
+            double error = computeError(VectorUtils.of(part.getFeatures()[i]), part.getLabels()[i], mdl);
+            sum += Math.abs(error);
+        }
+
+        return new IgniteBiTuple<>(sum, (long) part.getLabels().length);
+    }
+
+    /**
+     * Merge left and right statistics from partitions.
+     *
+     * @param left Left.
+     * @param right Right.
+     * @return merged value.
+     */
+    private IgniteBiTuple<Double, Long> reduce(IgniteBiTuple<Double, Long> left, IgniteBiTuple<Double, Long> right) {
+        if (left == null) {
+            if (right != null)
+                return right;
+            else
+                return new IgniteBiTuple<>(0.0, 0L);
+        }
+
+        if (right == null)
+            return left;
+
+        return new IgniteBiTuple<>(
+            left.getKey() + right.getKey(),
+            right.getValue() + left.getValue()
+        );
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerFactory.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerFactory.java
new file mode 100644
index 0000000..f02a606
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.mean;
+
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Factory for {@link MeanAbsValueConvergenceChecker}.
+ */
+public class MeanAbsValueConvergenceCheckerFactory extends ConvergenceCheckerFactory {
+    /**
+     * @param precision Precision.
+     */
+    public MeanAbsValueConvergenceCheckerFactory(double precision) {
+        super(precision);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> ConvergenceChecker<K, V> create(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return new MeanAbsValueConvergenceChecker<>(sampleSize, externalLbToInternalMapping, loss,
+            datasetBuilder, featureExtractor, lbExtractor, precision);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/package-info.java
new file mode 100644
index 0000000..1ab6e66
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/mean/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains implementation of convergence checking computer by mean of absolute value of errors in dataset.
+ */
+package org.apache.ignite.ml.composition.boosting.convergence.mean;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceChecker.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceChecker.java
new file mode 100644
index 0000000..7e66a9c
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceChecker.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.median;
+
+import java.util.Arrays;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+
+/**
+ * Use median of median on partitions value of errors for estimating error on dataset. This algorithm may be less
+ * sensitive to
+ *
+ * @param <K> Type of a key in upstream data.
+ * @param <V> Type of a value in upstream data.
+ */
+public class MedianOfMedianConvergenceChecker<K, V> extends ConvergenceChecker<K, V> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 4902502002933415287L;
+
+    /**
+     * Creates an instance of MedianOfMedianConvergenceChecker.
+     *
+     * @param sampleSize Sample size.
+     * @param lblMapping External label to internal mapping.
+     * @param loss Loss function.
+     * @param datasetBuilder Dataset builder.
+     * @param fExtr Feature extractor.
+     * @param lbExtr Label extractor.
+     * @param precision Precision.
+     */
+    public MedianOfMedianConvergenceChecker(long sampleSize, IgniteFunction<Double, Double> lblMapping, Loss loss,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> fExtr,
+        IgniteBiFunction<K, V, Double> lbExtr, double precision) {
+
+        super(sampleSize, lblMapping, loss, datasetBuilder, fExtr, lbExtr, precision);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double computeMeanErrorOnDataset(Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset,
+        ModelsComposition mdl) {
+
+        double[] medians = dataset.compute(
+            data -> computeMedian(mdl, data),
+            this::reduce
+        );
+
+        if(medians == null)
+            return Double.POSITIVE_INFINITY;
+        return getMedian(medians);
+    }
+
+    /**
+     * Compute median value on data partition.
+     *
+     * @param mdl Model.
+     * @param data Data.
+     * @return median value.
+     */
+    private double[] computeMedian(ModelsComposition mdl, FeatureMatrixWithLabelsOnHeapData data) {
+        double[] errors = new double[data.getLabels().length];
+        for (int i = 0; i < errors.length; i++)
+            errors[i] = Math.abs(computeError(VectorUtils.of(data.getFeatures()[i]), data.getLabels()[i], mdl));
+        return new double[] {getMedian(errors)};
+    }
+
+    /**
+     * Compute median value on array of errors.
+     *
+     * @param errors Error values.
+     * @return median value of errors.
+     */
+    private double getMedian(double[] errors) {
+        if(errors.length == 0)
+            return Double.POSITIVE_INFINITY;
+
+        Arrays.sort(errors);
+        final int middleIdx = (errors.length - 1) / 2;
+        if (errors.length % 2 == 1)
+            return errors[middleIdx];
+        else
+            return (errors[middleIdx + 1] + errors[middleIdx]) / 2;
+    }
+
+    /**
+     * Merge median values among partitions.
+     *
+     * @param left Left partition.
+     * @param right Right partition.
+     * @return merged median values.
+     */
+    private double[] reduce(double[] left, double[] right) {
+        if (left == null)
+            return right;
+        if(right == null)
+            return left;
+
+        double[] res = new double[left.length + right.length];
+        System.arraycopy(left, 0, res, 0, left.length);
+        System.arraycopy(right, 0, res, left.length, right.length);
+        return res;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerFactory.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerFactory.java
new file mode 100644
index 0000000..a1affe0
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerFactory.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.median;
+
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Factory for {@link MedianOfMedianConvergenceChecker}.
+ */
+public class MedianOfMedianConvergenceCheckerFactory extends ConvergenceCheckerFactory {
+    /**
+     * @param precision Precision.
+     */
+    public MedianOfMedianConvergenceCheckerFactory(double precision) {
+        super(precision);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> ConvergenceChecker<K, V> create(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return new MedianOfMedianConvergenceChecker<>(sampleSize, externalLbToInternalMapping, loss,
+            datasetBuilder, featureExtractor, lbExtractor, precision);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/package-info.java
new file mode 100644
index 0000000..3798ef9
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/median/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains implementation of convergence checking computer by median of medians of errors in dataset.
+ */
+package org.apache.ignite.ml.composition.boosting.convergence.median;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/package-info.java
new file mode 100644
index 0000000..6d42c62
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Package contains implementation of convergency checking algorithms for gradient boosting.
+ * This algorithms may stop training of gradient boosting if it achieve error on dataset less than precision
+ * specified by user.
+ */
+package org.apache.ignite.ml.composition.boosting.convergence;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStub.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStub.java
new file mode 100644
index 0000000..716d04e
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStub.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.simple;
+
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * This strategy skip estimating error on dataset step.
+ * According to this strategy, training will stop after reaching the maximum number of iterations.
+ *
+ * @param <K> Type of a key in upstream data.
+ * @param <V> Type of a value in upstream data.
+ */
+public class ConvergenceCheckerStub<K,V> extends ConvergenceChecker<K,V> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 8534776439755210864L;
+
+    /**
+     * Creates an intance of ConvergenceCheckerStub.
+     *
+     * @param sampleSize Sample size.
+     * @param externalLbToInternalMapping External label to internal mapping.
+     * @param loss Loss function.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     */
+    public ConvergenceCheckerStub(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss,
+        DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        super(sampleSize, externalLbToInternalMapping, loss, datasetBuilder,
+            featureExtractor, lbExtractor, 0.0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isConverged(DatasetBuilder<K, V> datasetBuilder, ModelsComposition currMdl) {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isConverged(Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset,
+        ModelsComposition currMdl) {
+        return false;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double computeMeanErrorOnDataset(Dataset<EmptyContext, ? extends FeatureMatrixWithLabelsOnHeapData> dataset,
+        ModelsComposition mdl) {
+
+        throw new UnsupportedOperationException();
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStubFactory.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStubFactory.java
new file mode 100644
index 0000000..a0f0d5c
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/ConvergenceCheckerStubFactory.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.simple;
+
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Factory for {@link ConvergenceCheckerStub}.
+ */
+public class ConvergenceCheckerStubFactory extends ConvergenceCheckerFactory {
+    /**
+     * Create an instance of ConvergenceCheckerStubFactory.
+     */
+    public ConvergenceCheckerStubFactory() {
+        super(0.0);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> ConvergenceChecker<K, V> create(long sampleSize,
+        IgniteFunction<Double, Double> externalLbToInternalMapping, Loss loss,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return new ConvergenceCheckerStub<>(sampleSize, externalLbToInternalMapping, loss,
+            datasetBuilder, featureExtractor, lbExtractor);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/package-info.java
new file mode 100644
index 0000000..915903a
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/convergence/simple/package-info.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains implementation of Stub for convergence checking.
+ * By this implementation gradient boosting will train new submodels until count of models achieving max value [count
+ * of iterations parameter].
+ */
+package org.apache.ignite.ml.composition.boosting.convergence.simple;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/LogLoss.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/LogLoss.java
new file mode 100644
index 0000000..19ef70b
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/LogLoss.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.loss;
+
+/**
+ * Logistic regression loss function.
+ */
+public class LogLoss implements Loss {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 2251384437214194977L;
+
+    /** {@inheritDoc} */
+    @Override public double error(long sampleSize, double answer, double prediction) {
+        return -(answer * Math.log(prediction) + (1 - answer) * Math.log(1 - prediction));
+    }
+
+    /** {@inheritDoc} */
+    @Override public double gradient(long sampleSize, double answer, double prediction) {
+        return (prediction - answer) / (prediction * (1.0 - prediction));
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/Loss.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/Loss.java
new file mode 100644
index 0000000..72fff30
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/Loss.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.loss;
+
+import java.io.Serializable;
+
+/**
+ * Loss interface of computing error or gradient of error on specific row in dataset.
+ */
+public interface Loss extends Serializable {
+    /**
+     * Error value for model answer.
+     *
+     * @param sampleSize Sample size.
+     * @param lb Label.
+     * @param mdlAnswer Model answer.
+     * @return error value.
+     */
+    public double error(long sampleSize, double lb, double mdlAnswer);
+
+    /**
+     * Error gradient value for model answer.
+     *
+     * @param sampleSize Sample size.
+     * @param lb Label.
+     * @param mdlAnswer Model answer.
+     * @return error value.
+     */
+    public double gradient(long sampleSize, double lb, double mdlAnswer);
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/SquaredError.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/SquaredError.java
new file mode 100644
index 0000000..8f2f17e
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/SquaredError.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.loss;
+
+/**
+ * Represent error function as E(label, modelAnswer) = 1/N * (label - prediction)^2
+ */
+public class SquaredError implements Loss {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 564886150646352157L;
+
+    /** {@inheritDoc} */
+    @Override public double error(long sampleSize, double lb, double prediction) {
+        return Math.pow(lb - prediction, 2) / sampleSize;
+    }
+
+    /** {@inheritDoc} */
+    @Override public double gradient(long sampleSize, double lb, double prediction) {
+        return (2.0 / sampleSize) * (prediction - lb);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/package-info.java
new file mode 100644
index 0000000..83a5e39
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/boosting/loss/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains loss functions for Gradient Boosting algorithms.
+ */
+package org.apache.ignite.ml.composition.boosting.loss;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java
index c37fdf7..5e0f7f1 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregator.java
@@ -20,7 +20,7 @@
 import org.apache.ignite.internal.util.typedef.internal.A;
 
 /**
- * Predictions aggregator returning weighted sum of predictions.
+ * Predictions aggregator returning weighted plus of predictions.
  * result(p1, ..., pn) = bias + p1*w1 + ... + pn*wn
  */
 public class WeightedPredictionsAggregator implements PredictionsAggregator {
@@ -86,4 +86,14 @@
         return builder.append(bias > 0 ? " + " : " - ").append(String.format("%.4f", bias))
             .append("]").toString();
     }
+
+    /** */
+    public double[] getWeights() {
+        return weights;
+    }
+
+    /** */
+    public double getBias() {
+        return bias;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/BucketMeta.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/BucketMeta.java
new file mode 100644
index 0000000..4ac9adb
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/BucketMeta.java
@@ -0,0 +1,88 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.io.Serializable;
+
+/**
+ * Bucket meta-information for feature histogram.
+ */
+public class BucketMeta implements Serializable {
+    /** Feature meta. */
+    private final FeatureMeta featureMeta;
+
+    /** Bucket size. */
+    private double bucketSize;
+
+    /** Min value of feature. */
+    private double minVal;
+
+    /**
+     * Creates an instance of BucketMeta.
+     *
+     * @param featureMeta Feature meta.
+     */
+    public BucketMeta(FeatureMeta featureMeta) {
+        this.featureMeta = featureMeta;
+    }
+
+    /**
+     * Returns bucket id for feature value.
+     *
+     * @param val Value.
+     * @return bucket id.
+     */
+    public int getBucketId(Double val) {
+        if(featureMeta.isCategoricalFeature())
+            return (int) Math.rint(val);
+
+        return (int) Math.rint((val - minVal) / bucketSize);
+    }
+
+    /**
+     * Returns mean value by bucket id.
+     *
+     * @param bucketId Bucket id.
+     * @return mean value of feature.
+     */
+    public double bucketIdToValue(int bucketId) {
+        if(featureMeta.isCategoricalFeature())
+            return (double) bucketId;
+
+        return minVal + (bucketId + 0.5) * bucketSize;
+    }
+
+    /**
+     * @param minVal Min value.
+     */
+    public void setMinVal(double minVal) {
+        this.minVal = minVal;
+    }
+
+    /**
+     * @param bucketSize Bucket size.
+     */
+    public void setBucketSize(double bucketSize) {
+        this.bucketSize = bucketSize;
+    }
+
+    /** */
+    public FeatureMeta getFeatureMeta() {
+        return featureMeta;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/DistributionComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/DistributionComputer.java
new file mode 100644
index 0000000..7582da0
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/DistributionComputer.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.util.TreeMap;
+
+/**
+ * Interface specifies an object that can compute some discrete distribution.
+ */
+public interface DistributionComputer {
+    /**
+     * Compute distribution function.
+     *
+     * @return Map represents discrete distribution function.
+     */
+    public TreeMap<Integer, Double> computeDistributionFunction();
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/FeatureMeta.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/FeatureMeta.java
new file mode 100644
index 0000000..a7d846f
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/FeatureMeta.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.io.Serializable;
+
+/**
+ * Feature meta class.
+ */
+public class FeatureMeta implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -2990950807063111877L;
+
+    /** Name. */
+    private final String name;
+
+    /** Id of feature in feature vector. */
+    private final int featureId;
+
+    /** Is categorical feature flag. */
+    private final boolean isCategoricalFeature;
+
+    /**
+     * Create an instance of Feature meta.
+     *
+     * @param name Feature name.
+     * @param featureId Feature id.
+     * @param isCategoricalFeature Is categorical feature.
+     */
+    public FeatureMeta(String name, int featureId, boolean isCategoricalFeature) {
+        this.name = name;
+        this.featureId = featureId;
+        this.isCategoricalFeature = isCategoricalFeature;
+    }
+
+    /** */
+    public int getFeatureId() {
+        return featureId;
+    }
+
+    /** */
+    public boolean isCategoricalFeature() {
+        return isCategoricalFeature;
+    }
+
+    /** */
+    public String getName() {
+        return name;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/Histogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/Histogram.java
new file mode 100644
index 0000000..6784af1
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/Histogram.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.io.Serializable;
+import java.util.Optional;
+import java.util.Set;
+
+/**
+ * Interface of histogram over type T.
+ *
+ * @param <T> Type of object for histogram.
+ * @param <H> Type of histogram that can be used in math operations with this histogram.
+ */
+public interface Histogram<T, H extends Histogram<T, H>> extends Serializable {
+    /**
+     * Add object to histogram.
+     *
+     * @param val Value.
+     */
+    public void addElement(T val);
+
+    /**
+     *
+     * @return bucket ids.
+     */
+    public Set<Integer> buckets();
+
+    /**
+     *
+     * @param bucketId Bucket id.
+     * @return value in according to bucket id.
+     */
+    public Optional<Double> getValue(Integer bucketId);
+
+    /**
+     * @param other Other histogram.
+     * @return sum of this and other histogram.
+     */
+    public H plus(H other);
+
+    /**
+     * Compares histogram with other and returns true if they are equals
+     *
+     * @param other Other histogram.
+     * @return true if histograms are equal.
+     */
+    public boolean isEqualTo(H other);
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java
new file mode 100644
index 0000000..d894c3f
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/ObjectHistogram.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.TreeMap;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+
+/**
+ * Basic implementation of {@link Histogram} that implements also {@link DistributionComputer}.
+ *
+ * @param <T> Type of object for histogram.
+ */
+public class ObjectHistogram<T> implements Histogram<T, ObjectHistogram<T>>, DistributionComputer {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -2708731174031404487L;
+
+    /** Bucket mapping. */
+    private final IgniteFunction<T, Integer> bucketMapping;
+
+    /** Mapping to counter. */
+    private final IgniteFunction<T, Double> mappingToCntr;
+
+    /** Histogram. */
+    private final Map<Integer, Double> hist;
+
+    /**
+     * Create an instance of ObjectHistogram.
+     *
+     * @param bucketMapping Bucket mapping.
+     * @param mappingToCntr Mapping to counter.
+     */
+    public ObjectHistogram(IgniteFunction<T, Integer> bucketMapping,
+        IgniteFunction<T, Double> mappingToCntr) {
+
+        this.bucketMapping = bucketMapping;
+        this.mappingToCntr = mappingToCntr;
+        this.hist = new TreeMap<>(Integer::compareTo);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addElement(T val) {
+        Integer bucket = bucketMapping.apply(val);
+        Double cntrVal = mappingToCntr.apply(val);
+
+        assert cntrVal >= 0;
+        Double bucketVal = hist.getOrDefault(bucket, 0.0);
+        hist.put(bucket, bucketVal + cntrVal);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Set<Integer> buckets() {
+        return hist.keySet();
+    }
+
+    /** {@inheritDoc} */
+    @Override public Optional<Double> getValue(Integer bucketId) {
+        return Optional.ofNullable(hist.get(bucketId));
+    }
+
+    /** {@inheritDoc} */
+    @Override public TreeMap<Integer, Double> computeDistributionFunction() {
+        TreeMap<Integer, Double> res = new TreeMap<>();
+
+        double accum = 0.0;
+        for (Integer bucket : hist.keySet()) {
+            accum += hist.get(bucket);
+            res.put(bucket, accum);
+        }
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ObjectHistogram<T> plus(ObjectHistogram<T> other) {
+        ObjectHistogram<T> res = new ObjectHistogram<>(bucketMapping, mappingToCntr);
+        addTo(this.hist, res.hist);
+        addTo(other.hist, res.hist);
+        return res;
+    }
+
+    /**
+     * Adds bucket values to target histogram.
+     *
+     * @param from From.
+     * @param to To.
+     */
+    private void addTo(Map<Integer, Double> from, Map<Integer, Double> to) {
+        from.forEach((bucket, value) -> {
+            Double putVal = to.getOrDefault(bucket, 0.0);
+            to.put(bucket, putVal + value);
+        });
+    }
+
+    /** {@inheritDoc} */
+    public boolean isEqualTo(ObjectHistogram<T> other) {
+        Set<Integer> totalBuckets = new HashSet<>(buckets());
+        totalBuckets.addAll(other.buckets());
+        if(totalBuckets.size() != buckets().size())
+            return false;
+
+        for(Integer bucketId : totalBuckets) {
+            double leftVal = hist.get(bucketId);
+            double rightVal = other.hist.get(bucketId);
+            if(Math.abs(leftVal - rightVal) > 0.001)
+                return false;
+        }
+
+        return true;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/package-info.java
new file mode 100644
index 0000000..a0c346b
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/feature/package-info.java
@@ -0,0 +1,24 @@
+/*
+
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Package for helper classes over features such as {@link org.apache.ignite.ml.dataset.feature.ObjectHistogram} or
+ * {@link org.apache.ignite.ml.dataset.feature.FeatureMeta}.
+ */
+package org.apache.ignite.ml.dataset.feature;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetBuilder.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetBuilder.java
new file mode 100644
index 0000000..8707e3a
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetBuilder.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.impl.bootstrapping;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import org.apache.commons.math3.distribution.PoissonDistribution;
+import org.apache.ignite.ml.dataset.PartitionDataBuilder;
+import org.apache.ignite.ml.dataset.UpstreamEntry;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Builder for bootstrapped dataset. Bootstrapped dataset consist of several subsamples created in according to random
+ * sampling with replacements selection of vectors from original dataset. This realization uses
+ * {@link BootstrappedVector} containing each vector from original sample with counters of repetitions
+ * for each subsample. As heuristic this implementation uses Poisson Distribution for generating counter values.
+ *
+ * @param <K> Type of a key in {@code upstream} data.
+ * @param <V> Type of a value in {@code upstream} data.
+ */
+public class BootstrappedDatasetBuilder<K,V> implements PartitionDataBuilder<K,V, EmptyContext, BootstrappedDatasetPartition> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 8146220902914010559L;
+
+    /** Feature extractor. */
+    private final IgniteBiFunction<K, V, Vector> featureExtractor;
+
+    /** Label extractor. */
+    private final IgniteBiFunction<K, V, Double> lbExtractor;
+
+    /** Samples count. */
+    private final int samplesCnt;
+
+    /** Subsample size. */
+    private final double subsampleSize;
+
+    /**
+     * Creates an instance of BootstrappedDatasetBuilder.
+     *
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param samplesCnt Samples count.
+     * @param subsampleSize Subsample size.
+     */
+    public BootstrappedDatasetBuilder(IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor, int samplesCnt, double subsampleSize) {
+
+        this.featureExtractor = featureExtractor;
+        this.lbExtractor = lbExtractor;
+        this.samplesCnt = samplesCnt;
+        this.subsampleSize = subsampleSize;
+    }
+
+    /** {@inheritDoc} */
+    @Override public BootstrappedDatasetPartition build(Iterator<UpstreamEntry<K, V>> upstreamData, long upstreamDataSize,
+        EmptyContext ctx) {
+
+        BootstrappedVector[] dataset = new BootstrappedVector[Math.toIntExact(upstreamDataSize)];
+
+        int cntr = 0;
+        PoissonDistribution poissonDistribution = new PoissonDistribution(subsampleSize);
+        while(upstreamData.hasNext()) {
+            UpstreamEntry<K, V> nextRow = upstreamData.next();
+            Vector features = featureExtractor.apply(nextRow.getKey(), nextRow.getValue());
+            Double lb = lbExtractor.apply(nextRow.getKey(), nextRow.getValue());
+            int[] repetitionCounters = new int[samplesCnt];
+            Arrays.setAll(repetitionCounters, i -> poissonDistribution.sample());
+            dataset[cntr++] = new BootstrappedVector(features, lb, repetitionCounters);
+        }
+
+        return new BootstrappedDatasetPartition(dataset);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetPartition.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetPartition.java
new file mode 100644
index 0000000..2155d1a
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedDatasetPartition.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.impl.bootstrapping;
+
+import java.util.Arrays;
+import java.util.Iterator;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Partition of bootstrapped vectors.
+ */
+public class BootstrappedDatasetPartition implements AutoCloseable, Iterable<BootstrappedVector> {
+    /** Vectors. */
+    private final BootstrappedVector[] vectors;
+
+    /**
+     * Creates an instance of BootstrappedDatasetPartition.
+     *
+     * @param vectors Vectors.
+     */
+    public BootstrappedDatasetPartition(BootstrappedVector[] vectors) {
+        this.vectors = vectors;
+    }
+
+    /**
+     * Returns vector from dataset in according to row id.
+     *
+     * @param rowId Row id.
+     * @return Vector.
+     */
+    public BootstrappedVector getRow(int rowId) {
+        return vectors[rowId];
+    }
+
+    /**
+     * Returns rows count.
+     *
+     * @return rows count.
+     */
+    public int getRowsCount() {
+        return vectors.length;
+    }
+
+    /** {@inheritDoc} */
+    @NotNull @Override public Iterator<BootstrappedVector> iterator() {
+        return Arrays.stream(vectors).iterator();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws Exception {
+        //NOP
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedVector.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedVector.java
new file mode 100644
index 0000000..aedd0fd
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/BootstrappedVector.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.impl.bootstrapping;
+
+import java.io.IOException;
+import java.io.ObjectInput;
+import java.io.ObjectOutput;
+import java.util.Arrays;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.structures.LabeledVector;
+
+/**
+ * Represents vector with repetitions counters for subsamples in bootstrapped dataset.
+ * Each counter shows the number of repetitions of the vector for the n-th sample.
+ */
+public class BootstrappedVector extends LabeledVector<Vector, Double> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -4583008673032917259L;
+
+    /** Counters show the number of repetitions of the vector for the n-th sample. */
+    private int[] counters;
+
+    /**
+     * Creates an instance of BootstrappedVector.
+     *
+     * @param features Features.
+     * @param lb Label.
+     * @param counters Repetitions counters.
+     */
+    public BootstrappedVector(Vector features, double lb, int[] counters) {
+        super(features, lb);
+        this.counters = counters;
+    }
+
+    /**
+     * @return repetitions counters vector.
+     */
+    public int[] counters() {
+        return counters;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object o) {
+        if (this == o)
+            return true;
+        if (o == null || getClass() != o.getClass())
+            return false;
+        if (!super.equals(o))
+            return false;
+        BootstrappedVector vector = (BootstrappedVector)o;
+        return Arrays.equals(counters, vector.counters);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + Arrays.hashCode(counters);
+        return result;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void writeExternal(ObjectOutput out) throws IOException {
+        super.writeExternal(out);
+        out.writeObject(counters);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
+        super.readExternal(in);
+        counters = (int[]) in.readObject();
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/package-info.java
new file mode 100644
index 0000000..5fcf629
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/impl/bootstrapping/package-info.java
@@ -0,0 +1,23 @@
+/*
+
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Base package for bootstrapped implementation of machine learning dataset.
+ */
+package org.apache.ignite.ml.dataset.impl.bootstrapping;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapData.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapData.java
new file mode 100644
index 0000000..9dbc1a9
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapData.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.primitive;
+
+/**
+ * A partition {@code data} of the containing matrix of features and vector of labels stored in heap.
+ */
+public class FeatureMatrixWithLabelsOnHeapData implements AutoCloseable {
+    /** Matrix with features. */
+    private final double[][] features;
+
+    /** Vector with labels. */
+    private final double[] labels;
+
+    /**
+     * Constructs an instance of FeatureMatrixWithLabelsOnHeapData.
+     *
+     * @param features Features.
+     * @param labels Labels.
+     */
+    public FeatureMatrixWithLabelsOnHeapData(double[][] features, double[] labels) {
+        assert features.length == labels.length : "Features and labels have to be the same length";
+
+        this.features = features;
+        this.labels = labels;
+    }
+
+    /** */
+    public double[][] getFeatures() {
+        return features;
+    }
+
+    /** */
+    public double[] getLabels() {
+        return labels;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() {
+        // Do nothing, GC will clean up.
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapDataBuilder.java b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapDataBuilder.java
new file mode 100644
index 0000000..be1724c
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/dataset/primitive/FeatureMatrixWithLabelsOnHeapDataBuilder.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.primitive;
+
+import java.io.Serializable;
+import java.util.Iterator;
+import org.apache.ignite.ml.dataset.PartitionDataBuilder;
+import org.apache.ignite.ml.dataset.UpstreamEntry;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.tree.data.DecisionTreeData;
+
+/**
+ * A partition {@code data} builder that makes {@link DecisionTreeData}.
+ *
+ * @param <K> Type of a key in <tt>upstream</tt> data.
+ * @param <V> Type of a value in <tt>upstream</tt> data.
+ * @param <C> Type of a partition <tt>context</tt>.
+ */
+public class FeatureMatrixWithLabelsOnHeapDataBuilder<K, V, C extends Serializable>
+    implements PartitionDataBuilder<K, V, C, FeatureMatrixWithLabelsOnHeapData> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 6273736987424171813L;
+
+    /** Function that extracts features from an {@code upstream} data. */
+    private final IgniteBiFunction<K, V, Vector> featureExtractor;
+
+    /** Function that extracts labels from an {@code upstream} data. */
+    private final IgniteBiFunction<K, V, Double> lbExtractor;
+
+    /**
+     * Constructs a new instance of decision tree data builder.
+     *
+     * @param featureExtractor Function that extracts features from an {@code upstream} data.
+     * @param lbExtractor Function that extracts labels from an {@code upstream} data.
+     */
+    public FeatureMatrixWithLabelsOnHeapDataBuilder(IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+        this.featureExtractor = featureExtractor;
+        this.lbExtractor = lbExtractor;
+    }
+
+    /** {@inheritDoc} */
+    @Override public FeatureMatrixWithLabelsOnHeapData build(Iterator<UpstreamEntry<K, V>> upstreamData, long upstreamDataSize, C ctx) {
+        double[][] features = new double[Math.toIntExact(upstreamDataSize)][];
+        double[] labels = new double[Math.toIntExact(upstreamDataSize)];
+
+        int ptr = 0;
+        while (upstreamData.hasNext()) {
+            UpstreamEntry<K, V> entry = upstreamData.next();
+
+            features[ptr] = featureExtractor.apply(entry.getKey(), entry.getValue()).asArray();
+
+            labels[ptr] = lbExtractor.apply(entry.getKey(), entry.getValue());
+
+            ptr++;
+        }
+
+        return new FeatureMatrixWithLabelsOnHeapData(features, labels);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilder.java b/modules/ml/src/main/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilder.java
index be56ccc..91e832d 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilder.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilder.java
@@ -60,8 +60,10 @@
         switch (stgyType) {
             case NO_PARALLELISM:
                 this.parallelismStgy = NoParallelismStrategy.INSTANCE;
+                break;
             case ON_DEFAULT_POOL:
                 this.parallelismStgy = new DefaultParallelismStrategy();
+                break;
         }
         return this;
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/environment/parallelism/ParallelismStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/environment/parallelism/ParallelismStrategy.java
index cdf2d50..e7228f8 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/environment/parallelism/ParallelismStrategy.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/environment/parallelism/ParallelismStrategy.java
@@ -26,9 +26,13 @@
  * bagging, learning submodels for One-vs-All model, Cross-Validation etc.
  */
 public interface ParallelismStrategy {
+
+    /**
+     * The type of parallelism.
+     */
     public enum Type {
-        NO_PARALLELISM,
-        ON_DEFAULT_POOL
+        /** No parallelism. */NO_PARALLELISM,
+        /** On default pool. */ON_DEFAULT_POOL
     }
 
     /**
@@ -38,6 +42,13 @@
      */
     public <T> Promise<T> submit(IgniteSupplier<T> task);
 
+    /**
+     * Submit the list of tasks.
+     *
+     * @param tasks The task list.
+     * @param <T> The type of return value.
+     * @return The result of submit operation.
+     */
     public default <T> List<Promise<T>> submit(List<IgniteSupplier<T>> tasks) {
         List<Promise<T>> results = new ArrayList<>();
         for(IgniteSupplier<T> task : tasks)
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/KNNUtils.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/KNNUtils.java
index b5a0cdb..d7bccd8 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/KNNUtils.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/KNNUtils.java
@@ -23,8 +23,8 @@
 import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.structures.partition.LabeledDatasetPartitionDataBuilderOnHeap;
 import org.jetbrains.annotations.Nullable;
 
@@ -40,14 +40,14 @@
      * @param lbExtractor Label extractor.
      * @return Dataset.
      */
-    @Nullable public static <K, V> Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> buildDataset(DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
-        PartitionDataBuilder<K, V, EmptyContext, LabeledDataset<Double, LabeledVector>> partDataBuilder
+    @Nullable public static <K, V> Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> buildDataset(DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+        PartitionDataBuilder<K, V, EmptyContext, LabeledVectorSet<Double, LabeledVector>> partDataBuilder
             = new LabeledDatasetPartitionDataBuilderOnHeap<>(
             featureExtractor,
             lbExtractor
         );
 
-        Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset = null;
+        Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset = null;
 
         if (datasetBuilder != null) {
             dataset = datasetBuilder.build(
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java
new file mode 100644
index 0000000..d435f91
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/NNClassificationModel.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import org.apache.ignite.ml.Exportable;
+import org.apache.ignite.ml.Exporter;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.knn.classification.KNNModelFormat;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.DistanceMeasure;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.util.ModelTrace;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Common methods and fields for all kNN and aNN models
+ * to predict label based on neighbours' labels.
+ */
+public abstract class NNClassificationModel implements Model<Vector, Double>, Exportable<KNNModelFormat> {
+    /** Amount of nearest neighbors. */
+    protected int k = 5;
+
+    /** Distance measure. */
+    protected DistanceMeasure distanceMeasure = new EuclideanDistance();
+
+    /** kNN strategy. */
+    protected NNStrategy stgy = NNStrategy.SIMPLE;
+
+    /**
+     * Set up parameter of the NN model.
+     * @param k Amount of nearest neighbors.
+     * @return Model.
+     */
+    public NNClassificationModel withK(int k) {
+        this.k = k;
+        return this;
+    }
+
+    /**
+     * Set up parameter of the NN model.
+     * @param stgy Strategy of calculations.
+     * @return Model.
+     */
+    public NNClassificationModel withStrategy(NNStrategy stgy) {
+        this.stgy = stgy;
+        return this;
+    }
+
+    /**
+     * Set up parameter of the NN model.
+     * @param distanceMeasure Distance measure.
+     * @return Model.
+     */
+    public NNClassificationModel withDistanceMeasure(DistanceMeasure distanceMeasure) {
+        this.distanceMeasure = distanceMeasure;
+        return this;
+    }
+
+    /** */
+    protected LabeledVectorSet<Double, LabeledVector> buildLabeledDatasetOnListOfVectors(
+        List<LabeledVector> neighborsFromPartitions) {
+        LabeledVector[] arr = new LabeledVector[neighborsFromPartitions.size()];
+        for (int i = 0; i < arr.length; i++)
+            arr[i] = neighborsFromPartitions.get(i);
+
+        return new LabeledVectorSet<Double, LabeledVector>(arr);
+    }
+
+    /**
+     * Iterates along entries in distance map and fill the resulting k-element array.
+     *
+     * @param trainingData The training data.
+     * @param distanceIdxPairs The distance map.
+     * @return K-nearest neighbors.
+     */
+    @NotNull protected LabeledVector[] getKClosestVectors(LabeledVectorSet<Double, LabeledVector> trainingData,
+                                                          TreeMap<Double, Set<Integer>> distanceIdxPairs) {
+        LabeledVector[] res;
+
+        if (trainingData.rowSize() <= k) {
+            res = new LabeledVector[trainingData.rowSize()];
+            for (int i = 0; i < trainingData.rowSize(); i++)
+                res[i] = trainingData.getRow(i);
+        }
+        else {
+            res = new LabeledVector[k];
+            int i = 0;
+            final Iterator<Double> iter = distanceIdxPairs.keySet().iterator();
+            while (i < k) {
+                double key = iter.next();
+                Set<Integer> idxs = distanceIdxPairs.get(key);
+                for (Integer idx : idxs) {
+                    res[i] = trainingData.getRow(idx);
+                    i++;
+                    if (i >= k)
+                        break; // go to next while-loop iteration
+                }
+            }
+        }
+
+        return res;
+    }
+
+    /**
+     * Computes distances between given vector and each vector in training dataset.
+     *
+     * @param v The given vector.
+     * @param trainingData The training dataset.
+     * @return Key - distanceMeasure from given features before features with idx stored in value. Value is presented
+     * with Set because there can be a few vectors with the same distance.
+     */
+    @NotNull protected TreeMap<Double, Set<Integer>> getDistances(Vector v, LabeledVectorSet<Double, LabeledVector> trainingData) {
+        TreeMap<Double, Set<Integer>> distanceIdxPairs = new TreeMap<>();
+
+        for (int i = 0; i < trainingData.rowSize(); i++) {
+
+            LabeledVector labeledVector = trainingData.getRow(i);
+            if (labeledVector != null) {
+                double distance = distanceMeasure.compute(v, labeledVector.features());
+                putDistanceIdxPair(distanceIdxPairs, i, distance);
+            }
+        }
+        return distanceIdxPairs;
+    }
+
+    /** */
+    protected void putDistanceIdxPair(Map<Double, Set<Integer>> distanceIdxPairs, int i, double distance) {
+        if (distanceIdxPairs.containsKey(distance)) {
+            Set<Integer> idxs = distanceIdxPairs.get(distance);
+            idxs.add(i);
+        }
+        else {
+            Set<Integer> idxs = new HashSet<>();
+            idxs.add(i);
+            distanceIdxPairs.put(distance, idxs);
+        }
+    }
+
+    /** */
+    protected double getClassWithMaxVotes(Map<Double, Double> clsVotes) {
+        return Collections.max(clsVotes.entrySet(), Map.Entry.comparingByValue()).getKey();
+    }
+
+    /** */
+    protected double getClassVoteForVector(NNStrategy stgy, double distance) {
+        if (stgy.equals(NNStrategy.WEIGHTED))
+            return 1 / distance; // strategy.WEIGHTED
+        else
+            return 1.0; // strategy.SIMPLE
+    }
+
+    /** */
+    public DistanceMeasure getDistanceMeasure() {
+        return distanceMeasure;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = 1;
+
+        res = res * 37 + k;
+        res = res * 37 + distanceMeasure.hashCode();
+        res = res * 37 + stgy.hashCode();
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+
+        if (obj == null || getClass() != obj.getClass())
+            return false;
+
+        NNClassificationModel that = (NNClassificationModel)obj;
+
+        return k == that.k && distanceMeasure.equals(that.distanceMeasure) && stgy.equals(that.stgy);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return toString(false);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString(boolean pretty) {
+        return ModelTrace.builder("KNNClassificationModel", pretty)
+            .addField("k", String.valueOf(k))
+            .addField("measure", distanceMeasure.getClass().getSimpleName())
+            .addField("strategy", stgy.name())
+            .toString();
+    }
+
+    /**
+     * Sets parameters from other model to this model.
+     *
+     * @param mdl Model.
+     */
+    protected void copyParametersFrom(NNClassificationModel mdl) {
+        this.k = mdl.k;
+        this.distanceMeasure = mdl.distanceMeasure;
+        this.stgy = mdl.stgy;
+    }
+
+    /** */
+    public abstract <P> void saveModel(Exporter<KNNModelFormat, P> exporter, P path);
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java
new file mode 100644
index 0000000..bec82a9
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationModel.java
@@ -0,0 +1,208 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn.ann;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.TreeMap;
+import org.apache.ignite.ml.Exporter;
+import org.apache.ignite.ml.knn.NNClassificationModel;
+import org.apache.ignite.ml.knn.classification.KNNModelFormat;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.util.ModelTrace;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * ANN model to predict labels in multi-class classification task.
+ */
+public class ANNClassificationModel extends NNClassificationModel  {
+    /** */
+    private static final long serialVersionUID = -127312378991350345L;
+
+    /** The labeled set of candidates. */
+    private final LabeledVectorSet<ProbableLabel, LabeledVector> candidates;
+
+    /** Centroid statistics. */
+    private final ANNClassificationTrainer.CentroidStat centroindsStat;
+
+    /**
+     * Build the model based on a candidates set.
+     * @param centers The candidates set.
+     * @param centroindsStat
+     */
+    public ANNClassificationModel(LabeledVectorSet<ProbableLabel, LabeledVector> centers,
+        ANNClassificationTrainer.CentroidStat centroindsStat) {
+       this.candidates = centers;
+       this.centroindsStat = centroindsStat;
+    }
+
+    /** */
+    public LabeledVectorSet<ProbableLabel, LabeledVector> getCandidates() {
+        return candidates;
+    }
+
+    /** */
+    public ANNClassificationTrainer.CentroidStat getCentroindsStat() {
+        return centroindsStat;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double apply(Vector v) {
+            List<LabeledVector> neighbors = findKNearestNeighbors(v);
+            return classify(neighbors, v, stgy);
+    }
+
+    /** */
+    @Override public <P> void saveModel(Exporter<KNNModelFormat, P> exporter, P path) {
+        ANNModelFormat mdlData = new ANNModelFormat(k, distanceMeasure, stgy, candidates, centroindsStat);
+        exporter.save(mdlData, path);
+    }
+
+    /**
+     * The main idea is calculation all distance pairs between given vector and all centroids in candidates set, sorting
+     * them and finding k vectors with min distance with the given vector.
+     *
+     * @param v The given vector.
+     * @return K-nearest neighbors.
+     */
+    private List<LabeledVector> findKNearestNeighbors(Vector v) {
+        return Arrays.asList(getKClosestVectors(getDistances(v)));
+    }
+
+    /**
+     * Iterates along entries in distance map and fill the resulting k-element array.
+     * @param distanceIdxPairs The distance map.
+     * @return K-nearest neighbors.
+     */
+    @NotNull private LabeledVector[] getKClosestVectors(
+        TreeMap<Double, Set<Integer>> distanceIdxPairs) {
+        LabeledVector[] res;
+
+        if (candidates.rowSize() <= k) {
+            res = new LabeledVector[candidates.rowSize()];
+            for (int i = 0; i < candidates.rowSize(); i++)
+                res[i] = candidates.getRow(i);
+        }
+        else {
+            res = new LabeledVector[k];
+            int i = 0;
+            final Iterator<Double> iter = distanceIdxPairs.keySet().iterator();
+            while (i < k) {
+                double key = iter.next();
+                Set<Integer> idxs = distanceIdxPairs.get(key);
+                for (Integer idx : idxs) {
+                    res[i] = candidates.getRow(idx);
+                    i++;
+                    if (i >= k)
+                        break; // go to next while-loop iteration
+                }
+            }
+        }
+
+        return res;
+    }
+
+    /**
+     * Computes distances between given vector and each vector in training dataset.
+     *
+     * @param v The given vector.
+     * @return Key - distanceMeasure from given features before features with idx stored in value. Value is presented
+     * with Set because there can be a few vectors with the same distance.
+     */
+    @NotNull private TreeMap<Double, Set<Integer>> getDistances(Vector v) {
+        TreeMap<Double, Set<Integer>> distanceIdxPairs = new TreeMap<>();
+
+        for (int i = 0; i < candidates.rowSize(); i++) {
+
+            LabeledVector labeledVector = candidates.getRow(i);
+            if (labeledVector != null) {
+                double distance = distanceMeasure.compute(v, labeledVector.features());
+                putDistanceIdxPair(distanceIdxPairs, i, distance);
+            }
+        }
+        return distanceIdxPairs;
+    }
+
+    /** */
+    private double classify(List<LabeledVector> neighbors, Vector v, NNStrategy stgy) {
+        Map<Double, Double> clsVotes = new HashMap<>();
+
+        for (LabeledVector neighbor : neighbors) {
+            TreeMap<Double, Double> probableClsLb = ((ProbableLabel)neighbor.label()).clsLbls;
+
+            double distance = distanceMeasure.compute(v, neighbor.features());
+
+            // we predict class label, not the probability vector (it need here another math with counting of votes)
+            probableClsLb.forEach((label, probability) -> {
+                double cnt = clsVotes.containsKey(label) ? clsVotes.get(label) : 0;
+                clsVotes.put(label, cnt + probability * getClassVoteForVector(stgy, distance));
+            });
+        }
+        return getClassWithMaxVotes(clsVotes);
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = 1;
+
+        res = res * 37 + k;
+        res = res * 37 + distanceMeasure.hashCode();
+        res = res * 37 + stgy.hashCode();
+        res = res * 37 + candidates.hashCode();
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+
+        if (obj == null || getClass() != obj.getClass())
+            return false;
+
+        ANNClassificationModel that = (ANNClassificationModel)obj;
+
+        return k == that.k
+            && distanceMeasure.equals(that.distanceMeasure)
+            && stgy.equals(that.stgy)
+            && candidates.equals(that.candidates);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString() {
+        return toString(false);
+    }
+
+    /** {@inheritDoc} */
+    @Override public String toString(boolean pretty) {
+        return ModelTrace.builder("KNNClassificationModel", pretty)
+            .addField("k", String.valueOf(k))
+            .addField("measure", distanceMeasure.getClass().getSimpleName())
+            .addField("strategy", stgy.name())
+            .addField("amount of candidates", String.valueOf(candidates.rowSize()))
+            .toString();
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java
new file mode 100644
index 0000000..e56a10a
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNClassificationTrainer.java
@@ -0,0 +1,382 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn.ann;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.List;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.stream.Collectors;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
+import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.PartitionDataBuilder;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.distances.DistanceMeasure;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.util.MapUtil;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.structures.partition.LabeledDatasetPartitionDataBuilderOnHeap;
+import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * ANN algorithm trainer to solve multi-class classification task. This trainer is based on ACD strategy and KMeans
+ * clustering algorithm to find centroids.
+ */
+public class ANNClassificationTrainer extends SingleLabelDatasetTrainer<ANNClassificationModel> {
+    /** Amount of clusters. */
+    private int k = 2;
+
+    /** Amount of iterations. */
+    private int maxIterations = 10;
+
+    /** Delta of convergence. */
+    private double epsilon = 1e-4;
+
+    /** Distance measure. */
+    private DistanceMeasure distance = new EuclideanDistance();
+
+    /** KMeans initializer. */
+    private long seed;
+
+    /**
+     * Trains model based on the specified data.
+     *
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @return Model.
+     */
+    @Override public <K, V> ANNClassificationModel fit(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> ANNClassificationModel updateModel(ANNClassificationModel mdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        List<Vector> centers;
+        CentroidStat centroidStat;
+        if (mdl != null) {
+            centers = Arrays.stream(mdl.getCandidates().data()).map(x -> x.features()).collect(Collectors.toList());
+            CentroidStat newStat = getCentroidStat(datasetBuilder, featureExtractor, lbExtractor, centers);
+            if(newStat == null)
+                return mdl;
+            CentroidStat oldStat = mdl.getCentroindsStat();
+            centroidStat = newStat.merge(oldStat);
+        } else {
+            centers = getCentroids(featureExtractor, lbExtractor, datasetBuilder);
+            centroidStat = getCentroidStat(datasetBuilder, featureExtractor, lbExtractor, centers);
+        }
+
+        final LabeledVectorSet<ProbableLabel, LabeledVector> dataset = buildLabelsForCandidates(centers, centroidStat);
+
+        return new ANNClassificationModel(dataset, centroidStat);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(ANNClassificationModel mdl) {
+        return mdl.getDistanceMeasure().equals(distance) && mdl.getCandidates().rowSize() == k;
+    }
+
+    /** */
+    @NotNull private LabeledVectorSet<ProbableLabel, LabeledVector> buildLabelsForCandidates(List<Vector> centers,
+        CentroidStat centroidStat) {
+        // init
+        final LabeledVector<Vector, ProbableLabel>[] arr = new LabeledVector[centers.size()];
+
+        // fill label for each centroid
+        for (int i = 0; i < centers.size(); i++)
+            arr[i] = new LabeledVector<>(centers.get(i), fillProbableLabel(i, centroidStat));
+
+        return new LabeledVectorSet<>(arr);
+    }
+
+    /**
+     * Perform KMeans clusterization algorithm to find centroids.
+     *
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param datasetBuilder The dataset builder.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return The arrays of vectors.
+     */
+    private <K, V> List<Vector> getCentroids(IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor, DatasetBuilder<K, V> datasetBuilder) {
+        KMeansTrainer trainer = new KMeansTrainer()
+            .withAmountOfClusters(k)
+            .withMaxIterations(maxIterations)
+            .withSeed(seed)
+            .withDistance(distance)
+            .withEpsilon(epsilon);
+
+        KMeansModel mdl = trainer.fit(
+            datasetBuilder,
+            featureExtractor,
+            lbExtractor
+        );
+
+        return Arrays.asList(mdl.getCenters());
+    }
+
+    /** */
+    private ProbableLabel fillProbableLabel(int centroidIdx, CentroidStat centroidStat) {
+        TreeMap<Double, Double> clsLbls = new TreeMap<>();
+
+        // add all class labels as keys
+        centroidStat.clsLblsSet.forEach(t -> clsLbls.put(t, 0.0));
+
+        ConcurrentHashMap<Double, Integer> centroidLbDistribution
+            = centroidStat.centroidStat().get(centroidIdx);
+
+        if (centroidStat.counts.containsKey(centroidIdx)) {
+
+            int clusterSize = centroidStat
+                .counts
+                .get(centroidIdx);
+
+            clsLbls.keySet().forEach(
+                (label) -> clsLbls.put(label, centroidLbDistribution.containsKey(label) ? ((double)(centroidLbDistribution.get(label)) / clusterSize) : 0.0)
+            );
+        }
+        return new ProbableLabel(clsLbls);
+    }
+
+    /** */
+    private <K, V> CentroidStat getCentroidStat(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor, List<Vector> centers) {
+
+        PartitionDataBuilder<K, V, EmptyContext, LabeledVectorSet<Double, LabeledVector>> partDataBuilder = new LabeledDatasetPartitionDataBuilderOnHeap<>(
+            featureExtractor,
+            lbExtractor
+        );
+
+        try (Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset = datasetBuilder.build(
+            (upstream, upstreamSize) -> new EmptyContext(),
+            partDataBuilder
+        )) {
+            return dataset.compute(data -> {
+                CentroidStat res = new CentroidStat();
+
+                for (int i = 0; i < data.rowSize(); i++) {
+                    final IgniteBiTuple<Integer, Double> closestCentroid = findClosestCentroid(centers, data.getRow(i));
+
+                    int centroidIdx = closestCentroid.get1();
+
+                    double lb = data.label(i);
+
+                    // add new label to label set
+                    res.labels().add(lb);
+
+                    ConcurrentHashMap<Double, Integer> centroidStat = res.centroidStat.get(centroidIdx);
+
+                    if (centroidStat == null) {
+                        centroidStat = new ConcurrentHashMap<>();
+                        centroidStat.put(lb, 1);
+                        res.centroidStat.put(centroidIdx, centroidStat);
+                    } else {
+                        int cnt = centroidStat.getOrDefault(lb, 0);
+                        centroidStat.put(lb, cnt + 1);
+                    }
+
+                    res.counts.merge(centroidIdx, 1,
+                        (IgniteBiFunction<Integer, Integer, Integer>)(i1, i2) -> i1 + i2);
+                }
+                return res;
+            }, (a, b) -> {
+                if (a == null)
+                    return b == null ? new CentroidStat() : b;
+                if (b == null)
+                    return a;
+                return a.merge(b);
+            });
+
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /**
+     * Find the closest cluster center index and distance to it from a given point.
+     *
+     * @param centers Centers to look in.
+     * @param pnt Point.
+     */
+    private IgniteBiTuple<Integer, Double> findClosestCentroid(List<Vector> centers, LabeledVector pnt) {
+        double bestDistance = Double.POSITIVE_INFINITY;
+        int bestInd = 0;
+
+        for (int i = 0; i < centers.size(); i++) {
+            if (centers.get(i) != null) {
+                double dist = distance.compute(centers.get(i), pnt.features());
+                if (dist < bestDistance) {
+                    bestDistance = dist;
+                    bestInd = i;
+                }
+            }
+        }
+        return new IgniteBiTuple<>(bestInd, bestDistance);
+    }
+
+    /**
+     * Gets the amount of clusters.
+     *
+     * @return The parameter value.
+     */
+    public int getK() {
+        return k;
+    }
+
+    /**
+     * Set up the amount of clusters.
+     *
+     * @param k The parameter value.
+     * @return Model with new amount of clusters parameter value.
+     */
+    public ANNClassificationTrainer withK(int k) {
+        this.k = k;
+        return this;
+    }
+
+    /**
+     * Gets the max number of iterations before convergence.
+     *
+     * @return The parameter value.
+     */
+    public int getMaxIterations() {
+        return maxIterations;
+    }
+
+    /**
+     * Set up the max number of iterations before convergence.
+     *
+     * @param maxIterations The parameter value.
+     * @return Model with new max number of iterations before convergence parameter value.
+     */
+    public ANNClassificationTrainer withMaxIterations(int maxIterations) {
+        this.maxIterations = maxIterations;
+        return this;
+    }
+
+    /**
+     * Gets the epsilon.
+     *
+     * @return The parameter value.
+     */
+    public double getEpsilon() {
+        return epsilon;
+    }
+
+    /**
+     * Set up the epsilon.
+     *
+     * @param epsilon The parameter value.
+     * @return Model with new epsilon parameter value.
+     */
+    public ANNClassificationTrainer withEpsilon(double epsilon) {
+        this.epsilon = epsilon;
+        return this;
+    }
+
+    /**
+     * Gets the distance.
+     *
+     * @return The parameter value.
+     */
+    public DistanceMeasure getDistance() {
+        return distance;
+    }
+
+    /**
+     * Set up the distance.
+     *
+     * @param distance The parameter value.
+     * @return Model with new distance parameter value.
+     */
+    public ANNClassificationTrainer withDistance(DistanceMeasure distance) {
+        this.distance = distance;
+        return this;
+    }
+
+    /**
+     * Gets the seed number.
+     *
+     * @return The parameter value.
+     */
+    public long getSeed() {
+        return seed;
+    }
+
+    /**
+     * Set up the seed.
+     *
+     * @param seed The parameter value.
+     * @return Model with new seed parameter value.
+     */
+    public ANNClassificationTrainer withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
+
+    /** Service class used for statistics. */
+    public static class CentroidStat implements Serializable {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 7624883170532045144L;
+
+        /** Count of points closest to the center with a given index. */
+        ConcurrentHashMap<Integer, ConcurrentHashMap<Double, Integer>> centroidStat = new ConcurrentHashMap<>();
+
+        /** Count of points closest to the center with a given index. */
+        ConcurrentHashMap<Integer, Integer> counts = new ConcurrentHashMap<>();
+
+        /** Set of unique labels. */
+        ConcurrentSkipListSet<Double> clsLblsSet = new ConcurrentSkipListSet<>();
+
+        /** Merge current */
+        CentroidStat merge(CentroidStat other) {
+            this.counts = MapUtil.mergeMaps(counts, other.counts, (i1, i2) -> i1 + i2, ConcurrentHashMap::new);
+            this.centroidStat = MapUtil.mergeMaps(centroidStat, other.centroidStat, (m1, m2) ->
+                MapUtil.mergeMaps(m1, m2, (i1, i2) -> i1 + i2, ConcurrentHashMap::new), ConcurrentHashMap::new);
+            this.clsLblsSet.addAll(other.clsLblsSet);
+            return this;
+        }
+
+        /** */
+        public ConcurrentSkipListSet<Double> labels() {
+            return clsLblsSet;
+        }
+
+        /** */
+        ConcurrentHashMap<Integer, ConcurrentHashMap<Double, Integer>> centroidStat() {
+            return centroidStat;
+        }
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNModelFormat.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNModelFormat.java
new file mode 100644
index 0000000..be09828
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ANNModelFormat.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn.ann;
+
+import java.io.Serializable;
+import org.apache.ignite.ml.knn.classification.KNNModelFormat;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.DistanceMeasure;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+
+/**
+ * ANN model representation.
+ *
+ * @see ANNClassificationModel
+ */
+public class ANNModelFormat extends KNNModelFormat implements Serializable {
+    /** Centroid statistics. */
+    private final ANNClassificationTrainer.CentroidStat candidatesStat;
+
+    /** The labeled set of candidates. */
+    private LabeledVectorSet<ProbableLabel, LabeledVector> candidates;
+
+    /**
+     * Creates an instance.
+     * @param k Amount of nearest neighbors.
+     * @param measure Distance measure.
+     * @param stgy kNN strategy.
+     * @param candidatesStat
+     */
+    public ANNModelFormat(int k,
+        DistanceMeasure measure,
+        NNStrategy stgy,
+        LabeledVectorSet<ProbableLabel, LabeledVector> candidates,
+        ANNClassificationTrainer.CentroidStat candidatesStat) {
+        this.k = k;
+        this.distanceMeasure = measure;
+        this.stgy = stgy;
+        this.candidates = candidates;
+        this.candidatesStat = candidatesStat;
+    }
+
+    /** */
+    public LabeledVectorSet<ProbableLabel, LabeledVector> getCandidates() {
+        return candidates;
+    }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        int res = 1;
+
+        res = res * 37 + k;
+        res = res * 37 + distanceMeasure.hashCode();
+        res = res * 37 + stgy.hashCode();
+        res = res * 37 + candidates.hashCode();
+
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean equals(Object obj) {
+        if (this == obj)
+            return true;
+
+        if (obj == null || getClass() != obj.getClass())
+            return false;
+
+        ANNModelFormat that = (ANNModelFormat)obj;
+
+        return k == that.k
+            && distanceMeasure.equals(that.distanceMeasure)
+            && stgy.equals(that.stgy)
+            && candidates.equals(that.candidates);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java
new file mode 100644
index 0000000..1fee123
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/ProbableLabel.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn.ann;
+
+import java.util.TreeMap;
+
+/**
+ * The special class for fuzzy labels presenting the probability distribution
+ * over the class labels.
+ */
+public class ProbableLabel {
+    /** Key is label, value is probability to be this class */
+    TreeMap<Double, Double> clsLbls;
+
+    /**
+     * The key is class label,
+     * the value is the probability to be an item of this class.
+     *
+     * @param clsLbls Class labels.
+     */
+    public ProbableLabel(TreeMap<Double, Double> clsLbls) {
+        this.clsLbls = clsLbls;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/package-info.java
new file mode 100644
index 0000000..c18867e
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/ann/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains main APIs for ANN classification algorithms.
+ */
+package org.apache.ignite.ml.knn.ann;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationModel.java
index c2c1c43..0d03ee5 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationModel.java
@@ -17,11 +17,10 @@
 
 package org.apache.ignite.ml.knn.classification;
 
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -30,52 +29,43 @@
 import java.util.stream.Stream;
 import org.apache.ignite.ml.Exportable;
 import org.apache.ignite.ml.Exporter;
-import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
-import org.apache.ignite.ml.math.distances.DistanceMeasure;
-import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.knn.NNClassificationModel;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
-import org.apache.ignite.ml.util.ModelTrace;
-import org.jetbrains.annotations.NotNull;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 
 /**
  * kNN algorithm model to solve multi-class classification task.
  */
-public class KNNClassificationModel implements Model<Vector, Double>, Exportable<KNNModelFormat> {
+public class KNNClassificationModel extends NNClassificationModel implements Exportable<KNNModelFormat> {
     /** */
     private static final long serialVersionUID = -127386523291350345L;
 
-    /** Amount of nearest neighbors. */
-    protected int k = 5;
-
-    /** Distance measure. */
-    protected DistanceMeasure distanceMeasure = new EuclideanDistance();
-
-    /** kNN strategy. */
-    protected KNNStrategy stgy = KNNStrategy.SIMPLE;
-
-    /** Dataset. */
-    private Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset;
+    /** Datasets. */
+    private List<Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>>> datasets;
 
     /**
      * Builds the model via prepared dataset.
+     *
      * @param dataset Specially prepared object to run algorithm over it.
      */
-    public KNNClassificationModel(Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset) {
-        this.dataset = dataset;
+    public KNNClassificationModel(Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset) {
+        this.datasets = new ArrayList<>();
+        if (dataset != null)
+            datasets.add(dataset);
     }
 
     /** {@inheritDoc} */
     @Override public Double apply(Vector v) {
-        if(dataset != null) {
+        if (!datasets.isEmpty()) {
             List<LabeledVector> neighbors = findKNearestNeighbors(v);
 
             return classify(neighbors, v, stgy);
-        } else
+        } else {
             throw new IllegalStateException("The train kNN dataset is null");
+        }
     }
 
     /** */
@@ -85,36 +75,6 @@
     }
 
     /**
-     * Set up parameter of the kNN model.
-     * @param k Amount of nearest neighbors.
-     * @return Model.
-     */
-    public KNNClassificationModel withK(int k) {
-        this.k = k;
-        return this;
-    }
-
-    /**
-     * Set up parameter of the kNN model.
-     * @param stgy Strategy of calculations.
-     * @return Model.
-     */
-    public KNNClassificationModel withStrategy(KNNStrategy stgy) {
-        this.stgy = stgy;
-        return this;
-    }
-
-    /**
-     * Set up parameter of the kNN model.
-     * @param distanceMeasure Distance measure.
-     * @return Model.
-     */
-    public KNNClassificationModel withDistanceMeasure(DistanceMeasure distanceMeasure) {
-        this.distanceMeasure = distanceMeasure;
-        return this;
-    }
-
-    /**
      * The main idea is calculation all distance pairs between given vector and all vectors in training set, sorting
      * them and finding k vectors with min distance with the given vector.
      *
@@ -122,99 +82,38 @@
      * @return K-nearest neighbors.
      */
     protected List<LabeledVector> findKNearestNeighbors(Vector v) {
-        List<LabeledVector> neighborsFromPartitions = dataset.compute(data -> {
-            TreeMap<Double, Set<Integer>> distanceIdxPairs = getDistances(v, data);
-            return Arrays.asList(getKClosestVectors(data, distanceIdxPairs));
-        }, (a, b) -> a == null ? b : Stream.concat(a.stream(), b.stream()).collect(Collectors.toList()));
+        List<LabeledVector> neighborsFromPartitions = datasets.stream()
+            .flatMap(dataset -> findKNearestNeighborsInDataset(v, dataset).stream())
+            .collect(Collectors.toList());
 
-        LabeledDataset<Double, LabeledVector> neighborsToFilter = buildLabeledDatasetOnListOfVectors(neighborsFromPartitions);
+        LabeledVectorSet<Double, LabeledVector> neighborsToFilter = buildLabeledDatasetOnListOfVectors(neighborsFromPartitions);
 
         return Arrays.asList(getKClosestVectors(neighborsToFilter, getDistances(v, neighborsToFilter)));
     }
 
+    private List<LabeledVector> findKNearestNeighborsInDataset(Vector v,
+        Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset) {
+        List<LabeledVector> neighborsFromPartitions = dataset.compute(data -> {
+            TreeMap<Double, Set<Integer>> distanceIdxPairs = getDistances(v, data);
+            return Arrays.asList(getKClosestVectors(data, distanceIdxPairs));
+        }, (a, b) -> {
+            if (a == null)
+                return b == null ? new ArrayList<>() : b;
+            if (b == null)
+                return a;
+            return Stream.concat(a.stream(), b.stream()).collect(Collectors.toList());
+        });
 
-    /** */
-    private LabeledDataset<Double, LabeledVector> buildLabeledDatasetOnListOfVectors(
-        List<LabeledVector> neighborsFromPartitions) {
-        LabeledVector[] arr = new LabeledVector[neighborsFromPartitions.size()];
-        for (int i = 0; i < arr.length; i++)
-            arr[i] = neighborsFromPartitions.get(i);
+        if(neighborsFromPartitions == null)
+            return Collections.emptyList();
 
-        return new LabeledDataset<Double, LabeledVector>(arr);
-    }
+        LabeledVectorSet<Double, LabeledVector> neighborsToFilter = buildLabeledDatasetOnListOfVectors(neighborsFromPartitions);
 
-    /**
-     * Iterates along entries in distance map and fill the resulting k-element array.
-     *
-     * @param trainingData The training data.
-     * @param distanceIdxPairs The distance map.
-     * @return K-nearest neighbors.
-     */
-    @NotNull private LabeledVector[] getKClosestVectors(LabeledDataset<Double, LabeledVector> trainingData,
-        TreeMap<Double, Set<Integer>> distanceIdxPairs) {
-        LabeledVector[] res;
-
-        if (trainingData.rowSize() <= k) {
-            res = new LabeledVector[trainingData.rowSize()];
-            for (int i = 0; i < trainingData.rowSize(); i++)
-                res[i] = trainingData.getRow(i);
-        }
-        else {
-            res = new LabeledVector[k];
-            int i = 0;
-            final Iterator<Double> iter = distanceIdxPairs.keySet().iterator();
-            while (i < k) {
-                double key = iter.next();
-                Set<Integer> idxs = distanceIdxPairs.get(key);
-                for (Integer idx : idxs) {
-                    res[i] = trainingData.getRow(idx);
-                    i++;
-                    if (i >= k)
-                        break; // go to next while-loop iteration
-                }
-            }
-        }
-
-        return res;
-    }
-
-    /**
-     * Computes distances between given vector and each vector in training dataset.
-     *
-     * @param v The given vector.
-     * @param trainingData The training dataset.
-     * @return Key - distanceMeasure from given features before features with idx stored in value. Value is presented
-     * with Set because there can be a few vectors with the same distance.
-     */
-    @NotNull private TreeMap<Double, Set<Integer>> getDistances(Vector v, LabeledDataset<Double, LabeledVector> trainingData) {
-        TreeMap<Double, Set<Integer>> distanceIdxPairs = new TreeMap<>();
-
-        for (int i = 0; i < trainingData.rowSize(); i++) {
-
-            LabeledVector labeledVector = trainingData.getRow(i);
-            if (labeledVector != null) {
-                double distance = distanceMeasure.compute(v, labeledVector.features());
-                putDistanceIdxPair(distanceIdxPairs, i, distance);
-            }
-        }
-        return distanceIdxPairs;
+        return Arrays.asList(getKClosestVectors(neighborsToFilter, getDistances(v, neighborsToFilter)));
     }
 
     /** */
-    private void putDistanceIdxPair(Map<Double, Set<Integer>> distanceIdxPairs, int i, double distance) {
-        if (distanceIdxPairs.containsKey(distance)) {
-            Set<Integer> idxs = distanceIdxPairs.get(distance);
-            idxs.add(i);
-        }
-        else {
-            Set<Integer> idxs = new HashSet<>();
-            idxs.add(i);
-            distanceIdxPairs.put(distance, idxs);
-        }
-    }
-
-    /** */
-    private double classify(List<LabeledVector> neighbors, Vector v, KNNStrategy stgy) {
+    private double classify(List<LabeledVector> neighbors, Vector v, NNStrategy stgy) {
         Map<Double, Double> clsVotes = new HashMap<>();
 
         for (LabeledVector neighbor : neighbors) {
@@ -235,54 +134,13 @@
         return getClassWithMaxVotes(clsVotes);
     }
 
-    /** */
-    private double getClassWithMaxVotes(Map<Double, Double> clsVotes) {
-        return Collections.max(clsVotes.entrySet(), Map.Entry.comparingByValue()).getKey();
-    }
-
-    /** */
-    private double getClassVoteForVector(KNNStrategy stgy, double distance) {
-        if (stgy.equals(KNNStrategy.WEIGHTED))
-            return 1 / distance; // strategy.WEIGHTED
-        else
-            return 1.0; // strategy.SIMPLE
-    }
-
-    /** {@inheritDoc} */
-    @Override public int hashCode() {
-        int res = 1;
-
-        res = res * 37 + k;
-        res = res * 37 + distanceMeasure.hashCode();
-        res = res * 37 + stgy.hashCode();
-
-        return res;
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean equals(Object obj) {
-        if (this == obj)
-            return true;
-
-        if (obj == null || getClass() != obj.getClass())
-            return false;
-
-        KNNClassificationModel that = (KNNClassificationModel)obj;
-
-        return k == that.k && distanceMeasure.equals(that.distanceMeasure) && stgy.equals(that.stgy);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString() {
-        return toString(false);
-    }
-
-    /** {@inheritDoc} */
-    @Override public String toString(boolean pretty) {
-        return ModelTrace.builder("KNNClassificationModel", pretty)
-            .addField("k", String.valueOf(k))
-            .addField("measure", distanceMeasure.getClass().getSimpleName())
-            .addField("strategy", stgy.name())
-            .toString();
+    /**
+     * Copy parameters from other model and save all datasets from it.
+     *
+     * @param model Model.
+     */
+    public void copyStateFrom(KNNClassificationModel model) {
+        this.copyParametersFrom(model);
+        datasets.addAll(model.datasets);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationTrainer.java
index e0a81f9..1a3ff73 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNClassificationTrainer.java
@@ -37,6 +37,24 @@
      */
     @Override public <K, V> KNNClassificationModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
-        return new KNNClassificationModel(KNNUtils.buildDataset(datasetBuilder, featureExtractor, lbExtractor));
+
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> KNNClassificationModel updateModel(KNNClassificationModel mdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        KNNClassificationModel res = new KNNClassificationModel(KNNUtils.buildDataset(datasetBuilder,
+            featureExtractor, lbExtractor));
+        if (mdl != null)
+            res.copyStateFrom(mdl);
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(KNNClassificationModel mdl) {
+        return true;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNModelFormat.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNModelFormat.java
index a2efe7f..a588b6e 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNModelFormat.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNModelFormat.java
@@ -27,13 +27,13 @@
  */
 public class KNNModelFormat implements Serializable {
     /** Amount of nearest neighbors. */
-    private int k;
+    protected int k;
 
     /** Distance measure. */
-    private DistanceMeasure distanceMeasure;
+    protected DistanceMeasure distanceMeasure;
 
     /** kNN strategy. */
-    private KNNStrategy stgy;
+    protected NNStrategy stgy;
 
     /** Gets amount of nearest neighbors.*/
     public int getK() {
@@ -46,17 +46,21 @@
     }
 
     /** Gets kNN strategy.*/
-    public KNNStrategy getStgy() {
+    public NNStrategy getStgy() {
         return stgy;
     }
 
+    /** */
+    public KNNModelFormat() {
+    }
+
     /**
      * Creates an instance.
      * @param k Amount of nearest neighbors.
      * @param measure Distance measure.
      * @param stgy kNN strategy.
      */
-    public KNNModelFormat(int k, DistanceMeasure measure, KNNStrategy stgy) {
+    public KNNModelFormat(int k, DistanceMeasure measure, NNStrategy stgy) {
         this.k = k;
         this.distanceMeasure = measure;
         this.stgy = stgy;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNStrategy.java
deleted file mode 100644
index 9a117de..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/KNNStrategy.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.knn.classification;
-
-/** This enum contains settings for kNN algorithm. */
-public enum KNNStrategy {
-    /** The default strategy. All k neighbors have the same weight which is independent
-     * on their distance to the query point.*/
-    SIMPLE,
-
-    /** A refinement of the k-NN classification algorithm is to weigh the contribution of each of the k neighbors
-     * according to their distance to the query point, giving greater weight to closer neighbors. */
-    WEIGHTED
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/NNStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/NNStrategy.java
new file mode 100644
index 0000000..bd43e8d
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/classification/NNStrategy.java
@@ -0,0 +1,29 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn.classification;
+
+/** This enum contains settings for kNN algorithm. */
+public enum NNStrategy {
+    /** The default strategy. All k neighbors have the same weight which is independent
+     * on their distance to the query point.*/
+    SIMPLE,
+
+    /** A refinement of the k-NN classification algorithm is to weigh the contribution of each of the k neighbors
+     * according to their distance to the query point, giving greater weight to closer neighbors. */
+    WEIGHTED
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionModel.java
index 16dcd8a..0761ff5 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionModel.java
@@ -22,8 +22,8 @@
 import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
 import org.apache.ignite.ml.math.exceptions.UnsupportedOperationException;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.util.ModelTrace;
 
 /**
@@ -45,7 +45,7 @@
      * Builds the model via prepared dataset.
      * @param dataset Specially prepared object to run algorithm over it.
      */
-    public KNNRegressionModel(Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset) {
+    public KNNRegressionModel(Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset) {
         super(dataset);
     }
 
@@ -95,7 +95,7 @@
 
     /** {@inheritDoc} */
     @Override public String toString(boolean pretty) {
-        return ModelTrace.builder("KNNClassificationModel", pretty)
+        return ModelTrace.builder("KNNRegressionModel", pretty)
             .addField("k", String.valueOf(k))
             .addField("measure", distanceMeasure.getClass().getSimpleName())
             .addField("strategy", stgy.name())
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionTrainer.java
index 395ce61..7a42dc8 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/knn/regression/KNNRegressionTrainer.java
@@ -37,6 +37,23 @@
      */
     public <K, V> KNNRegressionModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
-        return new KNNRegressionModel(KNNUtils.buildDataset(datasetBuilder, featureExtractor, lbExtractor));
+
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> KNNRegressionModel updateModel(KNNRegressionModel mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        KNNRegressionModel res = new KNNRegressionModel(KNNUtils.buildDataset(datasetBuilder,
+            featureExtractor, lbExtractor));
+        if (mdl != null)
+            res.copyStateFrom(mdl);
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(KNNRegressionModel mdl) {
+        return true;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/Isomorphism.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/Isomorphism.java
deleted file mode 100644
index 6f17e3a..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/Isomorphism.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math;
-
-import org.apache.ignite.ml.math.functions.IgniteFunction;
-
-/**
- *  Function from {@code K} to {@code V} with defined inverse.
- *
- * @param <K>
- * @param <V>
- */
-public class Isomorphism<K, V> {
-    /** */
-    private IgniteFunction<K, V> forward;
-    /** */
-    private IgniteFunction<V, K> back;
-
-    /**
-     * Identity isomorphism.
-     */
-    public static <K> Isomorphism<K, K> id() {
-        return new Isomorphism<>(a -> a, a -> a);
-    }
-
-    /**
-     * Build isomorphism with forward and backward functions.
-     *
-     * @param forward Forward.
-     * @param back Back.
-     */
-    public Isomorphism(IgniteFunction<K, V> forward, IgniteFunction<V, K> back) {
-        this.forward = forward;
-        this.back = back;
-    }
-
-    /**
-     * Forward function.
-     *
-     * @param k K.
-     */
-    public V forward(K k) {
-        return forward.apply(k);
-    }
-
-    /**
-     * Backward function.
-     *
-     * @param v V.
-     */
-    public K back(V v) {
-        return back.apply(v);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/MurmurHash.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/MurmurHash.java
deleted file mode 100644
index d1ebf53..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/MurmurHash.java
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-/**
- * This is a very fast, non-cryptographic hash suitable for general hash-based lookup.
- * <p>
- * See http://murmurhash.googlepages.com/ for mre details.</p>
- */
-public class MurmurHash {
-    /** Hide it. */
-    private MurmurHash() {
-    }
-
-    /**
-     * This produces exactly the same hash values as the final C+ version of MurmurHash3 and is
-     * thus suitable for producing the same hash values across platforms.
-     * <p>
-     * The 32 bit x86 version of this hash should be the fastest variant for relatively short keys like IDs.</p>
-     * <p>
-     * Note - The x86 and x64 versions do _not_ produce the same results, as the algorithms are
-     * optimized for their respective platforms.</p>
-     * <p>
-     * See also http://github.com/yonik/java_util for future updates to this method.</p>
-     *
-     * @param data Data to hash.
-     * @param off Where to start munging.
-     * @param len How many bytes to process.
-     * @param seed The seed to start with.
-     * @return 32 bit hash platform compatible with C++ MurmurHash3 implementation on x86.
-     */
-    public static int hash3X86(byte[] data, int off, int len, int seed) {
-        int c1 = 0xcc9e2d51;
-        int c2 = 0x1b873593;
-
-        int h1 = seed;
-        int roundedEnd = off + (len & 0xfffffffc);  // Round down to 4 byte block.
-
-        for (int i = off; i < roundedEnd; i += 4) {
-            int k1 = (data[i] & 0xff) | ((data[i + 1] & 0xff) << 8) | ((data[i + 2] & 0xff) << 16) | (data[i + 3] << 24);
-
-            k1 *= c1;
-            k1 = (k1 << 15) | (k1 >>> 17);
-            k1 *= c2;
-
-            h1 ^= k1;
-            h1 = (h1 << 13) | (h1 >>> 19);
-            h1 = h1 * 5 + 0xe6546b64;
-        }
-
-        // Tail.
-        int k1 = 0;
-
-        switch (len & 0x03) {
-            case 3:
-                k1 = (data[roundedEnd + 2] & 0xff) << 16;
-                // Fallthrough - WTF?
-            case 2:
-                k1 |= (data[roundedEnd + 1] & 0xff) << 8;
-                // Fallthrough - WTF?
-            case 1:
-                k1 |= data[roundedEnd] & 0xff;
-                k1 *= c1;
-                k1 = (k1 << 15) | (k1 >>> 17);
-                k1 *= c2;
-                h1 ^= k1;
-            default:
-        }
-
-        // Finalization.
-        h1 ^= len;
-
-        h1 ^= h1 >>> 16;
-        h1 *= 0x85ebca6b;
-        h1 ^= h1 >>> 13;
-        h1 *= 0xc2b2ae35;
-        h1 ^= h1 >>> 16;
-
-        return h1;
-    }
-
-    /**
-     * Hashes an int.
-     *
-     * @param data The int to hash.
-     * @param seed The seed to start with.
-     * @return The 32 bit hash of the bytes in question.
-     */
-    public static int hash(int data, int seed) {
-        byte[] arr = new byte[] {
-            (byte)(data >>> 24),
-            (byte)(data >>> 16),
-            (byte)(data >>> 8),
-            (byte)data
-        };
-
-        return hash(ByteBuffer.wrap(arr), seed);
-    }
-
-    /**
-     * Hashes bytes in an array.
-     *
-     * @param data The bytes to hash.
-     * @param seed The seed to start with.
-     * @return The 32 bit hash of the bytes in question.
-     */
-    public static int hash(byte[] data, int seed) {
-        return hash(ByteBuffer.wrap(data), seed);
-    }
-
-    /**
-     * Hashes bytes in part of an array.
-     *
-     * @param data The data to hash.
-     * @param off Where to start munging.
-     * @param len How many bytes to process.
-     * @param seed The seed to start with.
-     * @return The 32-bit hash of the data in question.
-     */
-    public static int hash(byte[] data, int off, int len, int seed) {
-        return hash(ByteBuffer.wrap(data, off, len), seed);
-    }
-
-    /**
-     * Hashes the bytes in a buffer from the current position to the limit.
-     *
-     * @param buf The bytes to hash.
-     * @param seed The seed to start with.
-     * @return The 32 bit murmur hash of the bytes in the buffer.
-     */
-    public static int hash(ByteBuffer buf, int seed) {
-        ByteOrder byteOrder = buf.order();
-        buf.order(ByteOrder.LITTLE_ENDIAN);
-
-        int m = 0x5bd1e995;
-        int r = 24;
-
-        int h = seed ^ buf.remaining();
-
-        while (buf.remaining() >= 4) {
-            int k = buf.getInt();
-
-            k *= m;
-            k ^= k >>> r;
-            k *= m;
-
-            h *= m;
-            h ^= k;
-        }
-
-        if (buf.remaining() > 0) {
-            ByteBuffer finish = ByteBuffer.allocate(4).order(ByteOrder.LITTLE_ENDIAN);
-
-            finish.put(buf).rewind();
-
-            h ^= finish.getInt();
-            h *= m;
-        }
-
-        h ^= h >>> 13;
-        h *= m;
-        h ^= h >>> 15;
-
-        buf.order(byteOrder);
-
-        return h;
-    }
-
-    /**
-     * @param data The data to hash.
-     * @param seed The seed to start with.
-     * @return Hash value for given data and seed.
-     */
-    public static long hash64A(byte[] data, int seed) {
-        return hash64A(ByteBuffer.wrap(data), seed);
-    }
-
-    /**
-     * @param data The data to hash.
-     * @param off Where to start munging.
-     * @param len How many bytes to process.
-     * @param seed The seed to start with.
-     */
-    public static long hash64A(byte[] data, int off, int len, int seed) {
-        return hash64A(ByteBuffer.wrap(data, off, len), seed);
-    }
-
-    /**
-     * @param buf The data to hash.
-     * @param seed The seed to start with.
-     */
-    public static long hash64A(ByteBuffer buf, int seed) {
-        ByteOrder byteOrder = buf.order();
-        buf.order(ByteOrder.LITTLE_ENDIAN);
-
-        long m = 0xc6a4a7935bd1e995L;
-        int r = 47;
-
-        long h = seed ^ (buf.remaining() * m);
-
-        while (buf.remaining() >= 8) {
-            long k = buf.getLong();
-
-            k *= m;
-            k ^= k >>> r;
-            k *= m;
-
-            h ^= k;
-            h *= m;
-        }
-
-        if (buf.remaining() > 0) {
-            ByteBuffer finish = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
-
-            finish.put(buf).rewind();
-
-            h ^= finish.getLong();
-            h *= m;
-        }
-
-        h ^= h >>> r;
-        h *= m;
-        h ^= h >>> r;
-
-        buf.order(byteOrder);
-
-        return h;
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/Precision.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/Precision.java
deleted file mode 100644
index 830644c..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/Precision.java
+++ /dev/null
@@ -1,588 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math;
-
-import java.math.BigDecimal;
-import org.apache.ignite.ml.math.exceptions.MathArithmeticException;
-import org.apache.ignite.ml.math.exceptions.MathIllegalArgumentException;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Utilities for comparing numbers. *
- */
-public class Precision {
-    /**
-     * <p>
-     * Largest double-precision floating-point number such that
-     * {@code 1 + EPSILON} is numerically equal to 1. This value is an upper
-     * bound on the relative error due to rounding real numbers to double
-     * precision floating-point numbers.
-     * </p>
-     * <p>
-     * In IEEE 754 arithmetic, this is 2<sup>-53</sup>.
-     * </p>
-     *
-     * @see <a href="http://en.wikipedia.org/wiki/Machine_epsilon">Machine epsilon</a>
-     */
-    public static final double EPSILON;
-
-    /**
-     * Safe minimum, such that {@code 1 / SAFE_MIN} does not overflow.
-     * <br/>
-     * In IEEE 754 arithmetic, this is also the smallest normalized
-     * number 2<sup>-1022</sup>.
-     */
-    public static final double SAFE_MIN;
-
-    /** Exponent offset in IEEE754 representation. */
-    private static final long EXPONENT_OFFSET = 1023L;
-
-    /** Offset to order signed double numbers lexicographically. */
-    private static final long SGN_MASK = 0x8000000000000000L;
-    /** Offset to order signed double numbers lexicographically. */
-    private static final int SGN_MASK_FLOAT = 0x80000000;
-    /** Positive zero. */
-    private static final double POSITIVE_ZERO = 0d;
-    /** Positive zero bits. */
-    private static final long POSITIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(+0.0);
-    /** Negative zero bits. */
-    private static final long NEGATIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(-0.0);
-    /** Positive zero bits. */
-    private static final int POSITIVE_ZERO_FLOAT_BITS = Float.floatToRawIntBits(+0.0f);
-    /** Negative zero bits. */
-    private static final int NEGATIVE_ZERO_FLOAT_BITS = Float.floatToRawIntBits(-0.0f);
-    /** */
-    private static final String INVALID_ROUNDING_METHOD = "invalid rounding method {0}, " +
-        "valid methods: {1} ({2}), {3} ({4}), {5} ({6}), {7} ({8}), {9} ({10}), {11} ({12}), {13} ({14}), {15} ({16})";
-
-    static {
-        /*
-         *  This was previously expressed as = 0x1.0p-53;
-         *  However, OpenJDK (Sparc Solaris) cannot handle such small
-         *  constants: MATH-721
-         */
-        EPSILON = Double.longBitsToDouble((EXPONENT_OFFSET - 53L) << 52);
-
-        /*
-         * This was previously expressed as = 0x1.0p-1022;
-         * However, OpenJDK (Sparc Solaris) cannot handle such small
-         * constants: MATH-721
-         */
-        SAFE_MIN = Double.longBitsToDouble((EXPONENT_OFFSET - 1022L) << 52);
-    }
-
-    /**
-     * Private constructor.
-     */
-    private Precision() {
-    }
-
-    /**
-     * Compares two numbers given some amount of allowed error.
-     *
-     * @param x the first number
-     * @param y the second number
-     * @param eps the amount of error to allow when checking for equality
-     * @return <ul><li>0 if  {@link #equals(double, double, double) equals(x, y, eps)}</li> <li>&lt; 0 if !{@link
-     * #equals(double, double, double) equals(x, y, eps)} &amp;&amp; x &lt; y</li> <li>> 0 if !{@link #equals(double,
-     * double, double) equals(x, y, eps)} &amp;&amp; x > y or either argument is NaN</li></ul>
-     */
-    public static int compareTo(double x, double y, double eps) {
-        if (equals(x, y, eps))
-            return 0;
-        else if (x < y)
-            return -1;
-        return 1;
-    }
-
-    /**
-     * Compares two numbers given some amount of allowed error.
-     * Two float numbers are considered equal if there are {@code (maxUlps - 1)}
-     * (or fewer) floating point numbers between them, i.e. two adjacent floating
-     * point numbers are considered equal.
-     * Adapted from <a
-     * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/">
-     * Bruce Dawson</a>. Returns {@code false} if either of the arguments is NaN.
-     *
-     * @param x first value
-     * @param y second value
-     * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
-     * @return <ul><li>0 if  {@link #equals(double, double, int) equals(x, y, maxUlps)}</li> <li>&lt; 0 if !{@link
-     * #equals(double, double, int) equals(x, y, maxUlps)} &amp;&amp; x &lt; y</li> <li>&gt; 0 if !{@link
-     * #equals(double, double, int) equals(x, y, maxUlps)} &amp;&amp; x > y or either argument is NaN</li></ul>
-     */
-    public static int compareTo(final double x, final double y, final int maxUlps) {
-        if (equals(x, y, maxUlps))
-            return 0;
-        else if (x < y)
-            return -1;
-        return 1;
-    }
-
-    /**
-     * Returns true iff they are equal as defined by
-     * {@link #equals(float, float, int) equals(x, y, 1)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @return {@code true} if the values are equal.
-     */
-    public static boolean equals(float x, float y) {
-        return equals(x, y, 1);
-    }
-
-    /**
-     * Returns true if both arguments are NaN or they are
-     * equal as defined by {@link #equals(float, float) equals(x, y, 1)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @return {@code true} if the values are equal or both are NaN.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(float x, float y) {
-        return (x != x || y != y) ? !(x != x ^ y != y) : equals(x, y, 1);
-    }
-
-    /**
-     * Returns true if the arguments are equal or within the range of allowed
-     * error (inclusive).  Returns {@code false} if either of the arguments
-     * is NaN.
-     *
-     * @param x first value
-     * @param y second value
-     * @param eps the amount of absolute error to allow.
-     * @return {@code true} if the values are equal or within range of each other.
-     * @since 2.2
-     */
-    public static boolean equals(float x, float y, float eps) {
-        return equals(x, y, 1) || Math.abs(y - x) <= eps;
-    }
-
-    /**
-     * Returns true if the arguments are both NaN, are equal, or are within the range
-     * of allowed error (inclusive).
-     *
-     * @param x first value
-     * @param y second value
-     * @param eps the amount of absolute error to allow.
-     * @return {@code true} if the values are equal or within range of each other, or both are NaN.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(float x, float y, float eps) {
-        return equalsIncludingNaN(x, y) || (Math.abs(y - x) <= eps);
-    }
-
-    /**
-     * Returns true if the arguments are equal or within the range of allowed
-     * error (inclusive).
-     * Two float numbers are considered equal if there are {@code (maxUlps - 1)}
-     * (or fewer) floating point numbers between them, i.e. two adjacent floating
-     * point numbers are considered equal.
-     * Adapted from <a
-     * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/">
-     * Bruce Dawson</a>.  Returns {@code false} if either of the arguments is NaN.
-     *
-     * @param x first value
-     * @param y second value
-     * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
-     * @return {@code true} if there are fewer than {@code maxUlps} floating point values between {@code x} and {@code
-     * y}.
-     * @since 2.2
-     */
-    public static boolean equals(final float x, final float y, final int maxUlps) {
-
-        final int xInt = Float.floatToRawIntBits(x);
-        final int yInt = Float.floatToRawIntBits(y);
-
-        final boolean isEqual;
-        if (((xInt ^ yInt) & SGN_MASK_FLOAT) == 0) {
-            // number have same sign, there is no risk of overflow
-            isEqual = Math.abs(xInt - yInt) <= maxUlps;
-        }
-        else {
-            // number have opposite signs, take care of overflow
-            final int deltaPlus;
-            final int deltaMinus;
-            if (xInt < yInt) {
-                deltaPlus = yInt - POSITIVE_ZERO_FLOAT_BITS;
-                deltaMinus = xInt - NEGATIVE_ZERO_FLOAT_BITS;
-            }
-            else {
-                deltaPlus = xInt - POSITIVE_ZERO_FLOAT_BITS;
-                deltaMinus = yInt - NEGATIVE_ZERO_FLOAT_BITS;
-            }
-
-            if (deltaPlus > maxUlps)
-                isEqual = false;
-            else
-                isEqual = deltaMinus <= (maxUlps - deltaPlus);
-
-        }
-
-        return isEqual && !Float.isNaN(x) && !Float.isNaN(y);
-
-    }
-
-    /**
-     * Returns true if the arguments are both NaN or if they are equal as defined
-     * by {@link #equals(float, float, int) equals(x, y, maxUlps)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
-     * @return {@code true} if both arguments are NaN or if there are less than {@code maxUlps} floating point values
-     * between {@code x} and {@code y}.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(float x, float y, int maxUlps) {
-        return (x != x || y != y) ? !(x != x ^ y != y) : equals(x, y, maxUlps);
-    }
-
-    /**
-     * Returns true iff they are equal as defined by
-     * {@link #equals(double, double, int) equals(x, y, 1)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @return {@code true} if the values are equal.
-     */
-    public static boolean equals(double x, double y) {
-        return equals(x, y, 1);
-    }
-
-    /**
-     * Returns true if the arguments are both NaN or they are
-     * equal as defined by {@link #equals(double, double) equals(x, y, 1)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @return {@code true} if the values are equal or both are NaN.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(double x, double y) {
-        return (x != x || y != y) ? !(x != x ^ y != y) : equals(x, y, 1);
-    }
-
-    /**
-     * Returns {@code true} if there is no double value strictly between the
-     * arguments or the difference between them is within the range of allowed
-     * error (inclusive). Returns {@code false} if either of the arguments
-     * is NaN.
-     *
-     * @param x First value.
-     * @param y Second value.
-     * @param eps Amount of allowed absolute error.
-     * @return {@code true} if the values are two adjacent floating point numbers or they are within range of each
-     * other.
-     */
-    public static boolean equals(double x, double y, double eps) {
-        return equals(x, y, 1) || Math.abs(y - x) <= eps;
-    }
-
-    /**
-     * Returns {@code true} if there is no double value strictly between the
-     * arguments or the relative difference between them is less than or equal
-     * to the given tolerance. Returns {@code false} if either of the arguments
-     * is NaN.
-     *
-     * @param x First value.
-     * @param y Second value.
-     * @param eps Amount of allowed relative error.
-     * @return {@code true} if the values are two adjacent floating point numbers or they are within range of each
-     * other.
-     * @since 3.1
-     */
-    public static boolean equalsWithRelativeTolerance(double x, double y, double eps) {
-        if (equals(x, y, 1))
-            return true;
-
-        final double absMax = Math.max(Math.abs(x), Math.abs(y));
-        final double relativeDifference = Math.abs((x - y) / absMax);
-
-        return relativeDifference <= eps;
-    }
-
-    /**
-     * Returns true if the arguments are both NaN, are equal or are within the range
-     * of allowed error (inclusive).
-     *
-     * @param x first value
-     * @param y second value
-     * @param eps the amount of absolute error to allow.
-     * @return {@code true} if the values are equal or within range of each other, or both are NaN.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(double x, double y, double eps) {
-        return equalsIncludingNaN(x, y) || (Math.abs(y - x) <= eps);
-    }
-
-    /**
-     * Returns true if the arguments are equal or within the range of allowed
-     * error (inclusive).
-     * <p>
-     * Two float numbers are considered equal if there are {@code (maxUlps - 1)}
-     * (or fewer) floating point numbers between them, i.e. two adjacent
-     * floating point numbers are considered equal.
-     * </p>
-     * <p>
-     * Adapted from <a
-     * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/">
-     * Bruce Dawson</a>. Returns {@code false} if either of the arguments is NaN.
-     * </p>
-     *
-     * @param x first value
-     * @param y second value
-     * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
-     * @return {@code true} if there are fewer than {@code maxUlps} floating point values between {@code x} and {@code
-     * y}.
-     */
-    public static boolean equals(final double x, final double y, final int maxUlps) {
-
-        final long xInt = Double.doubleToRawLongBits(x);
-        final long yInt = Double.doubleToRawLongBits(y);
-
-        final boolean isEqual;
-        if (((xInt ^ yInt) & SGN_MASK) == 0L) {
-            // number have same sign, there is no risk of overflow
-            isEqual = Math.abs(xInt - yInt) <= maxUlps;
-        }
-        else {
-            // number have opposite signs, take care of overflow
-            final long deltaPlus;
-            final long deltaMinus;
-            if (xInt < yInt) {
-                deltaPlus = yInt - POSITIVE_ZERO_DOUBLE_BITS;
-                deltaMinus = xInt - NEGATIVE_ZERO_DOUBLE_BITS;
-            }
-            else {
-                deltaPlus = xInt - POSITIVE_ZERO_DOUBLE_BITS;
-                deltaMinus = yInt - NEGATIVE_ZERO_DOUBLE_BITS;
-            }
-
-            if (deltaPlus > maxUlps)
-                isEqual = false;
-            else
-                isEqual = deltaMinus <= (maxUlps - deltaPlus);
-
-        }
-
-        return isEqual && !Double.isNaN(x) && !Double.isNaN(y);
-
-    }
-
-    /**
-     * Returns true if both arguments are NaN or if they are equal as defined
-     * by {@link #equals(double, double, int) equals(x, y, maxUlps)}.
-     *
-     * @param x first value
-     * @param y second value
-     * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
-     * @return {@code true} if both arguments are NaN or if there are less than {@code maxUlps} floating point values
-     * between {@code x} and {@code y}.
-     * @since 2.2
-     */
-    public static boolean equalsIncludingNaN(double x, double y, int maxUlps) {
-        return (x != x || y != y) ? !(x != x ^ y != y) : equals(x, y, maxUlps);
-    }
-
-    /**
-     * Rounds the given value to the specified number of decimal places.
-     * The value is rounded using the {@link BigDecimal#ROUND_HALF_UP} method.
-     *
-     * @param x Value to round.
-     * @param scale Number of digits to the right of the decimal point.
-     * @return the rounded value.
-     * @since 1.1 (previously in {@code MathUtils}, moved as of version 3.0)
-     */
-    public static double round(double x, int scale) {
-        return round(x, scale, BigDecimal.ROUND_HALF_UP);
-    }
-
-    /**
-     * Rounds the given value to the specified number of decimal places.
-     * The value is rounded using the given method which is any method defined
-     * in {@link BigDecimal}.
-     * If {@code x} is infinite or {@code NaN}, then the value of {@code x} is
-     * returned unchanged, regardless of the other parameters.
-     *
-     * @param x Value to round.
-     * @param scale Number of digits to the right of the decimal point.
-     * @param roundingMtd Rounding method as defined in {@link BigDecimal}.
-     * @return the rounded value.
-     * @throws ArithmeticException if {@code roundingMethod == ROUND_UNNECESSARY} and the specified scaling operation
-     * would require rounding.
-     * @throws IllegalArgumentException if {@code roundingMethod} does not represent a valid rounding mode.
-     * @since 1.1 (previously in {@code MathUtils}, moved as of version 3.0)
-     */
-    public static double round(double x, int scale, int roundingMtd) {
-        try {
-            final double rounded = (new BigDecimal(Double.toString(x))
-                .setScale(scale, roundingMtd))
-                .doubleValue();
-            // MATH-1089: negative values rounded to zero should result in negative zero
-            return rounded == POSITIVE_ZERO ? POSITIVE_ZERO * x : rounded;
-        }
-        catch (NumberFormatException ex) {
-            if (Double.isInfinite(x))
-                return x;
-            else
-                return Double.NaN;
-        }
-    }
-
-    /**
-     * Rounds the given value to the specified number of decimal places.
-     * The value is rounded using the {@link BigDecimal#ROUND_HALF_UP} method.
-     *
-     * @param x Value to round.
-     * @param scale Number of digits to the right of the decimal point.
-     * @return the rounded value.
-     * @since 1.1 (previously in {@code MathUtils}, moved as of version 3.0)
-     */
-    public static float round(float x, int scale) {
-        return round(x, scale, BigDecimal.ROUND_HALF_UP);
-    }
-
-    /**
-     * Rounds the given value to the specified number of decimal places.
-     * The value is rounded using the given method which is any method defined
-     * in {@link BigDecimal}.
-     *
-     * @param x Value to round.
-     * @param scale Number of digits to the right of the decimal point.
-     * @param roundingMtd Rounding method as defined in {@link BigDecimal}.
-     * @return the rounded value.
-     * @throws MathArithmeticException if an exact operation is required but result is not exact
-     * @throws MathIllegalArgumentException if {@code roundingMethod} is not a valid rounding method.
-     * @since 1.1 (previously in {@code MathUtils}, moved as of version 3.0)
-     */
-    public static float round(float x, int scale, int roundingMtd)
-        throws MathArithmeticException, MathIllegalArgumentException {
-        final float sign = Math.copySign(1f, x);
-        final float factor = (float)Math.pow(10.0f, scale) * sign;
-        return (float)roundUnscaled(x * factor, sign, roundingMtd) / factor;
-    }
-
-    /**
-     * Rounds the given non-negative value to the "nearest" integer. Nearest is
-     * determined by the rounding method specified. Rounding methods are defined
-     * in {@link BigDecimal}.
-     *
-     * @param unscaled Value to round.
-     * @param sign Sign of the original, scaled value.
-     * @param roundingMtd Rounding method, as defined in {@link BigDecimal}.
-     * @return the rounded value.
-     * @throws MathArithmeticException if an exact operation is required but result is not exact
-     * @throws MathIllegalArgumentException if {@code roundingMethod} is not a valid rounding method.
-     * @since 1.1 (previously in {@code MathUtils}, moved as of version 3.0)
-     */
-    private static double roundUnscaled(double unscaled, double sign, int roundingMtd)
-        throws MathArithmeticException, MathIllegalArgumentException {
-        switch (roundingMtd) {
-            case BigDecimal.ROUND_CEILING:
-                if (sign == -1)
-                    unscaled = Math.floor(Math.nextAfter(unscaled, Double.NEGATIVE_INFINITY));
-                else
-                    unscaled = Math.ceil(Math.nextAfter(unscaled, Double.POSITIVE_INFINITY));
-                break;
-            case BigDecimal.ROUND_DOWN:
-                unscaled = Math.floor(Math.nextAfter(unscaled, Double.NEGATIVE_INFINITY));
-                break;
-            case BigDecimal.ROUND_FLOOR:
-                if (sign == -1)
-                    unscaled = Math.ceil(Math.nextAfter(unscaled, Double.POSITIVE_INFINITY));
-                else
-                    unscaled = Math.floor(Math.nextAfter(unscaled, Double.NEGATIVE_INFINITY));
-                break;
-            case BigDecimal.ROUND_HALF_DOWN: {
-                unscaled = Math.nextAfter(unscaled, Double.NEGATIVE_INFINITY);
-                double fraction = unscaled - Math.floor(unscaled);
-                if (fraction > 0.5)
-                    unscaled = Math.ceil(unscaled);
-                else
-                    unscaled = Math.floor(unscaled);
-                break;
-            }
-            case BigDecimal.ROUND_HALF_EVEN: {
-                double fraction = unscaled - Math.floor(unscaled);
-                if (fraction > 0.5)
-                    unscaled = Math.ceil(unscaled);
-                else if (fraction < 0.5)
-                    unscaled = Math.floor(unscaled);
-                else {
-                    // The following equality test is intentional and needed for rounding purposes
-                    if (Math.floor(unscaled) / 2.0 == Math.floor(Math.floor(unscaled) / 2.0)) { // even
-                        unscaled = Math.floor(unscaled);
-                    }
-                    else { // odd
-                        unscaled = Math.ceil(unscaled);
-                    }
-                }
-                break;
-            }
-            case BigDecimal.ROUND_HALF_UP: {
-                unscaled = Math.nextAfter(unscaled, Double.POSITIVE_INFINITY);
-                double fraction = unscaled - Math.floor(unscaled);
-                if (fraction >= 0.5)
-                    unscaled = Math.ceil(unscaled);
-                else
-                    unscaled = Math.floor(unscaled);
-                break;
-            }
-            case BigDecimal.ROUND_UNNECESSARY:
-                if (unscaled != Math.floor(unscaled))
-                    throw new MathArithmeticException();
-                break;
-            case BigDecimal.ROUND_UP:
-                // do not round if the discarded fraction is equal to zero
-                if (unscaled != Math.floor(unscaled))
-                    unscaled = Math.ceil(Math.nextAfter(unscaled, Double.POSITIVE_INFINITY));
-                break;
-            default:
-                throw new MathIllegalArgumentException(INVALID_ROUNDING_METHOD,
-                    roundingMtd,
-                    "ROUND_CEILING", BigDecimal.ROUND_CEILING,
-                    "ROUND_DOWN", BigDecimal.ROUND_DOWN,
-                    "ROUND_FLOOR", BigDecimal.ROUND_FLOOR,
-                    "ROUND_HALF_DOWN", BigDecimal.ROUND_HALF_DOWN,
-                    "ROUND_HALF_EVEN", BigDecimal.ROUND_HALF_EVEN,
-                    "ROUND_HALF_UP", BigDecimal.ROUND_HALF_UP,
-                    "ROUND_UNNECESSARY", BigDecimal.ROUND_UNNECESSARY,
-                    "ROUND_UP", BigDecimal.ROUND_UP);
-        }
-        return unscaled;
-    }
-
-    /**
-     * Computes a number {@code delta} close to {@code originalDelta} with
-     * the property that <pre><code>
-     *   x + delta - x
-     * </code></pre>
-     * is exactly machine-representable.
-     * This is useful when computing numerical derivatives, in order to reduce
-     * roundoff errors.
-     *
-     * @param x Value.
-     * @param originalDelta Offset value.
-     * @return a number {@code delta} so that {@code x + delta} and {@code x} differ by a representable floating number.
-     */
-    public static double representableDelta(double x, double originalDelta) {
-        return x + originalDelta - x;
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/EuclideanDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/EuclideanDistance.java
index fa5c21c..98f994f 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/EuclideanDistance.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/EuclideanDistance.java
@@ -41,7 +41,7 @@
         double res = 0.0;
 
         for (int i = 0; i < b.length; i++)
-            res+= Math.abs(b[i] - a.get(i));
+            res += Math.pow(Math.abs(b[i] - a.get(i)), 2.0);
 
         return Math.sqrt(res);
     }
@@ -63,4 +63,9 @@
 
         return obj != null && getClass() == obj.getClass();
     }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return getClass().hashCode();
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/HammingDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/HammingDistance.java
index ef50a69..c886f53 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/HammingDistance.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/HammingDistance.java
@@ -64,4 +64,9 @@
 
         return obj != null && getClass() == obj.getClass();
     }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return getClass().hashCode();
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/ManhattanDistance.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/ManhattanDistance.java
index bd3df14..fec3120 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/ManhattanDistance.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/distances/ManhattanDistance.java
@@ -58,4 +58,9 @@
 
         return obj != null && getClass() == obj.getClass();
     }
+
+    /** {@inheritDoc} */
+    @Override public int hashCode() {
+        return getClass().hashCode();
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/ConvergenceException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/ConvergenceException.java
deleted file mode 100644
index 64687b6..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/ConvergenceException.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Error thrown when a numerical computation can not be performed because the
- * numerical result failed to converge to a finite value.
- */
-public class ConvergenceException extends MathIllegalStateException {
-    /** Serializable version Id. */
-    private static final long serialVersionUID = 4330003017885151975L;
-
-    /** */
-    private static final String CONVERGENCE_FAILED = "convergence failed";
-
-    /**
-     * Construct the exception.
-     */
-    public ConvergenceException() {
-        this(CONVERGENCE_FAILED);
-    }
-
-    /**
-     * Construct the exception with a specific context and arguments.
-     *
-     * @param msg Message pattern providing the specific context of the error.
-     * @param args Arguments.
-     */
-    public ConvergenceException(String msg, Object... args) {
-        super(msg, args);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/InsufficientDataException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/InsufficientDataException.java
deleted file mode 100644
index a57997d..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/InsufficientDataException.java
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Exception to be thrown when there is insufficient data to perform a computation.
- */
-public class InsufficientDataException extends MathIllegalArgumentException {
-    /** Serializable version Id. */
-    private static final long serialVersionUID = -2629324471511903359L;
-
-    /** */
-    private static final String INSUFFICIENT_DATA = "Insufficient data.";
-
-    /**
-     * Construct the exception.
-     */
-    public InsufficientDataException() {
-        this(INSUFFICIENT_DATA);
-    }
-
-    /**
-     * Construct the exception.
-     */
-    public InsufficientDataException(String msg, Object... args) {
-        super(msg, args);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathArithmeticException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathArithmeticException.java
deleted file mode 100644
index ccd019c..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathArithmeticException.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Base class for arithmetic exceptions.
- */
-public class MathArithmeticException extends MathRuntimeException {
-    /** Serializable version Id. */
-    private static final long serialVersionUID = -6024911025449780478L;
-
-    /**
-     * Default constructor.
-     */
-    public MathArithmeticException() {
-        this("Arithmetic exception.");
-    }
-
-    /**
-     * Constructor with a specific message.
-     *
-     * @param format Message pattern providing the specific context of the error.
-     * @param args Arguments.
-     */
-    public MathArithmeticException(String format, Object... args) {
-        super(format, args);
-    }
-
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalNumberException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalNumberException.java
deleted file mode 100644
index b2abf63..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalNumberException.java
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Base class for exceptions raised by a wrong number.
- * This class is not intended to be instantiated directly: it should serve
- * as a base class to create all the exceptions that are raised because some
- * precondition is violated by a number argument.
- */
-public class MathIllegalNumberException extends MathIllegalArgumentException {
-    /** Serializable version Id. */
-    private static final long serialVersionUID = -7447085893598031110L;
-
-    /** Requested. */
-    private final Number arg;
-
-    /**
-     * Construct an exception.
-     *
-     * @param msg Localizable pattern.
-     * @param wrong Wrong number.
-     * @param arguments Arguments.
-     */
-    protected MathIllegalNumberException(String msg, Number wrong, Object... arguments) {
-        super(msg, wrong, arguments);
-        arg = wrong;
-    }
-
-    /**
-     * @return the requested value.
-     */
-    public Number getArg() {
-        return arg;
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalStateException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalStateException.java
deleted file mode 100644
index 6c63086..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/MathIllegalStateException.java
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * Base class for all exceptions that signal that the process
- * throwing the exception is in a state that does not comply with
- * the set of states that it is designed to be in.
- */
-public class MathIllegalStateException extends MathRuntimeException {
-    /** Serializable version Id. */
-    private static final long serialVersionUID = -6024911025449780478L;
-
-    /** */
-    private static final String ILLEGAL_STATE = "Illegal state.";
-
-    /**
-     * Simple constructor.
-     *
-     * @param msg Message pattern explaining the cause of the error.
-     * @param args Arguments.
-     */
-    public MathIllegalStateException(String msg, Object... args) {
-        super(msg, args);
-    }
-
-    /**
-     * Default constructor.
-     */
-    public MathIllegalStateException() {
-        this(ILLEGAL_STATE);
-    }
-
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonPositiveDefiniteMatrixException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonPositiveDefiniteMatrixException.java
deleted file mode 100644
index 2e588dc..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonPositiveDefiniteMatrixException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This exception is used to indicate error condition of matrix elements failing the positivity check.
- */
-public class NonPositiveDefiniteMatrixException extends MathIllegalArgumentException {
-    /**
-     * Construct an exception.
-     *
-     * @param wrong Value that fails the positivity check.
-     * @param idx Row (and column) index.
-     * @param threshold Absolute positivity threshold.
-     */
-    public NonPositiveDefiniteMatrixException(double wrong, int idx, double threshold) {
-        super("Matrix must be positive, wrong element located on diagonal with index %d and has value %f with this threshold %f",
-            idx, wrong, threshold);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonSymmetricMatrixException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonSymmetricMatrixException.java
deleted file mode 100644
index 7c563fe..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NonSymmetricMatrixException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-import org.apache.ignite.IgniteException;
-
-/**
- * This exception is used to indicate error condition of matrix failing the symmetry check.
- */
-public class NonSymmetricMatrixException extends IgniteException {
-    /**
-     * @param row Row.
-     * @param col Column.
-     * @param threshold Threshold.
-     */
-    public NonSymmetricMatrixException(int row, int col, double threshold) {
-        super("Symmetric matrix expected, the symmetry is broken on row "
-            + row + " and col " + col + " with this threshold " + threshold);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NullArgumentException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NullArgumentException.java
deleted file mode 100644
index 58a6fa3..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NullArgumentException.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * This class is based on the corresponding class from Apache Common Math lib.
- * All conditions checks that fail due to a {@code null} argument must throw
- * this exception.
- * This class is meant to signal a precondition violation ("null is an illegal
- * argument").
- */
-public class NullArgumentException extends NullPointerException {
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NumberIsTooSmallException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NumberIsTooSmallException.java
deleted file mode 100644
index 7427592..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/NumberIsTooSmallException.java
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * Exception to be thrown when a number is too small.
- */
-public class NumberIsTooSmallException extends MathIllegalNumberException {
-    /** */
-    private static final String NUMBER_TOO_SMALL = "Number {1} is smaller than the minimum ({2}).";
-
-    /** */
-    private static final String NUMBER_TOO_SMALL_BOUND_EXCLUDED = "Number {1} is smaller than, or equal to, the minimum ({2}).";
-
-    /** Serializable version Id. */
-    private static final long serialVersionUID = -6100997100383932834L;
-    /**
-     * Higher bound.
-     */
-    private final Number min;
-    /**
-     * Whether the maximum is included in the allowed range.
-     */
-    private final boolean boundIsAllowed;
-
-    /**
-     * Construct the exception.
-     *
-     * @param wrong Value that is smaller than the minimum.
-     * @param min Minimum.
-     * @param boundIsAllowed Whether {@code min} is included in the allowed range.
-     */
-    public NumberIsTooSmallException(Number wrong, Number min, boolean boundIsAllowed) {
-        this(boundIsAllowed ? NUMBER_TOO_SMALL : NUMBER_TOO_SMALL_BOUND_EXCLUDED,
-            wrong, min, boundIsAllowed);
-    }
-
-    /**
-     * Construct the exception with a specific context.
-     *
-     * @param msg Specific context pattern.
-     * @param wrong Value that is smaller than the minimum.
-     * @param min Minimum.
-     * @param boundIsAllowed Whether {@code min} is included in the allowed range.
-     */
-    public NumberIsTooSmallException(String msg, Number wrong, Number min, boolean boundIsAllowed) {
-        super(msg, wrong, min);
-        this.min = min;
-        this.boundIsAllowed = boundIsAllowed;
-    }
-
-    /**
-     * @return {@code true} if the minimum is included in the allowed range.
-     */
-    public boolean getBoundIsAllowed() {
-        return boundIsAllowed;
-    }
-
-    /**
-     * @return the minimum.
-     */
-    public Number getMin() {
-        return min;
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/SingularMatrixException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/SingularMatrixException.java
deleted file mode 100644
index c7acc80..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/SingularMatrixException.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-/**
- * Exception to be thrown when a non-singular matrix is expected.
- */
-public class SingularMatrixException extends MathIllegalArgumentException {
-    /** */
-    public SingularMatrixException() {
-        super("Regular (or non-singular) matrix expected.");
-    }
-
-    /** */
-    public SingularMatrixException(String format, Object... args) {
-        super(format, args);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/UnknownProviderException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/UnknownProviderException.java
deleted file mode 100644
index 940b9aa..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/UnknownProviderException.java
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions;
-
-import org.apache.ignite.IgniteException;
-
-/**
- * Indicates that no provider has been found for a given vector or matrix flavor.
- */
-public class UnknownProviderException extends IgniteException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * @param flv Flavor (a.k.a. operation performance hints) that has no registered provider for.
-     */
-    public UnknownProviderException(String flv) {
-        super("No provider has been found for the flavor: " + flv);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/knn/SmallTrainingDatasetSizeException.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/knn/SmallTrainingDatasetSizeException.java
deleted file mode 100644
index 5eb3f7a..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/exceptions/knn/SmallTrainingDatasetSizeException.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.math.exceptions.knn;
-
-import org.apache.ignite.ml.math.exceptions.MathIllegalArgumentException;
-
-/**
- * Indicates a small training dataset size in ML algorithms.
- */
-public class SmallTrainingDatasetSizeException extends MathIllegalArgumentException {
-    /** */
-    private static final long serialVersionUID = 0L;
-
-    /**
-     * Creates new small training dataset size exception.
-     *
-     * @param exp Expected dataset size.
-     * @param act Actual dataset size.
-     */
-    public SmallTrainingDatasetSizeException(int exp, int act) {
-        super("Small training dataset size [expected=%d, actual=%d]", exp, act);
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/AbstractLSQR.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/AbstractLSQR.java
index d1d3219..c9281c0 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/AbstractLSQR.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/AbstractLSQR.java
@@ -19,7 +19,6 @@
 
 import com.github.fommil.netlib.BLAS;
 import java.util.Arrays;
-import org.apache.ignite.ml.math.Precision;
 
 /**
  * Basic implementation of the LSQR algorithm without assumptions about dataset storage format or data processing
@@ -30,8 +29,35 @@
  */
 // TODO: IGNITE-7660: Refactor LSQR algorithm
 public abstract class AbstractLSQR {
+    /**
+     * <p>
+     * Largest double-precision floating-point number such that
+     * {@code 1 + EPSILON} is numerically equal to 1. This value is an upper
+     * bound on the relative error due to rounding real numbers to double
+     * precision floating-point numbers.
+     * </p>
+     * <p>
+     * In IEEE 754 arithmetic, this is 2<sup>-53</sup>.
+     * </p>
+     *
+     * @see <a href="http://en.wikipedia.org/wiki/Machine_epsilon">Machine epsilon</a>
+     */
+    private static final double EPSILON;
+
+    /** Exponent offset in IEEE754 representation. */
+    private static final long EXPONENT_OFFSET = 1023L;
+
+    static {
+        /*
+         *  This was previously expressed as = 0x1.0p-53;
+         *  However, OpenJDK (Sparc Solaris) cannot handle such small
+         *  constants: MATH-721
+         */
+        EPSILON = Double.longBitsToDouble((EXPONENT_OFFSET - 53L) << 52);
+    }
+
     /** The smallest representable positive number such that 1.0 + eps != 1.0. */
-    private static final double eps = Precision.EPSILON;
+    private static final double eps = EPSILON;
 
     /** BLAS (Basic Linear Algebra Subprograms) instance. */
     private static BLAS blas = BLAS.getInstance();
@@ -52,7 +78,9 @@
      */
     public LSQRResult solve(double damp, double atol, double btol, double conlim, double iterLim, boolean calcVar,
         double[] x0) {
-        int n = getColumns();
+        Integer n = getColumns();
+        if(n == null)
+            return null;
 
         if (iterLim < 0)
             iterLim = 2 * n;
@@ -287,7 +315,7 @@
     protected abstract double[] iter(double bnorm, double[] target);
 
     /** */
-    protected abstract int getColumns();
+    protected abstract Integer getColumns();
 
     /** */
     private static double[] symOrtho(double a, double b) {
@@ -331,4 +359,4 @@
         return res;
     }
 
-}
\ No newline at end of file
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeap.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeap.java
index e138cf3..14356e1 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeap.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeap.java
@@ -100,10 +100,16 @@
      *
      * @return number of columns
      */
-    @Override protected int getColumns() {
+    @Override protected Integer getColumns() {
         return dataset.compute(
             data -> data.getFeatures() == null ? null : data.getFeatures().length / data.getRows(),
-            (a, b) -> a == null ? b : a
+            (a, b) -> {
+                if (a == null)
+                    return b == null ? 0 : b;
+                if (b == null)
+                    return a;
+                return b;
+            }
         );
     }
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQRResult.java b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQRResult.java
index 47beddb..0d6681c 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQRResult.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/math/isolve/lsqr/LSQRResult.java
@@ -34,7 +34,7 @@
     private final int isstop;
 
     /** Represents norm(r), where r = b - Ax. */
-    private final double r1norn;
+    private final double r1norm;
 
     /**Represents sqrt( norm(r)^2  +  damp^2 * norm(x)^2 ). Equal to r1norm if damp == 0. */
     private final double r2norm;
@@ -63,7 +63,7 @@
      * @param x X value.
      * @param iterations Number of performed iterations.
      * @param isstop Stop reason.
-     * @param r1norn R1 norm value.
+     * @param r1norm R1 norm value.
      * @param r2norm R2 norm value.
      * @param anorm A norm value.
      * @param acond A cond value.
@@ -71,11 +71,11 @@
      * @param xnorm X norm value.
      * @param var Var value.
      */
-    public LSQRResult(double[] x, int iterations, int isstop, double r1norn, double r2norm, double anorm, double acond,
+    public LSQRResult(double[] x, int iterations, int isstop, double r1norm, double r2norm, double anorm, double acond,
         double arnorm, double xnorm, double[] var) {
         super(x, iterations);
         this.isstop = isstop;
-        this.r1norn = r1norn;
+        this.r1norm = r1norm;
         this.r2norm = r2norm;
         this.anorm = anorm;
         this.acond = acond;
@@ -90,8 +90,8 @@
     }
 
     /** */
-    public double getR1norn() {
-        return r1norn;
+    public double getR1norm() {
+        return r1norm;
     }
 
     /** */
@@ -128,7 +128,7 @@
     @Override public String toString() {
         return "LSQRResult{" +
             "isstop=" + isstop +
-            ", r1norn=" + r1norn +
+            ", r1norm=" + r1norm +
             ", r2norm=" + r2norm +
             ", anorm=" + anorm +
             ", acond=" + acond +
@@ -137,4 +137,4 @@
             ", var=" + Arrays.toString(var) +
             '}';
     }
-}
\ No newline at end of file
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/nn/MLPTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/nn/MLPTrainer.java
index 6727ba9..1cac909 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/nn/MLPTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/nn/MLPTrainer.java
@@ -46,25 +46,25 @@
  */
 public class MLPTrainer<P extends Serializable> extends MultiLabelDatasetTrainer<MultilayerPerceptron> {
     /** Multilayer perceptron architecture supplier that defines layers and activators. */
-    private final IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier;
+    private IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier;
 
     /** Loss function to be minimized during the training. */
-    private final IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss;
+    private IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss;
 
     /** Update strategy that defines how to update model parameters during the training. */
-    private final UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy;
+    private UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy;
 
     /** Maximal number of iterations before the training will be stopped. */
-    private final int maxIterations;
+    private int maxIterations = 100;
 
     /** Batch size (per every partition). */
-    private final int batchSize;
+    private int batchSize = 100;
 
     /** Maximal number of local iterations before synchronization. */
-    private final int locIterations;
+    private int locIterations = 100;
 
     /** Multilayer perceptron model initializer. */
-    private final long seed;
+    private long seed = 1234L;
 
     /**
      * Constructs a new instance of multilayer perceptron trainer.
@@ -111,12 +111,29 @@
     public <K, V> MultilayerPerceptron fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, double[]> lbExtractor) {
 
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> MultilayerPerceptron updateModel(MultilayerPerceptron lastLearnedModel,
+        DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, double[]> lbExtractor) {
+
+        assert archSupplier != null;
+        assert loss!= null;
+        assert updatesStgy!= null;
+
         try (Dataset<EmptyContext, SimpleLabeledDatasetData> dataset = datasetBuilder.build(
             new EmptyContextBuilder<>(),
             new SimpleLabeledDatasetDataBuilder<>(featureExtractor, lbExtractor)
         )) {
-            MLPArchitecture arch = archSupplier.apply(dataset);
-            MultilayerPerceptron mdl = new MultilayerPerceptron(arch, new RandomInitializer(seed));
+            MultilayerPerceptron mdl;
+            if (lastLearnedModel != null)
+                mdl = lastLearnedModel;
+            else {
+                MLPArchitecture arch = archSupplier.apply(dataset);
+                mdl = new MultilayerPerceptron(arch, new RandomInitializer(seed));
+            }
             ParameterUpdateCalculator<? super MultilayerPerceptron, P> updater = updatesStgy.getUpdatesCalculator();
 
             for (int i = 0; i < maxIterations; i += locIterations) {
@@ -178,6 +195,9 @@
                     }
                 );
 
+                if (totUp == null)
+                    return getLastTrainedModelOrThrowEmptyDatasetException(lastLearnedModel);
+
                 P update = updatesStgy.allUpdatesReducer().apply(totUp);
                 mdl = updater.update(mdl, update);
             }
@@ -190,6 +210,154 @@
     }
 
     /**
+     * Get the multilayer perceptron architecture supplier that defines layers and activators.
+     *
+     * @return The property value.
+     */
+    public IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> getArchSupplier() {
+        return archSupplier;
+    }
+
+    /**
+     * Set up the multilayer perceptron architecture supplier that defines layers and activators.
+     *
+     * @param archSupplier The parameter value.
+     * @return Model with the multilayer perceptron architecture supplier that defines layers and activators.
+     */
+    public MLPTrainer<P> withArchSupplier(
+        IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier) {
+        this.archSupplier = archSupplier;
+        return this;
+    }
+
+    /**
+     * Get the loss function to be minimized during the training.
+     *
+     * @return The property value.
+     */
+    public IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> getLoss() {
+        return loss;
+    }
+
+    /**
+     * Set up the loss function to be minimized during the training.
+     *
+     * @param loss The parameter value.
+     * @return Model with the loss function to be minimized during the training.
+     */
+    public MLPTrainer<P> withLoss(
+        IgniteFunction<Vector, IgniteDifferentiableVectorToDoubleFunction> loss) {
+        this.loss = loss;
+        return this;
+    }
+
+    /**
+     * Get the update strategy that defines how to update model parameters during the training.
+     *
+     * @return The property value.
+     */
+    public UpdatesStrategy<? super MultilayerPerceptron, P> getUpdatesStgy() {
+        return updatesStgy;
+    }
+
+    /**
+     * Set up the update strategy that defines how to update model parameters during the training.
+     *
+     * @param updatesStgy The parameter value.
+     * @return Model with the update strategy that defines how to update model parameters during the training.
+     */
+    public MLPTrainer<P> withUpdatesStgy(
+        UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy) {
+        this.updatesStgy = updatesStgy;
+        return this;
+    }
+
+    /**
+     * Get the maximal number of iterations before the training will be stopped.
+     *
+     * @return The property value.
+     */
+    public int getMaxIterations() {
+        return maxIterations;
+    }
+
+    /**
+     * Set up the maximal number of iterations before the training will be stopped.
+     *
+     * @param maxIterations The parameter value.
+     * @return Model with the maximal number of iterations before the training will be stopped.
+     */
+    public MLPTrainer<P> withMaxIterations(int maxIterations) {
+        this.maxIterations = maxIterations;
+        return this;
+    }
+
+    /**
+     * Get the batch size (per every partition).
+     *
+     * @return The property value.
+     */
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    /**
+     * Set up the batch size (per every partition).
+     *
+     * @param batchSize The parameter value.
+     * @return Model with the batch size (per every partition).
+     */
+    public MLPTrainer<P> withBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+        return this;
+    }
+
+    /**
+     * Get the maximal number of local iterations before synchronization.
+     *
+     * @return The property value.
+     */
+    public int getLocIterations() {
+        return locIterations;
+    }
+
+    /**
+     * Set up the maximal number of local iterations before synchronization.
+     *
+     * @param locIterations The parameter value.
+     * @return Model with the maximal number of local iterations before synchronization.
+     */
+    public MLPTrainer<P>  withLocIterations(int locIterations) {
+        this.locIterations = locIterations;
+        return this;
+    }
+
+    /**
+     * Get the multilayer perceptron model initializer.
+     *
+     * @return The property value.
+     */
+    public long getSeed() {
+        return seed;
+    }
+
+    /**
+     * Set up the multilayer perceptron model initializer.
+     *
+     * @param seed The parameter value.
+     * @return Model with the multilayer perceptron model initializer.
+     */
+    public MLPTrainer<P>  withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(MultilayerPerceptron mdl) {
+        return true;
+    }
+
+    /**
      * Builds a batch of the data by fetching specified rows.
      *
      * @param data All data.
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/nn/MultilayerPerceptron.java b/modules/ml/src/main/java/org/apache/ignite/ml/nn/MultilayerPerceptron.java
index 639bb44..b469603 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/nn/MultilayerPerceptron.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/nn/MultilayerPerceptron.java
@@ -349,7 +349,7 @@
      */
     public MLPArchitecture architecture() {
         if (below != null)
-            return below.architecture().add(architecture());
+            return below.architecture().add(architecture);
         return architecture;
     }
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/BarzilaiBorweinUpdater.java b/modules/ml/src/main/java/org/apache/ignite/ml/optimization/BarzilaiBorweinUpdater.java
deleted file mode 100644
index c374a8a..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/BarzilaiBorweinUpdater.java
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.optimization;
-
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-
-/**
- * Updater based in Barzilai-Borwein method which guarantees convergence.
- */
-public class BarzilaiBorweinUpdater implements Updater {
-    /** */
-    private static final long serialVersionUID = 5046575099408708472L;
-
-    /**
-     * Learning rate used on the first iteration.
-     */
-    private static final double INITIAL_LEARNING_RATE = 1.0;
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override public Vector compute(Vector oldWeights, Vector oldGradient, Vector weights, Vector gradient,
-        int iteration) {
-        double learningRate = computeLearningRate(oldWeights != null ? oldWeights.copy() : null,
-            oldGradient != null ? oldGradient.copy() : null, weights.copy(), gradient.copy());
-
-        return weights.copy().minus(gradient.copy().times(learningRate));
-    }
-
-    /** */
-    private double computeLearningRate(Vector oldWeights, Vector oldGradient, Vector weights, Vector gradient) {
-        if (oldWeights == null || oldGradient == null)
-            return INITIAL_LEARNING_RATE;
-        else {
-            Vector gradientDiff = gradient.minus(oldGradient);
-
-            return weights.minus(oldWeights).dot(gradientDiff) / Math.pow(gradientDiff.kNorm(2.0), 2.0);
-        }
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/SimpleUpdater.java b/modules/ml/src/main/java/org/apache/ignite/ml/optimization/SimpleUpdater.java
deleted file mode 100644
index 5079659..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/SimpleUpdater.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.optimization;
-
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-
-/**
- * Simple updater with fixed learning rate which doesn't guarantee convergence.
- */
-public class SimpleUpdater implements Updater {
-    /** */
-    private static final long serialVersionUID = 6417716224818162225L;
-
-    /** */
-    private final double learningRate;
-
-    /** */
-    public SimpleUpdater(double learningRate) {
-        assert learningRate > 0;
-
-        this.learningRate = learningRate;
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override public Vector compute(Vector oldWeights, Vector oldGradient, Vector weights, Vector gradient,
-        int iteration) {
-        return weights.minus(gradient.times(learningRate));
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/Updater.java b/modules/ml/src/main/java/org/apache/ignite/ml/optimization/Updater.java
deleted file mode 100644
index 3d38119..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/optimization/Updater.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.optimization;
-
-import java.io.Serializable;
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-
-/**
- * Weights updater applied on every gradient descent step to decide how weights should be changed.
- */
-@FunctionalInterface
-public interface Updater extends Serializable {
-    /** */
-    public Vector compute(Vector oldWeights, Vector oldGradient, Vector weights, Vector gradient, int iteration);
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/Pipeline.java b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/Pipeline.java
new file mode 100644
index 0000000..ba5740e
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/Pipeline.java
@@ -0,0 +1,152 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.pipeline;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.preprocessing.PreprocessingTrainer;
+import org.apache.ignite.ml.trainers.DatasetTrainer;
+
+/**
+ * A simple pipeline, which acts as a global trainer which produce a Pipeline Model.
+ * A Pipeline consists of a sequence of stages, each of which is either a Preprocessing Stage or a Trainer.
+ * When {@code fit()} method is called, the stages are executed in order.
+ *
+ * @param <K> Type of a key in {@code upstream} data.
+ * @param <V> Type of a value in {@code upstream} data.
+ * @param <R> Type of a result in {@code upstream} feature extractor.
+ */
+public class Pipeline<K, V, R> {
+    /** Feature extractor. */
+    private IgniteBiFunction<K, V, R> finalFeatureExtractor;
+
+    /** Label extractor. */
+    private IgniteBiFunction<K, V, Double> lbExtractor;
+
+    /** Prerpocessor stages. */
+    private List<PreprocessingTrainer> preprocessors = new ArrayList<>();
+
+    /** Final trainer stage. */
+    private DatasetTrainer finalStage;
+
+    /**
+     * Adds feature extractor as a zero stage.
+     *
+     * @param featureExtractor The parameter value.
+     * @return The updated Pipeline.
+     */
+    public Pipeline<K, V, R> addFeatureExtractor(IgniteBiFunction<K, V, R> featureExtractor) {
+        this.finalFeatureExtractor = featureExtractor;
+        return this;
+    }
+
+    /**
+     * Adds a label extractor for the produced model.
+     *
+     * @param lbExtractor The parameter value.
+     * @return The updated Pipeline.
+     */
+    public Pipeline<K, V, R> addLabelExtractor(IgniteBiFunction<K, V, Double> lbExtractor) {
+        this.lbExtractor = lbExtractor;
+        return this;
+    }
+
+    /**
+     * Adds a preprocessor.
+     *
+     * @param preprocessor The parameter value.
+     * @return The updated Pipeline.
+     */
+    public Pipeline<K, V, R> addPreprocessor(PreprocessingTrainer preprocessor) {
+        preprocessors.add(preprocessor);
+        return this;
+    }
+
+    /**
+     * Adds a trainer.
+     *
+     * @param trainer The parameter value.
+     * @return The updated Pipeline.
+     */
+    public Pipeline<K, V, R> addTrainer(DatasetTrainer trainer) {
+        this.finalStage = trainer;
+        return this;
+    }
+
+    /**
+     * Fits the pipeline to the input cache.
+     *
+     * @param ignite Ignite instance.
+     * @param cache Ignite cache with {@code upstream} data.
+     * @return The fitted model based on chain of preprocessors and final trainer.
+     */
+    public PipelineMdl<K, V> fit(Ignite ignite, IgniteCache<K, V> cache) {
+        DatasetBuilder datasetBuilder = new CacheBasedDatasetBuilder<>(ignite, cache);
+        return fit(datasetBuilder);
+    }
+
+    /**
+     * Fits the pipeline to the input mock data.
+     *
+     * @param data Data.
+     * @param parts Number of partitions.
+     * @return The fitted model based on chain of preprocessors and final trainer.
+     */
+    public PipelineMdl<K, V> fit(Map<K, V> data, int parts) {
+        DatasetBuilder datasetBuilder = new LocalDatasetBuilder<>(data, parts);
+        return fit(datasetBuilder);
+    }
+
+    /** Fits the pipeline to the input dataset builder. */
+    private PipelineMdl<K, V> fit(DatasetBuilder datasetBuilder) {
+        assert lbExtractor != null;
+        assert finalFeatureExtractor != null;
+
+        if (finalStage == null)
+            throw new IllegalStateException("The Pipeline should be finished with the Training Stage.");
+
+        preprocessors.forEach(e -> {
+
+            finalFeatureExtractor = e.fit(
+                datasetBuilder,
+                finalFeatureExtractor
+            );
+        });
+
+        Model<Vector, Double> internalMdl = finalStage
+            .fit(
+                datasetBuilder,
+                finalFeatureExtractor,
+                lbExtractor
+            );
+
+        return new PipelineMdl<K, V>()
+            .withFeatureExtractor(finalFeatureExtractor)
+            .withLabelExtractor(lbExtractor)
+            .withInternalMdl(internalMdl);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/PipelineMdl.java b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/PipelineMdl.java
new file mode 100644
index 0000000..edd70eb
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/PipelineMdl.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.pipeline;
+
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Wraps the model produced by {@link Pipeline}.
+ *
+ * @param <K> Type of a key in {@code upstream} data.
+ * @param <V> Type of a value in {@code upstream} data.
+ */
+public class PipelineMdl<K, V> implements Model<Vector, Double> {
+    /** Internal model produced by {@link Pipeline}. */
+    private Model<Vector, Double> internalMdl;
+
+    /** Feature extractor. */
+    private IgniteBiFunction<K, V, Vector> featureExtractor;
+
+    /** Label extractor. */
+    private IgniteBiFunction<K, V, Double> lbExtractor;
+
+    /** */
+    @Override public Double apply(Vector vector) {
+        return internalMdl.apply(vector);
+    }
+
+    /** */
+    public IgniteBiFunction<K, V, Vector> getFeatureExtractor() {
+        return featureExtractor;
+    }
+
+    /** */
+    public IgniteBiFunction<K, V, Double> getLabelExtractor() {
+        return lbExtractor;
+    }
+
+    /** */
+    public Model<Vector, Double> getInternalMdl() {
+        return internalMdl;
+    }
+
+    /** */
+    public PipelineMdl<K, V> withInternalMdl(Model<Vector, Double> internalMdl) {
+        this.internalMdl = internalMdl;
+        return this;
+    }
+
+    /** */
+    public PipelineMdl<K, V> withFeatureExtractor(IgniteBiFunction featureExtractor) {
+        this.featureExtractor = featureExtractor;
+        return this;
+    }
+
+    /** */
+    public PipelineMdl<K, V> withLabelExtractor(IgniteBiFunction<K, V, Double> lbExtractor) {
+        this.lbExtractor = lbExtractor;
+        return this;
+    }
+
+    /** */
+    @Override public String toString() {
+        return "PipelineMdl{" +
+            "internalMdl=" + internalMdl +
+            '}';
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/package-info.java
new file mode 100644
index 0000000..d29140ab
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/pipeline/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains Pipeline API.
+ */
+package org.apache.ignite.ml.pipeline;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/PreprocessingTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/PreprocessingTrainer.java
index 1886ee5..b977864 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/PreprocessingTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/PreprocessingTrainer.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.preprocessing;
 
+import java.util.Map;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
@@ -24,8 +25,6 @@
 import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 
-import java.util.Map;
-
 /**
  * Trainer for preprocessor.
  *
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationPreprocessor.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationPreprocessor.java
index 8300820..2e1bd5c 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationPreprocessor.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationPreprocessor.java
@@ -68,8 +68,8 @@
         return res;
     }
 
-    /** Gets the threshold parameter. */
-    public double threshold() {
+    /** Get the threshold parameter. */
+    public double getThreshold() {
         return threshold;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainer.java
index 26541e0..ad8c90e7 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainer.java
@@ -39,15 +39,16 @@
     }
 
     /**
-     * Gets the threshold parameter value.
-     * @return The parameter value.
+     * Get the threshold parameter value.
+     *
+     * @return The property value.
      */
-    public double threshold() {
+    public double getThreshold() {
         return threshold;
     }
 
     /**
-     * Sets the threshold parameter value.
+     * Set the threshold parameter value.
      *
      * @param threshold The given value.
      * @return The Binarization trainer.
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java
index f716d96..8b2d9b7 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainer.java
@@ -207,7 +207,7 @@
      * @param idx The index of encoded feature.
      * @return The changed trainer.
      */
-    public EncoderTrainer<K, V> encodeFeature(int idx) {
+    public EncoderTrainer<K, V> withEncodedFeature(int idx) {
         handledIndices.add(idx);
         return this;
     }
@@ -222,4 +222,15 @@
         this.encoderType = type;
         return this;
     }
+
+    /**
+     * Sets the indices of features which should be encoded.
+     *
+     * @param handledIndices Indices of features which should be encoded.
+     * @return The changed trainer.
+     */
+    public EncoderTrainer<K, V> withEncodedFeatures(Set<Integer> handledIndices) {
+        this.handledIndices = handledIndices;
+        return this;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPartitionData.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPartitionData.java
new file mode 100644
index 0000000..e4658da
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPartitionData.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
+
+/**
+ * Partition data used in maxabsscaling preprocessor.
+ *
+ * @see MaxAbsScalerTrainer
+ * @see MaxAbsScalerPreprocessor
+ */
+public class MaxAbsScalerPartitionData implements AutoCloseable {
+    /** Maximum absolute values. */
+    private final double[] maxAbs;
+
+    /**
+     * Constructs a new instance of maxabsscaling partition data.
+     *
+     * @param maxAbs Maximal absolute values.
+     */
+    public MaxAbsScalerPartitionData(double[] maxAbs) {
+        this.maxAbs = maxAbs;
+    }
+
+    /** */
+    public double[] getMaxAbs() {
+        return maxAbs;
+    }
+
+    /** */
+    @Override public void close() {
+        // Do nothing, GC will clean up.
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessor.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessor.java
new file mode 100644
index 0000000..4eb0e31
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessor.java
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
+
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * The preprocessing function that makes maxabsscaling, transforms features to the scale {@code [-1,+1]}. From
+ * mathematical point of view it's the following function which is applied to every element in a dataset:
+ *
+ * {@code a_i = a_i / maxabs_i for all i},
+ *
+ * where {@code i} is a number of column, {@code maxabs_i} is the value of the absolute maximum element in this column.
+ *
+ * @param <K> Type of a key in {@code upstream} data.
+ * @param <V> Type of a value in {@code upstream} data.
+ */
+public class MaxAbsScalerPreprocessor<K, V> implements IgniteBiFunction<K, V, Vector> {
+    /** */
+    private static final long serialVersionUID = 1L;
+
+    /** Maximum absolute values. */
+    private final double[] maxAbs;
+
+    /** Base preprocessor. */
+    private final IgniteBiFunction<K, V, Vector> basePreprocessor;
+
+    /**
+     * Constructs a new instance of maxabsscaling preprocessor.
+     *
+     * @param maxAbs Maximal absolute values.
+     * @param basePreprocessor Base preprocessor.
+     */
+    public MaxAbsScalerPreprocessor(double[] maxAbs, IgniteBiFunction<K, V, Vector> basePreprocessor) {
+        this.maxAbs = maxAbs;
+        this.basePreprocessor = basePreprocessor;
+    }
+
+    /**
+     * Applies this preprocessor.
+     *
+     * @param k Key.
+     * @param v Value.
+     * @return Preprocessed row.
+     */
+    @Override public Vector apply(K k, V v) {
+        Vector res = basePreprocessor.apply(k, v);
+
+        assert res.size() == maxAbs.length;
+
+        for (int i = 0; i < res.size(); i++)
+            res.set(i, res.get(i) / maxAbs[i]);
+
+        return res;
+    }
+
+    /** */
+    public double[] getMaxAbs() {
+        return maxAbs;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java
new file mode 100644
index 0000000..d3e5734
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainer.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
+
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.UpstreamEntry;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.preprocessing.PreprocessingTrainer;
+
+/**
+ * Trainer of the maxabsscaling preprocessor.
+ *
+ * @param <K> Type of a key in {@code upstream} data.
+ * @param <V> Type of a value in {@code upstream} data.
+ */
+public class MaxAbsScalerTrainer<K, V> implements PreprocessingTrainer<K, V, Vector, Vector> {
+    /** {@inheritDoc} */
+    @Override public MaxAbsScalerPreprocessor<K, V> fit(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> basePreprocessor) {
+        try (Dataset<EmptyContext, MaxAbsScalerPartitionData> dataset = datasetBuilder.build(
+            (upstream, upstreamSize) -> new EmptyContext(),
+            (upstream, upstreamSize, ctx) -> {
+                double[] maxAbs = null;
+
+                while (upstream.hasNext()) {
+                    UpstreamEntry<K, V> entity = upstream.next();
+                    Vector row = basePreprocessor.apply(entity.getKey(), entity.getValue());
+
+                    if (maxAbs == null) {
+                        maxAbs = new double[row.size()];
+                        for (int i = 0; i < maxAbs.length; i++)
+                            maxAbs[i] = .0;
+                    }
+                    else
+                        assert maxAbs.length == row.size() : "Base preprocessor must return exactly " + maxAbs.length
+                            + " features";
+
+                    for (int i = 0; i < row.size(); i++) {
+                        if (Math.abs(row.get(i)) > Math.abs(maxAbs[i]))
+                            maxAbs[i] = Math.abs(row.get(i));
+                    }
+                }
+                return new MaxAbsScalerPartitionData(maxAbs);
+            }
+        )) {
+            double[] maxAbs = dataset.compute(MaxAbsScalerPartitionData::getMaxAbs,
+                (a, b) -> {
+                    if (a == null)
+                        return b;
+
+                    if (b == null)
+                        return a;
+
+                    double[] result = new double[a.length];
+
+                    for (int i = 0; i < result.length; i++) {
+                        result[i] = Math.max(Math.abs(a[i]), Math.abs(b[i]));
+                    }
+                    return result;
+                });
+            return new MaxAbsScalerPreprocessor<>(maxAbs, basePreprocessor);
+        }
+        catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/package-info.java
new file mode 100644
index 0000000..343ba30
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/preprocessing/maxabsscaling/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains Max Abs Scaler preprocessor.
+ */
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java
index 8197779..5497177 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainer.java
@@ -38,16 +38,34 @@
     @Override public <K, V> LinearRegressionModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
 
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> LinearRegressionModel updateModel(LinearRegressionModel mdl,
+        DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
         LSQRResult res;
 
         try (LSQROnHeap<K, V> lsqr = new LSQROnHeap<>(
             datasetBuilder,
             new SimpleLabeledDatasetDataBuilder<>(
                 new FeatureExtractorWrapper<>(featureExtractor),
-                lbExtractor.andThen(e -> new double[]{e})
+                lbExtractor.andThen(e -> new double[] {e})
             )
         )) {
-            res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, null);
+            double[] x0 = null;
+            if (mdl != null) {
+                int x0Size = mdl.getWeights().size() + 1;
+                Vector weights = mdl.getWeights().like(x0Size);
+                mdl.getWeights().nonZeroes().forEach(ith -> weights.set(ith.index(), ith.get()));
+                weights.set(weights.size() - 1, mdl.getIntercept());
+                x0 = weights.asArray();
+            }
+            res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, x0);
+            if (res == null)
+                return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
         }
         catch (Exception e) {
             throw new RuntimeException(e);
@@ -58,4 +76,9 @@
 
         return new LinearRegressionModel(weights, x[x.length - 1]);
     }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(LinearRegressionModel mdl) {
+        return true;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java
index 2237c95..4132d35 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainer.java
@@ -19,6 +19,7 @@
 
 import java.io.Serializable;
 import java.util.Arrays;
+import java.util.Optional;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
@@ -34,6 +35,7 @@
 import org.apache.ignite.ml.nn.architecture.MLPArchitecture;
 import org.apache.ignite.ml.optimization.LossFunctions;
 import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer;
+import org.jetbrains.annotations.NotNull;
 
 /**
  * Trainer of the linear regression model based on stochastic gradient descent algorithm.
@@ -42,17 +44,17 @@
     /** Update strategy. */
     private final UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy;
 
-    /** Max number of iteration. */
-    private final int maxIterations;
+    /** Max amount of iterations. */
+    private int maxIterations = 1000;
 
     /** Batch size. */
-    private final int batchSize;
+    private int batchSize = 10;
 
-    /** Number of local iterations. */
-    private final int locIterations;
+    /** Amount of local iterations. */
+    private int locIterations = 100;
 
     /** Seed for random generator. */
-    private final long seed;
+    private long seed = 1234L;
 
     /**
      * Constructs a new instance of linear regression SGD trainer.
@@ -72,17 +74,40 @@
         this.seed = seed;
     }
 
+    /**
+     * Constructs a new instance of linear regression SGD trainer.
+     */
+    public LinearRegressionSGDTrainer(UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy) {
+        this.updatesStgy = updatesStgy;
+    }
+
     /** {@inheritDoc} */
     @Override public <K, V> LinearRegressionModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
 
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> LinearRegressionModel updateModel(LinearRegressionModel mdl,
+        DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        assert updatesStgy != null;
+
         IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier = dataset -> {
 
             int cols = dataset.compute(data -> {
                 if (data.getFeatures() == null)
                     return null;
                 return data.getFeatures().length / data.getRows();
-            }, (a, b) -> a == null ? b : a);
+            }, (a, b) -> {
+                if (a == null)
+                    return b == null ? 0 : b;
+                if (b == null)
+                    return a;
+                return b;
+            });
 
             MLPArchitecture architecture = new MLPArchitecture(cols);
             architecture = architecture.withAddedLayer(1, true, Activators.LINEAR);
@@ -100,9 +125,12 @@
             seed
         );
 
-        IgniteBiFunction<K, V, double[]> lbE = (IgniteBiFunction<K, V, double[]>)(k, v) -> new double[]{lbExtractor.apply(k, v)};
+        IgniteBiFunction<K, V, double[]> lbE = (IgniteBiFunction<K, V, double[]>)(k, v) -> new double[] {lbExtractor.apply(k, v)};
 
-        MultilayerPerceptron mlp = trainer.fit(datasetBuilder, featureExtractor, lbE);
+        MultilayerPerceptron mlp = Optional.ofNullable(mdl)
+            .map(this::restoreMLPState)
+            .map(m -> trainer.update(m, datasetBuilder, featureExtractor, lbE))
+            .orElseGet(() -> trainer.fit(datasetBuilder, featureExtractor, lbE));
 
         double[] p = mlp.parameters().getStorage().data();
 
@@ -111,4 +139,117 @@
             p[p.length - 1]
         );
     }
+
+    /**
+     * @param mdl Model.
+     * @return state of MLP from last learning.
+     */
+    @NotNull private MultilayerPerceptron restoreMLPState(LinearRegressionModel mdl) {
+        Vector weights = mdl.getWeights();
+        double intercept = mdl.getIntercept();
+        MLPArchitecture architecture1 = new MLPArchitecture(weights.size());
+        architecture1 = architecture1.withAddedLayer(1, true, Activators.LINEAR);
+        MLPArchitecture architecture = architecture1;
+        MultilayerPerceptron perceptron = new MultilayerPerceptron(architecture);
+
+        Vector mlpState = weights.like(weights.size() + 1);
+        weights.nonZeroes().forEach(ith -> mlpState.set(ith.index(), ith.get()));
+        mlpState.set(mlpState.size() - 1, intercept);
+        perceptron.setParameters(mlpState);
+        return perceptron;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(LinearRegressionModel mdl) {
+        return true;
+    }
+
+    /**
+     * Set up the max amount of iterations before convergence.
+     *
+     * @param maxIterations The parameter value.
+     * @return Model with new max number of iterations before convergence parameter value.
+     */
+    public LinearRegressionSGDTrainer<P> withMaxIterations(int maxIterations) {
+        this.maxIterations = maxIterations;
+        return this;
+    }
+
+    /**
+     * Set up the batchSize parameter.
+     *
+     * @param batchSize The size of learning batch.
+     * @return Trainer with new batch size parameter value.
+     */
+    public LinearRegressionSGDTrainer<P> withBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+        return this;
+    }
+
+    /**
+     * Set up the amount of local iterations of SGD algorithm.
+     *
+     * @param amountOfLocIterations The parameter value.
+     * @return Trainer with new locIterations parameter value.
+     */
+    public LinearRegressionSGDTrainer<P> withLocIterations(int amountOfLocIterations) {
+        this.locIterations = amountOfLocIterations;
+        return this;
+    }
+
+    /**
+     * Set up the random seed parameter.
+     *
+     * @param seed Seed for random generator.
+     * @return Trainer with new seed parameter value.
+     */
+    public LinearRegressionSGDTrainer<P> withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
+
+    /**
+     * Get the update strategy.
+     *
+     * @return The property value.
+     */
+    public UpdatesStrategy<? super MultilayerPerceptron, P> getUpdatesStgy() {
+        return updatesStgy;
+    }
+
+    /**
+     * Get the max amount of iterations.
+     *
+     * @return The property value.
+     */
+    public int getMaxIterations() {
+        return maxIterations;
+    }
+
+    /**
+     * Get the batch size.
+     *
+     * @return The property value.
+     */
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    /**
+     * Get the amount of local iterations.
+     *
+     * @return The property value.
+     */
+    public int getLocIterations() {
+        return locIterations;
+    }
+
+    /**
+     * Get the seed for random generator.
+     *
+     * @return The property value.
+     */
+    public long getSeed() {
+        return seed;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/binomial/LogisticRegressionSGDTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/binomial/LogisticRegressionSGDTrainer.java
index 840a18d..fb5d5a0 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/binomial/LogisticRegressionSGDTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/binomial/LogisticRegressionSGDTrainer.java
@@ -34,25 +34,26 @@
 import org.apache.ignite.ml.nn.architecture.MLPArchitecture;
 import org.apache.ignite.ml.optimization.LossFunctions;
 import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer;
+import org.jetbrains.annotations.NotNull;
 
 /**
  * Trainer of the logistic regression model based on stochastic gradient descent algorithm.
  */
 public class LogisticRegressionSGDTrainer<P extends Serializable> extends SingleLabelDatasetTrainer<LogisticRegressionModel> {
     /** Update strategy. */
-    private final UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy;
+    private UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy;
 
     /** Max number of iteration. */
-    private final int maxIterations;
+    private int maxIterations;
 
     /** Batch size. */
-    private final int batchSize;
+    private int batchSize;
 
     /** Number of local iterations. */
-    private final int locIterations;
+    private int locIterations;
 
     /** Seed for random generator. */
-    private final long seed;
+    private long seed;
 
     /**
      * Constructs a new instance of linear regression SGD trainer.
@@ -64,7 +65,7 @@
      * @param seed Seed for random generator.
      */
     public LogisticRegressionSGDTrainer(UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy, int maxIterations,
-                                        int batchSize, int locIterations, long seed) {
+        int batchSize, int locIterations, long seed) {
         this.updatesStgy = updatesStgy;
         this.maxIterations = maxIterations;
         this.batchSize = batchSize;
@@ -76,13 +77,26 @@
     @Override public <K, V> LogisticRegressionModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
 
-        IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier = dataset -> {
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
 
+    /** {@inheritDoc} */
+    @Override protected <K, V> LogisticRegressionModel updateModel(LogisticRegressionModel mdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        IgniteFunction<Dataset<EmptyContext, SimpleLabeledDatasetData>, MLPArchitecture> archSupplier = dataset -> {
             int cols = dataset.compute(data -> {
                 if (data.getFeatures() == null)
                     return null;
                 return data.getFeatures().length / data.getRows();
-            }, (a, b) -> a == null ? b : a);
+            }, (a, b) -> {
+                if (a == null)
+                    return b == null ? 0 : b;
+                if (b == null)
+                    return a;
+                return b;
+            });
 
             MLPArchitecture architecture = new MLPArchitecture(cols);
             architecture = architecture.withAddedLayer(1, true, Activators.SIGMOID);
@@ -100,7 +114,14 @@
             seed
         );
 
-        MultilayerPerceptron mlp = trainer.fit(datasetBuilder, featureExtractor, (k, v) -> new double[]{lbExtractor.apply(k, v)});
+        IgniteBiFunction<K, V, double[]> lbExtractorWrapper = (k, v) -> new double[] {lbExtractor.apply(k, v)};
+        MultilayerPerceptron mlp;
+        if (mdl != null) {
+            mlp = restoreMLPState(mdl);
+            mlp = trainer.update(mlp, datasetBuilder, featureExtractor, lbExtractorWrapper);
+        }
+        else
+            mlp = trainer.fit(datasetBuilder, featureExtractor, lbExtractorWrapper);
 
         double[] params = mlp.parameters().getStorage().data();
 
@@ -108,4 +129,120 @@
             params[params.length - 1]
         );
     }
+
+    /**
+     * @param mdl Model.
+     * @return state of MLP from last learning.
+     */
+    @NotNull private MultilayerPerceptron restoreMLPState(LogisticRegressionModel mdl) {
+        Vector weights = mdl.weights();
+        double intercept = mdl.intercept();
+
+        MLPArchitecture architecture1 = new MLPArchitecture(weights.size());
+        architecture1 = architecture1.withAddedLayer(1, true, Activators.SIGMOID);
+
+        MLPArchitecture architecture = architecture1;
+        MultilayerPerceptron perceptron = new MultilayerPerceptron(architecture);
+
+        Vector mlpState = weights.like(weights.size() + 1);
+        weights.nonZeroes().forEach(ith -> mlpState.set(ith.index(), ith.get()));
+        mlpState.set(mlpState.size() - 1, intercept);
+        perceptron.setParameters(mlpState);
+
+        return perceptron;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(LogisticRegressionModel mdl) {
+        return true;
+    }
+
+    /**
+     * Set up the max amount of iterations before convergence.
+     *
+     * @param maxIterations The parameter value.
+     * @return Model with new max number of iterations before convergence parameter value.
+     */
+    public LogisticRegressionSGDTrainer<P> withMaxIterations(int maxIterations) {
+        this.maxIterations = maxIterations;
+        return this;
+    }
+
+    /**
+     * Set up the batchSize parameter.
+     *
+     * @param batchSize The size of learning batch.
+     * @return Trainer with new batch size parameter value.
+     */
+    public LogisticRegressionSGDTrainer<P> withBatchSize(int batchSize) {
+        this.batchSize = batchSize;
+        return this;
+    }
+
+    /**
+     * Set up the amount of local iterations of SGD algorithm.
+     *
+     * @param amountOfLocIterations The parameter value.
+     * @return Trainer with new locIterations parameter value.
+     */
+    public LogisticRegressionSGDTrainer<P> withLocIterations(int amountOfLocIterations) {
+        this.locIterations = amountOfLocIterations;
+        return this;
+    }
+
+    /**
+     * Set up the random seed parameter.
+     *
+     * @param seed Seed for random generator.
+     * @return Trainer with new seed parameter value.
+     */
+    public LogisticRegressionSGDTrainer<P> withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
+
+    /**
+     * Get the update strategy.
+     *
+     * @return The property value.
+     */
+    public UpdatesStrategy<? super MultilayerPerceptron, P> getUpdatesStgy() {
+        return updatesStgy;
+    }
+
+    /**
+     * Get the max amount of iterations.
+     *
+     * @return The property value.
+     */
+    public int getMaxIterations() {
+        return maxIterations;
+    }
+
+    /**
+     * Get the batch size.
+     *
+     * @return The property value.
+     */
+    public int getBatchSize() {
+        return batchSize;
+    }
+
+    /**
+     * Get the amount of local iterations.
+     *
+     * @return The property value.
+     */
+    public int getLocIterations() {
+        return locIterations;
+    }
+
+    /**
+     * Get the seed for random generator.
+     *
+     * @return The property value.
+     */
+    public long getSeed() {
+        return seed;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassModel.java
index 56d2d29..a7c9118 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassModel.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.TreeMap;
 import org.apache.ignite.ml.Exportable;
 import org.apache.ignite.ml.Exporter;
@@ -103,4 +104,12 @@
     public void add(double clsLb, LogisticRegressionModel mdl) {
         models.put(clsLb, mdl);
     }
+
+    /**
+     * @param clsLb Class label.
+     * @return model for class label if it exists.
+     */
+    public Optional<LogisticRegressionModel> getModel(Double clsLb) {
+        return Optional.ofNullable(models.get(clsLb));
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassTrainer.java
index 1ed938a..b9cdcc7 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/regressions/logistic/multiclass/LogRegressionMultiClassTrainer.java
@@ -22,6 +22,7 @@
 import java.util.Collection;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Optional;
 import java.util.Set;
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
@@ -33,6 +34,7 @@
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.nn.MultilayerPerceptron;
 import org.apache.ignite.ml.nn.UpdatesStrategy;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
 import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionSGDTrainer;
 import org.apache.ignite.ml.structures.partition.LabelPartitionDataBuilderOnHeap;
 import org.apache.ignite.ml.structures.partition.LabelPartitionDataOnHeap;
@@ -61,16 +63,29 @@
     /**
      * Trains model based on the specified data.
      *
-     * @param datasetBuilder   Dataset builder.
+     * @param datasetBuilder Dataset builder.
      * @param featureExtractor Feature extractor.
-     * @param lbExtractor      Label extractor.
+     * @param lbExtractor Label extractor.
      * @return Model.
      */
     @Override public <K, V> LogRegressionMultiClassModel fit(DatasetBuilder<K, V> datasetBuilder,
-                                                                IgniteBiFunction<K, V, Vector> featureExtractor,
-                                                                IgniteBiFunction<K, V, Double> lbExtractor) {
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
         List<Double> classes = extractClassLabels(datasetBuilder, lbExtractor);
 
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> LogRegressionMultiClassModel updateModel(LogRegressionMultiClassModel newMdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        List<Double> classes = extractClassLabels(datasetBuilder, lbExtractor);
+
+        if(classes.isEmpty())
+            return getLastTrainedModelOrThrowEmptyDatasetException(newMdl);
+
         LogRegressionMultiClassModel multiClsMdl = new LogRegressionMultiClassModel();
 
         classes.forEach(clsLb -> {
@@ -85,14 +100,26 @@
                 else
                     return 0.0;
             };
-            multiClsMdl.add(clsLb, trainer.fit(datasetBuilder, featureExtractor, lbTransformer));
+
+            LogisticRegressionModel mdl = Optional.ofNullable(newMdl)
+                .flatMap(multiClassModel -> multiClassModel.getModel(clsLb))
+                .map(learnedModel -> trainer.update(learnedModel, datasetBuilder, featureExtractor, lbTransformer))
+                .orElseGet(() -> trainer.fit(datasetBuilder, featureExtractor, lbTransformer));
+
+            multiClsMdl.add(clsLb, mdl);
         });
 
         return multiClsMdl;
     }
 
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(LogRegressionMultiClassModel mdl) {
+        return true;
+    }
+
     /** Iterates among dataset and collects class labels. */
-    private <K, V> List<Double> extractClassLabels(DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Double> lbExtractor) {
+    private <K, V> List<Double> extractClassLabels(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
         assert datasetBuilder != null;
 
         PartitionDataBuilder<K, V, EmptyContext, LabelPartitionDataOnHeap> partDataBuilder = new LabelPartitionDataBuilderOnHeap<>(lbExtractor);
@@ -108,14 +135,23 @@
 
                 final double[] lbs = data.getY();
 
-                for (double lb : lbs) locClsLabels.add(lb);
+                for (double lb : lbs)
+                    locClsLabels.add(lb);
 
                 return locClsLabels;
-            }, (a, b) -> a == null ? b : Stream.of(a, b).flatMap(Collection::stream).collect(Collectors.toSet()));
+            }, (a, b) -> {
+                if (a == null)
+                    return b == null ? new HashSet<>() : b;
+                if (b == null)
+                    return a;
+                return Stream.of(a, b).flatMap(Collection::stream).collect(Collectors.toSet());
+            });
 
-            res.addAll(clsLabels);
+            if (clsLabels != null)
+                res.addAll(clsLabels);
 
-        } catch (Exception e) {
+        }
+        catch (Exception e) {
             throw new RuntimeException(e);
         }
         return res;
@@ -133,20 +169,20 @@
     }
 
     /**
-     * Gets the batch size.
+     * Get the batch size.
      *
      * @return The parameter value.
      */
-    public double batchSize() {
+    public double getBatchSize() {
         return batchSize;
     }
 
     /**
-     * Gets the amount of outer iterations of SGD algorithm.
+     * Get the amount of outer iterations of SGD algorithm.
      *
      * @return The parameter value.
      */
-    public int amountOfIterations() {
+    public int getAmountOfIterations() {
         return amountOfIterations;
     }
 
@@ -162,11 +198,11 @@
     }
 
     /**
-     * Gets the amount of local iterations.
+     * Get the amount of local iterations.
      *
      * @return The parameter value.
      */
-    public int amountOfLocIterations() {
+    public int getAmountOfLocIterations() {
         return amountOfLocIterations;
     }
 
@@ -182,7 +218,7 @@
     }
 
     /**
-     * Set up the regularization parameter.
+     * Set up the random seed parameter.
      *
      * @param seed Seed for random generator.
      * @return Trainer with new seed parameter value.
@@ -193,7 +229,7 @@
     }
 
     /**
-     * Gets the seed for random generator.
+     * Get the seed for random generator.
      *
      * @return The parameter value.
      */
@@ -213,11 +249,11 @@
     }
 
     /**
-     * Gets the update strategy..
+     * Get the update strategy..
      *
      * @return The parameter value.
      */
-    public UpdatesStrategy<? super MultilayerPerceptron, P> updatesStgy() {
+    public UpdatesStrategy<? super MultilayerPerceptron, P> getUpdatesStgy() {
         return updatesStgy;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/selection/cv/CrossValidation.java b/modules/ml/src/main/java/org/apache/ignite/ml/selection/cv/CrossValidation.java
index 1ade876..ef4f30f 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/selection/cv/CrossValidation.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/selection/cv/CrossValidation.java
@@ -120,7 +120,7 @@
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, L> lbExtractor, int cv,
         ParamGrid paramGrid) {
 
-        List<Double[]> paramSets = new ParameterSetGenerator(paramGrid.getParamValuesByParamIndex()).generate();
+        List<Double[]> paramSets = new ParameterSetGenerator(paramGrid.getParamValuesByParamIdx()).generate();
 
         CrossValidationResult cvRes = new CrossValidationResult();
 
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/selection/paramgrid/ParamGrid.java b/modules/ml/src/main/java/org/apache/ignite/ml/selection/paramgrid/ParamGrid.java
index 3279d93..f9c5bd2 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/selection/paramgrid/ParamGrid.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/selection/paramgrid/ParamGrid.java
@@ -25,17 +25,17 @@
  */
 public class ParamGrid {
     /** Parameter values by parameter index. */
-    private Map<Integer, Double[]> paramValuesByParamIndex = new HashMap<>();
+    private Map<Integer, Double[]> paramValuesByParamIdx = new HashMap<>();
 
     /** Parameter names by parameter index. */
-    private Map<Integer, String> paramNamesByParamIndex = new HashMap<>();
+    private Map<Integer, String> paramNamesByParamIdx = new HashMap<>();
 
     /** Parameter counter. */
     private int paramCntr;
 
     /** */
-    public Map<Integer, Double[]> getParamValuesByParamIndex() {
-        return paramValuesByParamIndex;
+    public Map<Integer, Double[]> getParamValuesByParamIdx() {
+        return paramValuesByParamIdx;
     }
 
     /**
@@ -45,14 +45,14 @@
      * @return The updated ParamGrid.
      */
     public ParamGrid addHyperParam(String paramName, Double[] params) {
-        paramValuesByParamIndex.put(paramCntr, params);
-        paramNamesByParamIndex.put(paramCntr, paramName);
+        paramValuesByParamIdx.put(paramCntr, params);
+        paramNamesByParamIdx.put(paramCntr, paramName);
         paramCntr++;
         return this;
     }
 
     /** */
     public String getParamNameByIndex(int idx) {
-        return paramNamesByParamIndex.get(idx);
+        return paramNamesByParamIdx.get(idx);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/DatasetRow.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/DatasetRow.java
index 1e3e12c..eda901e 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/DatasetRow.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/structures/DatasetRow.java
@@ -58,7 +58,7 @@
 
         DatasetRow vector1 = (DatasetRow)o;
 
-        return vector != null ? !vector.equals(vector1.vector) : vector1.vector != null;
+        return vector != null ? vector.equals(vector1.vector) : vector1.vector == null;
     }
 
     /** {@inheritDoc} */
@@ -72,6 +72,7 @@
     }
 
     /** {@inheritDoc} */
+    @SuppressWarnings("unchecked")
     @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
         vector = (V)in.readObject();
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDataset.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDataset.java
deleted file mode 100644
index 2440587..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDataset.java
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.structures;
-
-import org.apache.ignite.ml.math.exceptions.CardinalityException;
-import org.apache.ignite.ml.math.exceptions.NoDataException;
-import org.apache.ignite.ml.math.exceptions.knn.NoLabelVectorException;
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
-
-/**
- * Class for set of labeled vectors.
- */
-public class LabeledDataset<L, Row extends LabeledVector> extends Dataset<Row> implements AutoCloseable {
-    /**
-     * Default constructor (required by Externalizable).
-     */
-    public LabeledDataset() {
-        super();
-    }
-
-    /**
-     * Creates new Labeled Dataset and initialized with empty data structure.
-     *
-     * @param rowSize Amount of instances. Should be > 0.
-     * @param colSize Amount of attributes. Should be > 0.
-     * @param isDistributed Use distributed data structures to keep data.
-     */
-    public LabeledDataset(int rowSize, int colSize,  boolean isDistributed){
-        this(rowSize, colSize, null, isDistributed);
-    }
-
-    /**
-     * Creates new local Labeled Dataset and initialized with empty data structure.
-     *
-     * @param rowSize Amount of instances. Should be > 0.
-     * @param colSize Amount of attributes. Should be > 0.
-     */
-    public LabeledDataset(int rowSize, int colSize){
-        this(rowSize, colSize, null, false);
-    }
-
-    /**
-     * Creates new Labeled Dataset and initialized with empty data structure.
-     *
-     * @param rowSize Amount of instances. Should be > 0.
-     * @param colSize Amount of attributes. Should be > 0
-     * @param featureNames Column names.
-     * @param isDistributed Use distributed data structures to keep data.
-     */
-    public LabeledDataset(int rowSize, int colSize, String[] featureNames, boolean isDistributed){
-        super(rowSize, colSize, featureNames, isDistributed);
-
-        initializeDataWithLabeledVectors();
-    }
-
-    /**
-     * Creates new Labeled Dataset by given data.
-     *
-     * @param data Should be initialized with one vector at least.
-     */
-    public LabeledDataset(Row[] data) {
-        super(data);
-    }
-
-    /** */
-    private void initializeDataWithLabeledVectors() {
-        data = (Row[])new LabeledVector[rowSize];
-        for (int i = 0; i < rowSize; i++)
-            data[i] = (Row)new LabeledVector(emptyVector(colSize, isDistributed), null);
-    }
-
-    /**
-     * Creates new Labeled Dataset by given data.
-     *
-     * @param data Should be initialized with one vector at least.
-     * @param colSize Amount of observed attributes in each vector.
-     */
-    public LabeledDataset(Row[] data, int colSize) {
-        super(data, colSize);
-    }
-
-
-    /**
-     * Creates new local Labeled Dataset by matrix and vector of labels.
-     *
-     * @param mtx Given matrix with rows as observations.
-     * @param lbs Labels of observations.
-     */
-    public LabeledDataset(double[][] mtx, double[] lbs) {
-       this(mtx, lbs, null, false);
-    }
-
-    /**
-     * Creates new Labeled Dataset by matrix and vector of labels.
-     *
-     * @param mtx Given matrix with rows as observations.
-     * @param lbs Labels of observations.
-     * @param featureNames Column names.
-     * @param isDistributed Use distributed data structures to keep data.
-     */
-    public LabeledDataset(double[][] mtx, double[] lbs, String[] featureNames, boolean isDistributed) {
-        super();
-        assert mtx != null;
-        assert lbs != null;
-
-        if(mtx.length != lbs.length)
-            throw new CardinalityException(lbs.length, mtx.length);
-
-        if(mtx[0] == null)
-            throw new NoDataException("Pass filled array, the first vector is empty");
-
-        this.rowSize = lbs.length;
-        this.colSize = mtx[0].length;
-
-        if(featureNames == null)
-            generateFeatureNames();
-        else {
-            assert colSize == featureNames.length;
-            convertStringNamesToFeatureMetadata(featureNames);
-        }
-
-        data = (Row[])new LabeledVector[rowSize];
-        for (int i = 0; i < rowSize; i++){
-
-            data[i] = (Row)new LabeledVector(emptyVector(colSize, isDistributed), lbs[i]);
-            for (int j = 0; j < colSize; j++) {
-                try {
-                    data[i].features().set(j, mtx[i][j]);
-                } catch (ArrayIndexOutOfBoundsException e) {
-                    throw new NoDataException("No data in given matrix by coordinates (" + i + "," + j + ")");
-                }
-            }
-        }
-    }
-
-    /**
-     * Returns label if label is attached or null if label is missed.
-     *
-     * @param idx Index of observation.
-     * @return Label.
-     */
-    public double label(int idx) {
-        LabeledVector labeledVector = data[idx];
-
-        if(labeledVector!=null)
-            return (double)labeledVector.label();
-        else
-            return Double.NaN;
-    }
-
-    /**
-     * Returns new copy of labels of all labeled vectors NOTE: This method is useful for copying labels from test
-     * dataset.
-     *
-     * @return Copy of labels.
-     */
-    public double[] labels() {
-        assert data != null;
-        assert data.length > 0;
-
-        double[] labels = new double[data.length];
-
-        for (int i = 0; i < data.length; i++)
-            labels[i] = (double)data[i].label();
-
-        return labels;
-    }
-
-    /**
-     * Fill the label with given value.
-     *
-     * @param idx Index of observation.
-     * @param lb The given label.
-     */
-    public void setLabel(int idx, double lb) {
-        LabeledVector<Vector, Double> labeledVector = data[idx];
-
-        if(labeledVector != null)
-            labeledVector.setLabel(lb);
-        else
-            throw new NoLabelVectorException(idx);
-    }
-
-    /** */
-    public static Vector emptyVector(int size, boolean isDistributed) {
-            return new DenseVector(size);
-    }
-
-    /** Makes copy with new Label objects and old features and Metadata objects. */
-    public LabeledDataset copy(){
-        LabeledDataset res = new LabeledDataset(this.data, this.colSize);
-        res.isDistributed = this.isDistributed;
-        res.meta = this.meta;
-        for (int i = 0; i < rowSize; i++)
-            res.setLabel(i, this.label(i));
-
-        return res;
-    }
-
-    /** Closes LabeledDataset. */
-    @Override public void close() throws Exception {
-
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDatasetTestTrainPair.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDatasetTestTrainPair.java
deleted file mode 100644
index f362fbc..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledDatasetTestTrainPair.java
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.structures;
-
-import java.io.Serializable;
-import java.util.Map;
-import java.util.Random;
-import java.util.TreeMap;
-import java.util.TreeSet;
-import org.jetbrains.annotations.NotNull;
-
-/**
- * Class for splitting Labeled Dataset on train and test sets.
- */
-public class LabeledDatasetTestTrainPair implements Serializable {
-    /** Data to keep train set. */
-    private LabeledDataset train;
-
-    /** Data to keep test set. */
-    private LabeledDataset test;
-
-    /**
-     * Creates two subsets of given dataset.
-     * <p>
-     * NOTE: This method uses next algorithm with O(n log n) by calculations and O(n) by memory.
-     * </p>
-     * @param dataset The dataset to split on train and test subsets.
-     * @param testPercentage The percentage of the test subset.
-     */
-    public LabeledDatasetTestTrainPair(LabeledDataset dataset, double testPercentage) {
-        assert testPercentage > 0.0;
-        assert testPercentage < 1.0;
-        final int datasetSize = dataset.rowSize();
-        assert datasetSize > 2;
-
-        final int testSize = (int)Math.floor(datasetSize * testPercentage);
-        final int trainSize = datasetSize - testSize;
-
-        final TreeSet<Integer> sortedTestIndices = getSortedIndices(datasetSize, testSize);
-
-        LabeledVector[] testVectors = new LabeledVector[testSize];
-        LabeledVector[] trainVectors = new LabeledVector[trainSize];
-
-        int datasetCntr = 0;
-        int trainCntr = 0;
-        int testCntr = 0;
-
-        for (Integer idx: sortedTestIndices){ // guarantee order as iterator
-            testVectors[testCntr] = (LabeledVector)dataset.getRow(idx);
-            testCntr++;
-
-            for (int i = datasetCntr; i < idx; i++) {
-                trainVectors[trainCntr] = (LabeledVector)dataset.getRow(i);
-                trainCntr++;
-            }
-
-            datasetCntr = idx + 1;
-        }
-        if(datasetCntr < datasetSize){
-            for (int i = datasetCntr; i < datasetSize; i++) {
-                trainVectors[trainCntr] = (LabeledVector)dataset.getRow(i);
-                trainCntr++;
-            }
-        }
-
-        test = new LabeledDataset(testVectors, dataset.colSize());
-        train = new LabeledDataset(trainVectors, dataset.colSize());
-    }
-
-    /** This method generates "random double, integer" pairs, sort them, gets first "testSize" elements and returns appropriate indices */
-    @NotNull private TreeSet<Integer> getSortedIndices(int datasetSize, int testSize) {
-        Random rnd = new Random();
-        TreeMap<Double, Integer> randomIdxPairs = new TreeMap<>();
-        for (int i = 0; i < datasetSize; i++)
-            randomIdxPairs.put(rnd.nextDouble(), i);
-
-        final TreeMap<Double, Integer> testIdxPairs = randomIdxPairs.entrySet().stream()
-            .limit(testSize)
-            .collect(TreeMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), Map::putAll);
-
-        return new TreeSet<>(testIdxPairs.values());
-    }
-
-    /**
-     * Train subset of the whole dataset.
-     * @return Train subset.
-     */
-    public LabeledDataset train() {
-        return train;
-    }
-
-    /**
-     * Test subset of the whole dataset.
-     * @return Test subset.
-     */
-    public LabeledDataset test() {
-        return test;
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorDouble.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorDouble.java
deleted file mode 100644
index 1d0573d..0000000
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorDouble.java
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.structures;
-
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-
-/**
- * Labeled vector specialized to double label.
- *
- * @param <V> Type of vector.
- */
-public class LabeledVectorDouble<V extends Vector> extends LabeledVector<V, Double> {
-    /**
-     * Construct LabeledVectorDouble.
-     *
-     * @param vector Vector.
-     * @param lb Label.
-     */
-    public LabeledVectorDouble(V vector, Double lb) {
-        super(vector, lb);
-    }
-
-    /**
-     * Get label as double.
-     *
-     * @return label as double.
-     */
-    public double doubleLabel() {
-        return label();
-    }
-}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSet.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSet.java
new file mode 100644
index 0000000..e98d793
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSet.java
@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.structures;
+
+import org.apache.ignite.ml.math.exceptions.CardinalityException;
+import org.apache.ignite.ml.math.exceptions.NoDataException;
+import org.apache.ignite.ml.math.exceptions.knn.NoLabelVectorException;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+
+/**
+ * The set of labeled vectors used in local partition calculations.
+ */
+public class LabeledVectorSet<L, Row extends LabeledVector> extends Dataset<Row> implements AutoCloseable {
+    /**
+     * Default constructor (required by Externalizable).
+     */
+    public LabeledVectorSet() {
+        super();
+    }
+
+    /**
+     * Creates new Labeled Dataset and initialized with empty data structure.
+     *
+     * @param rowSize Amount of instances. Should be > 0.
+     * @param colSize Amount of attributes. Should be > 0.
+     * @param isDistributed Use distributed data structures to keep data.
+     */
+    public LabeledVectorSet(int rowSize, int colSize, boolean isDistributed){
+        this(rowSize, colSize, null, isDistributed);
+    }
+
+    /**
+     * Creates new local Labeled Dataset and initialized with empty data structure.
+     *
+     * @param rowSize Amount of instances. Should be > 0.
+     * @param colSize Amount of attributes. Should be > 0.
+     */
+    public LabeledVectorSet(int rowSize, int colSize){
+        this(rowSize, colSize, null, false);
+    }
+
+    /**
+     * Creates new Labeled Dataset and initialized with empty data structure.
+     *
+     * @param rowSize Amount of instances. Should be > 0.
+     * @param colSize Amount of attributes. Should be > 0
+     * @param featureNames Column names.
+     * @param isDistributed Use distributed data structures to keep data.
+     */
+    public LabeledVectorSet(int rowSize, int colSize, String[] featureNames, boolean isDistributed){
+        super(rowSize, colSize, featureNames, isDistributed);
+
+        initializeDataWithLabeledVectors();
+    }
+
+    /**
+     * Creates new Labeled Dataset by given data.
+     *
+     * @param data Should be initialized with one vector at least.
+     */
+    public LabeledVectorSet(Row[] data) {
+        super(data);
+    }
+
+    /** */
+    private void initializeDataWithLabeledVectors() {
+        data = (Row[])new LabeledVector[rowSize];
+        for (int i = 0; i < rowSize; i++)
+            data[i] = (Row)new LabeledVector(emptyVector(colSize, isDistributed), null);
+    }
+
+    /**
+     * Creates new Labeled Dataset by given data.
+     *
+     * @param data Should be initialized with one vector at least.
+     * @param colSize Amount of observed attributes in each vector.
+     */
+    public LabeledVectorSet(Row[] data, int colSize) {
+        super(data, colSize);
+    }
+
+
+    /**
+     * Creates new local Labeled Dataset by matrix and vector of labels.
+     *
+     * @param mtx Given matrix with rows as observations.
+     * @param lbs Labels of observations.
+     */
+    public LabeledVectorSet(double[][] mtx, double[] lbs) {
+       this(mtx, lbs, null, false);
+    }
+
+    /**
+     * Creates new Labeled Dataset by matrix and vector of labels.
+     *
+     * @param mtx Given matrix with rows as observations.
+     * @param lbs Labels of observations.
+     * @param featureNames Column names.
+     * @param isDistributed Use distributed data structures to keep data.
+     */
+    public LabeledVectorSet(double[][] mtx, double[] lbs, String[] featureNames, boolean isDistributed) {
+        super();
+        assert mtx != null;
+        assert lbs != null;
+
+        if(mtx.length != lbs.length)
+            throw new CardinalityException(lbs.length, mtx.length);
+
+        if(mtx[0] == null)
+            throw new NoDataException("Pass filled array, the first vector is empty");
+
+        this.rowSize = lbs.length;
+        this.colSize = mtx[0].length;
+
+        if(featureNames == null)
+            generateFeatureNames();
+        else {
+            assert colSize == featureNames.length;
+            convertStringNamesToFeatureMetadata(featureNames);
+        }
+
+        data = (Row[])new LabeledVector[rowSize];
+        for (int i = 0; i < rowSize; i++){
+
+            data[i] = (Row)new LabeledVector(emptyVector(colSize, isDistributed), lbs[i]);
+            for (int j = 0; j < colSize; j++) {
+                try {
+                    data[i].features().set(j, mtx[i][j]);
+                } catch (ArrayIndexOutOfBoundsException e) {
+                    throw new NoDataException("No data in given matrix by coordinates (" + i + "," + j + ")");
+                }
+            }
+        }
+    }
+
+    /**
+     * Returns label if label is attached or null if label is missed.
+     *
+     * @param idx Index of observation.
+     * @return Label.
+     */
+    public double label(int idx) {
+        LabeledVector labeledVector = data[idx];
+
+        if(labeledVector!=null)
+            return (double)labeledVector.label();
+        else
+            return Double.NaN;
+    }
+
+    /**
+     * Returns new copy of labels of all labeled vectors NOTE: This method is useful for copying labels from test
+     * dataset.
+     *
+     * @return Copy of labels.
+     */
+    public double[] labels() {
+        assert data != null;
+        assert data.length > 0;
+
+        double[] labels = new double[data.length];
+
+        for (int i = 0; i < data.length; i++)
+            labels[i] = (double)data[i].label();
+
+        return labels;
+    }
+
+    /**
+     * Fill the label with given value.
+     *
+     * @param idx Index of observation.
+     * @param lb The given label.
+     */
+    public void setLabel(int idx, double lb) {
+        LabeledVector<Vector, Double> labeledVector = data[idx];
+
+        if(labeledVector != null)
+            labeledVector.setLabel(lb);
+        else
+            throw new NoLabelVectorException(idx);
+    }
+
+    /** */
+    public static Vector emptyVector(int size, boolean isDistributed) {
+            return new DenseVector(size);
+    }
+
+    /** Makes copy with new Label objects and old features and Metadata objects. */
+    public LabeledVectorSet copy(){
+        LabeledVectorSet res = new LabeledVectorSet(this.data, this.colSize);
+        res.isDistributed = this.isDistributed;
+        res.meta = this.meta;
+        for (int i = 0; i < rowSize; i++)
+            res.setLabel(i, this.label(i));
+
+        return res;
+    }
+
+    /** Closes LabeledDataset. */
+    @Override public void close() throws Exception {
+
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSetTestTrainPair.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSetTestTrainPair.java
new file mode 100644
index 0000000..d06dfd0
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/structures/LabeledVectorSetTestTrainPair.java
@@ -0,0 +1,114 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.structures;
+
+import java.io.Serializable;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Class for splitting Labeled Dataset on train and test sets.
+ */
+public class LabeledVectorSetTestTrainPair implements Serializable {
+    /** Data to keep train set. */
+    private LabeledVectorSet train;
+
+    /** Data to keep test set. */
+    private LabeledVectorSet test;
+
+    /**
+     * Creates two subsets of given dataset.
+     * <p>
+     * NOTE: This method uses next algorithm with O(n log n) by calculations and O(n) by memory.
+     * </p>
+     * @param dataset The dataset to split on train and test subsets.
+     * @param testPercentage The percentage of the test subset.
+     */
+    public LabeledVectorSetTestTrainPair(LabeledVectorSet dataset, double testPercentage) {
+        assert testPercentage > 0.0;
+        assert testPercentage < 1.0;
+        final int datasetSize = dataset.rowSize();
+        assert datasetSize > 2;
+
+        final int testSize = (int)Math.floor(datasetSize * testPercentage);
+        final int trainSize = datasetSize - testSize;
+
+        final TreeSet<Integer> sortedTestIndices = getSortedIndices(datasetSize, testSize);
+
+        LabeledVector[] testVectors = new LabeledVector[testSize];
+        LabeledVector[] trainVectors = new LabeledVector[trainSize];
+
+        int datasetCntr = 0;
+        int trainCntr = 0;
+        int testCntr = 0;
+
+        for (Integer idx: sortedTestIndices){ // guarantee order as iterator
+            testVectors[testCntr] = (LabeledVector)dataset.getRow(idx);
+            testCntr++;
+
+            for (int i = datasetCntr; i < idx; i++) {
+                trainVectors[trainCntr] = (LabeledVector)dataset.getRow(i);
+                trainCntr++;
+            }
+
+            datasetCntr = idx + 1;
+        }
+        if(datasetCntr < datasetSize){
+            for (int i = datasetCntr; i < datasetSize; i++) {
+                trainVectors[trainCntr] = (LabeledVector)dataset.getRow(i);
+                trainCntr++;
+            }
+        }
+
+        test = new LabeledVectorSet(testVectors, dataset.colSize());
+        train = new LabeledVectorSet(trainVectors, dataset.colSize());
+    }
+
+    /** This method generates "random double, integer" pairs, sort them, gets first "testSize" elements and returns appropriate indices */
+    @NotNull private TreeSet<Integer> getSortedIndices(int datasetSize, int testSize) {
+        Random rnd = new Random();
+        TreeMap<Double, Integer> randomIdxPairs = new TreeMap<>();
+        for (int i = 0; i < datasetSize; i++)
+            randomIdxPairs.put(rnd.nextDouble(), i);
+
+        final TreeMap<Double, Integer> testIdxPairs = randomIdxPairs.entrySet().stream()
+            .limit(testSize)
+            .collect(TreeMap::new, (m, e) -> m.put(e.getKey(), e.getValue()), Map::putAll);
+
+        return new TreeSet<>(testIdxPairs.values());
+    }
+
+    /**
+     * Train subset of the whole dataset.
+     * @return Train subset.
+     */
+    public LabeledVectorSet train() {
+        return train;
+    }
+
+    /**
+     * Test subset of the whole dataset.
+     * @return Test subset.
+     */
+    public LabeledVectorSet test() {
+        return test;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/partition/LabeledDatasetPartitionDataBuilderOnHeap.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/partition/LabeledDatasetPartitionDataBuilderOnHeap.java
index b4e552b..0351037 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/partition/LabeledDatasetPartitionDataBuilderOnHeap.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/structures/partition/LabeledDatasetPartitionDataBuilderOnHeap.java
@@ -23,18 +23,18 @@
 import org.apache.ignite.ml.dataset.UpstreamEntry;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 
 /**
- * Partition data builder that builds {@link LabeledDataset}.
+ * Partition data builder that builds {@link LabeledVectorSet}.
  *
  * @param <K> Type of a key in <tt>upstream</tt> data.
  * @param <V> Type of a value in <tt>upstream</tt> data.
  * @param <C> Type of a partition <tt>context</tt>.
  */
 public class LabeledDatasetPartitionDataBuilderOnHeap<K, V, C extends Serializable>
-    implements PartitionDataBuilder<K, V, C, LabeledDataset<Double, LabeledVector>> {
+    implements PartitionDataBuilder<K, V, C, LabeledVectorSet<Double, LabeledVector>> {
     /** */
     private static final long serialVersionUID = -7820760153954269227L;
 
@@ -57,8 +57,8 @@
     }
 
     /** {@inheritDoc} */
-    @Override public LabeledDataset<Double, LabeledVector> build(Iterator<UpstreamEntry<K, V>> upstreamData,
-        long upstreamDataSize, C ctx) {
+    @Override public LabeledVectorSet<Double, LabeledVector> build(Iterator<UpstreamEntry<K, V>> upstreamData,
+                                                                   long upstreamDataSize, C ctx) {
         int xCols = -1;
         double[][] x = null;
         double[] y = new double[Math.toIntExact(upstreamDataSize)];
@@ -82,6 +82,6 @@
 
             ptr++;
         }
-        return new LabeledDataset<>(x, y);
+        return new LabeledVectorSet<>(x, y);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/structures/preprocessing/LabeledDatasetLoader.java b/modules/ml/src/main/java/org/apache/ignite/ml/structures/preprocessing/LabeledDatasetLoader.java
index 5c20d9c..f370cbd 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/structures/preprocessing/LabeledDatasetLoader.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/structures/preprocessing/LabeledDatasetLoader.java
@@ -28,8 +28,8 @@
 import org.apache.ignite.ml.math.exceptions.knn.EmptyFileException;
 import org.apache.ignite.ml.math.exceptions.knn.FileParsingException;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.jetbrains.annotations.NotNull;
 
 /** Data pre-processing step which loads data from different file types. */
@@ -43,8 +43,8 @@
      * @param isFallOnBadData Fall on incorrect data if true.
      * @return Labeled Dataset parsed from file.
      */
-    public static LabeledDataset loadFromTxtFile(Path pathToFile, String separator, boolean isDistributed,
-        boolean isFallOnBadData) throws IOException {
+    public static LabeledVectorSet loadFromTxtFile(Path pathToFile, String separator, boolean isDistributed,
+                                                   boolean isFallOnBadData) throws IOException {
         Stream<String> stream = Files.lines(pathToFile);
         List<String> list = new ArrayList<>();
         stream.forEach(list::add);
@@ -81,7 +81,7 @@
                 for (int i = 0; i < vectors.size(); i++)
                     data[i] = new LabeledVector(vectors.get(i), labels.get(i));
 
-                return new LabeledDataset(data, colSize);
+                return new LabeledVectorSet(data, colSize);
             }
             else
                 throw new NoDataException("File should contain first row with data");
@@ -93,7 +93,7 @@
     /** */
     @NotNull private static Vector parseFeatures(Path pathToFile, boolean isDistributed, boolean isFallOnBadData,
         int colSize, int rowIdx, String[] rowData) {
-        final Vector vec = LabeledDataset.emptyVector(colSize, isDistributed);
+        final Vector vec = LabeledVectorSet.emptyVector(colSize, isDistributed);
 
         if (isFallOnBadData && rowData.length != colSize + 1)
             throw new CardinalityException(colSize + 1, rowData.length);
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationModel.java
index 4771e4a..f5d2b28 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationModel.java
@@ -31,11 +31,11 @@
     /** */
     private static final long serialVersionUID = -996984622291440226L;
 
-    /** Output label format. -1 and +1 for false value and raw distances from the separating hyperplane otherwise. */
+    /** Output label format. '0' and '1' for false value and raw distances from the separating hyperplane otherwise. */
     private boolean isKeepingRawLabels = false;
 
-    /** Threshold to assign +1 label to the observation if raw value more than this threshold. */
-    private double threshold = 0.0;
+    /** Threshold to assign '1' label to the observation if raw value more than this threshold. */
+    private double threshold = 0.5;
 
     /** Multiplier of the objects's vector required to make prediction. */
     private Vector weights;
@@ -99,7 +99,7 @@
         if (isKeepingRawLabels)
             return res;
         else
-            return res - threshold > 0 ? 1.0 : -1.0;
+            return res - threshold > 0 ? 1.0 : 0;
     }
 
     /**
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationTrainer.java
index 1ae896f..2c621c8 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearBinaryClassificationTrainer.java
@@ -17,24 +17,26 @@
 
 package org.apache.ignite.ml.svm;
 
-import java.util.concurrent.ThreadLocalRandom;
+import java.util.Random;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.PartitionDataBuilder;
 import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.StorageConstants;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
-import org.apache.ignite.ml.structures.LabeledDataset;
+import org.apache.ignite.ml.math.primitives.vector.impl.SparseVector;
 import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.structures.partition.LabeledDatasetPartitionDataBuilderOnHeap;
 import org.apache.ignite.ml.trainers.SingleLabelDatasetTrainer;
 import org.jetbrains.annotations.NotNull;
 
 /**
  * Base class for a soft-margin SVM linear classification trainer based on the communication-efficient distributed dual
- * coordinate ascent algorithm (CoCoA) with hinge-loss function. <p> This trainer takes input as Labeled Dataset with -1
- * and +1 labels for two classes and makes binary classification. </p> The paper about this algorithm could be found
+ * coordinate ascent algorithm (CoCoA) with hinge-loss function. <p> This trainer takes input as Labeled Dataset with 0
+ * and 1 labels for two classes and makes binary classification. </p> The paper about this algorithm could be found
  * here https://arxiv.org/abs/1409.1458.
  */
 public class SVMLinearBinaryClassificationTrainer extends SingleLabelDatasetTrainer<SVMLinearBinaryClassificationModel> {
@@ -47,36 +49,68 @@
     /** Regularization parameter. */
     private double lambda = 0.4;
 
+    /** The seed number. */
+    private long seed;
+
     /**
      * Trains model based on the specified data.
      *
-     * @param datasetBuilder   Dataset builder.
+     * @param datasetBuilder Dataset builder.
      * @param featureExtractor Feature extractor.
-     * @param lbExtractor      Label extractor.
+     * @param lbExtractor Label extractor.
      * @return Model.
      */
     @Override public <K, V> SVMLinearBinaryClassificationModel fit(DatasetBuilder<K, V> datasetBuilder,
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
 
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> SVMLinearBinaryClassificationModel updateModel(SVMLinearBinaryClassificationModel mdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
         assert datasetBuilder != null;
 
-        PartitionDataBuilder<K, V, EmptyContext, LabeledDataset<Double, LabeledVector>> partDataBuilder = new LabeledDatasetPartitionDataBuilderOnHeap<>(
+        IgniteBiFunction<K, V, Double> patchedLbExtractor = (k, v) ->  {
+            final Double lb = lbExtractor.apply(k, v);
+            if (lb == 0.0)
+                return -1.0;
+            else
+                return lb;
+        };
+
+        PartitionDataBuilder<K, V, EmptyContext, LabeledVectorSet<Double, LabeledVector>> partDataBuilder = new LabeledDatasetPartitionDataBuilderOnHeap<>(
             featureExtractor,
-            lbExtractor
+            patchedLbExtractor
         );
 
         Vector weights;
 
-        try(Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset = datasetBuilder.build(
+        try (Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset = datasetBuilder.build(
             (upstream, upstreamSize) -> new EmptyContext(),
             partDataBuilder
         )) {
-            final int cols = dataset.compute(org.apache.ignite.ml.structures.Dataset::colSize, (a, b) -> a == null ? b : a);
-            final int weightVectorSizeWithIntercept = cols + 1;
-            weights = initializeWeightsWithZeros(weightVectorSizeWithIntercept);
+            if (mdl == null) {
+                final int cols = dataset.compute(org.apache.ignite.ml.structures.Dataset::colSize, (a, b) -> {
+                    if (a == null)
+                        return b == null ? 0 : b;
+                    if (b == null)
+                        return a;
+                    return b;
+                });
+
+                final int weightVectorSizeWithIntercept = cols + 1;
+                weights = initializeWeightsWithZeros(weightVectorSizeWithIntercept);
+            } else
+                weights = getStateVector(mdl);
 
             for (int i = 0; i < this.getAmountOfIterations(); i++) {
                 Vector deltaWeights = calculateUpdates(weights, dataset);
+                if (deltaWeights == null)
+                    return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
+
                 weights = weights.plus(deltaWeights); // creates new vector
             }
         } catch (Exception e) {
@@ -85,13 +119,37 @@
         return new SVMLinearBinaryClassificationModel(weights.viewPart(1, weights.size() - 1), weights.get(0));
     }
 
-    /** */
-    @NotNull private Vector initializeWeightsWithZeros(int vectorSize) {
-            return new DenseVector(vectorSize);
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(SVMLinearBinaryClassificationModel mdl) {
+        return true;
+    }
+
+    /**
+     * @param mdl Model.
+     * @return vector of model weights with intercept.
+     */
+    private Vector getStateVector(SVMLinearBinaryClassificationModel mdl) {
+        double intercept = mdl.intercept();
+        Vector weights = mdl.weights();
+
+        int stateVectorSize = weights.size() + 1;
+        Vector res = weights.isDense() ?
+            new DenseVector(stateVectorSize) :
+            new SparseVector(stateVectorSize, StorageConstants.RANDOM_ACCESS_MODE);
+
+        res.set(0, intercept);
+        weights.nonZeroes().forEach(ith -> res.set(ith.index(), ith.get()));
+        return res;
     }
 
     /** */
-    private Vector calculateUpdates(Vector weights, Dataset<EmptyContext, LabeledDataset<Double, LabeledVector>> dataset) {
+    @NotNull private Vector initializeWeightsWithZeros(int vectorSize) {
+        return new DenseVector(vectorSize);
+    }
+
+    /** */
+    private Vector calculateUpdates(Vector weights,
+        Dataset<EmptyContext, LabeledVectorSet<Double, LabeledVector>> dataset) {
         return dataset.compute(data -> {
             Vector copiedWeights = weights.copy();
             Vector deltaWeights = initializeWeightsWithZeros(weights.size());
@@ -100,8 +158,10 @@
             Vector tmpAlphas = initializeWeightsWithZeros(amountOfObservation);
             Vector deltaAlphas = initializeWeightsWithZeros(amountOfObservation);
 
+            Random random = new Random(seed);
+
             for (int i = 0; i < this.getAmountOfLocIterations(); i++) {
-                int randomIdx = ThreadLocalRandom.current().nextInt(amountOfObservation);
+                int randomIdx = random.nextInt(amountOfObservation);
 
                 Deltas deltas = getDeltas(data, copiedWeights, amountOfObservation, tmpAlphas, randomIdx);
 
@@ -112,11 +172,17 @@
                 deltaAlphas.set(randomIdx, deltaAlphas.get(randomIdx) + deltas.deltaAlpha);
             }
             return deltaWeights;
-        }, (a, b) -> a == null ? b : a.plus(b));
+        }, (a, b) -> {
+            if (a == null)
+                return b == null ? new DenseVector() : b;
+            if (b == null)
+                return a;
+            return a.plus(b);
+        });
     }
 
     /** */
-    private Deltas getDeltas(LabeledDataset data, Vector copiedWeights, int amountOfObservation, Vector tmpAlphas,
+    private Deltas getDeltas(LabeledVectorSet data, Vector copiedWeights, int amountOfObservation, Vector tmpAlphas,
         int randomIdx) {
         LabeledVector row = (LabeledVector)data.getRow(randomIdx);
         Double lb = (Double)row.label();
@@ -155,7 +221,7 @@
             double qii = v.dot(v);
             double newAlpha = calcNewAlpha(alpha, gradient, qii);
 
-            Vector deltaWeights = v.times(lb * (newAlpha - alpha) / (this.lambda() * amountOfObservation));
+            Vector deltaWeights = v.times(lb * (newAlpha - alpha) / (this.getLambda() * amountOfObservation));
 
             return new Deltas(newAlpha - alpha, deltaWeights);
         }
@@ -174,7 +240,7 @@
     /** */
     private double calcGradient(double lb, Vector v, Vector weights, int amountOfObservation) {
         double dotProduct = v.dot(weights);
-        return (lb * dotProduct - 1.0) * (this.lambda() * amountOfObservation);
+        return (lb * dotProduct - 1.0) * (this.getLambda() * amountOfObservation);
     }
 
     /** */
@@ -191,6 +257,7 @@
 
     /**
      * Set up the regularization parameter.
+     *
      * @param lambda The regularization parameter. Should be more than 0.0.
      * @return Trainer with new lambda parameter value.
      */
@@ -201,16 +268,18 @@
     }
 
     /**
-     * Gets the regularization lambda.
-     * @return The parameter value.
+     * Get the regularization lambda.
+     *
+     * @return The property value.
      */
-    public double lambda() {
+    public double getLambda() {
         return lambda;
     }
 
     /**
-     * Gets the amount of outer iterations of SCDA algorithm.
-     * @return The parameter value.
+     * Get the amount of outer iterations of SCDA algorithm.
+     *
+     * @return The property value.
      */
     public int getAmountOfIterations() {
         return amountOfIterations;
@@ -218,6 +287,7 @@
 
     /**
      * Set up the amount of outer iterations of SCDA algorithm.
+     *
      * @param amountOfIterations The parameter value.
      * @return Trainer with new amountOfIterations parameter value.
      */
@@ -227,8 +297,9 @@
     }
 
     /**
-     * Gets the amount of local iterations of SCDA algorithm.
-     * @return The parameter value.
+     * Get the amount of local iterations of SCDA algorithm.
+     *
+     * @return The property value.
      */
     public int getAmountOfLocIterations() {
         return amountOfLocIterations;
@@ -236,6 +307,7 @@
 
     /**
      * Set up the amount of local iterations of SCDA algorithm.
+     *
      * @param amountOfLocIterations The parameter value.
      * @return Trainer with new amountOfLocIterations parameter value.
      */
@@ -244,6 +316,25 @@
         return this;
     }
 
+    /**
+     * Get the seed number.
+     *
+     * @return The property value.
+     */
+    public long getSeed() {
+        return seed;
+    }
+
+    /**
+     * Set up the seed.
+     *
+     * @param seed The parameter value.
+     * @return Model with new seed parameter value.
+     */
+    public SVMLinearBinaryClassificationTrainer withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
 }
 
 /** This is a helper class to handle pair results which are returned from the calculation method. */
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationModel.java b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationModel.java
index 4b04824..46bf4b2 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationModel.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationModel.java
@@ -21,6 +21,7 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.TreeMap;
 import org.apache.ignite.ml.Exportable;
 import org.apache.ignite.ml.Exporter;
@@ -102,4 +103,12 @@
     public void add(double clsLb, SVMLinearBinaryClassificationModel mdl) {
         models.put(clsLb, mdl);
     }
+
+    /**
+     * @param clsLb Class label.
+     * @return model trained for target class if it exists.
+     */
+    public Optional<SVMLinearBinaryClassificationModel> getModelForClass(double clsLb) {
+        return Optional.of(models.get(clsLb));
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationTrainer.java
index 7069c4d..7cbb1dc 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/svm/SVMLinearMultiClassClassificationTrainer.java
@@ -51,26 +51,41 @@
     /** Regularization parameter. */
     private double lambda = 0.2;
 
+    /** The seed number. */
+    private long seed;
+
     /**
      * Trains model based on the specified data.
      *
-     * @param datasetBuilder   Dataset builder.
+     * @param datasetBuilder Dataset builder.
      * @param featureExtractor Feature extractor.
-     * @param lbExtractor      Label extractor.
+     * @param lbExtractor Label extractor.
      * @return Model.
      */
     @Override public <K, V> SVMLinearMultiClassClassificationModel fit(DatasetBuilder<K, V> datasetBuilder,
-                                                                IgniteBiFunction<K, V, Vector> featureExtractor,
-                                                                IgniteBiFunction<K, V, Double> lbExtractor) {
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+        return updateModel(null, datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> SVMLinearMultiClassClassificationModel updateModel(
+        SVMLinearMultiClassClassificationModel mdl,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
         List<Double> classes = extractClassLabels(datasetBuilder, lbExtractor);
+        if (classes.isEmpty())
+            return getLastTrainedModelOrThrowEmptyDatasetException(mdl);
 
         SVMLinearMultiClassClassificationModel multiClsMdl = new SVMLinearMultiClassClassificationModel();
 
         classes.forEach(clsLb -> {
             SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer()
-                .withAmountOfIterations(this.amountOfIterations())
-                .withAmountOfLocIterations(this.amountOfLocIterations())
-                .withLambda(this.lambda());
+                .withAmountOfIterations(this.getAmountOfIterations())
+                .withAmountOfLocIterations(this.getAmountOfLocIterations())
+                .withLambda(this.getLambda())
+                .withSeed(this.seed);
 
             IgniteBiFunction<K, V, Double> lbTransformer = (k, v) -> {
                 Double lb = lbExtractor.apply(k, v);
@@ -78,16 +93,62 @@
                 if (lb.equals(clsLb))
                     return 1.0;
                 else
-                    return -1.0;
+                    return 0.0;
             };
-            multiClsMdl.add(clsLb, trainer.fit(datasetBuilder, featureExtractor, lbTransformer));
+
+            SVMLinearBinaryClassificationModel model;
+            if (mdl == null)
+                model = learnNewModel(trainer, datasetBuilder, featureExtractor, lbTransformer);
+            else
+                model = updateModel(mdl, clsLb, trainer, datasetBuilder, featureExtractor, lbTransformer);
+            multiClsMdl.add(clsLb, model);
         });
 
         return multiClsMdl;
     }
 
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(SVMLinearMultiClassClassificationModel mdl) {
+        return true;
+    }
+
+    /**
+     * Trains model based on the specified data.
+     *
+     * @param svmTrainer Prepared SVM trainer.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     */
+    private <K, V> SVMLinearBinaryClassificationModel learnNewModel(SVMLinearBinaryClassificationTrainer svmTrainer,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return svmTrainer.fit(datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /**
+     * Updates already learned model or fit new model if there is no model for current class label.
+     *
+     * @param multiClsMdl Learning multi-class model.
+     * @param clsLb Current class label.
+     * @param svmTrainer Prepared SVM trainer.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     */
+    private <K, V> SVMLinearBinaryClassificationModel updateModel(SVMLinearMultiClassClassificationModel multiClsMdl,
+        Double clsLb, SVMLinearBinaryClassificationTrainer svmTrainer, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return multiClsMdl.getModelForClass(clsLb)
+            .map(learnedModel -> svmTrainer.update(learnedModel, datasetBuilder, featureExtractor, lbExtractor))
+            .orElseGet(() -> svmTrainer.fit(datasetBuilder, featureExtractor, lbExtractor));
+    }
+
     /** Iterates among dataset and collects class labels. */
-    private <K, V> List<Double> extractClassLabels(DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Double> lbExtractor) {
+    private <K, V> List<Double> extractClassLabels(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
         assert datasetBuilder != null;
 
         PartitionDataBuilder<K, V, EmptyContext, LabelPartitionDataOnHeap> partDataBuilder = new LabelPartitionDataBuilderOnHeap<>(lbExtractor);
@@ -103,13 +164,20 @@
 
                 final double[] lbs = data.getY();
 
-                for (double lb : lbs) locClsLabels.add(lb);
+                for (double lb : lbs)
+                    locClsLabels.add(lb);
 
                 return locClsLabels;
-            }, (a, b) -> a == null ? b : Stream.of(a, b).flatMap(Collection::stream).collect(Collectors.toSet()));
+            }, (a, b) -> {
+                if (a == null)
+                    return b == null ? new HashSet<>() : b;
+                if (b == null)
+                    return a;
+                return Stream.of(a, b).flatMap(Collection::stream).collect(Collectors.toSet());
+            });
 
-            res.addAll(clsLabels);
-
+            if (clsLabels != null)
+                res.addAll(clsLabels);
         } catch (Exception e) {
             throw new RuntimeException(e);
         }
@@ -122,27 +190,27 @@
      * @param lambda The regularization parameter. Should be more than 0.0.
      * @return Trainer with new lambda parameter value.
      */
-    public SVMLinearMultiClassClassificationTrainer  withLambda(double lambda) {
+    public SVMLinearMultiClassClassificationTrainer withLambda(double lambda) {
         assert lambda > 0.0;
         this.lambda = lambda;
         return this;
     }
 
     /**
-     * Gets the regularization lambda.
+     * Get the regularization lambda.
      *
-     * @return The parameter value.
+     * @return The property value.
      */
-    public double lambda() {
+    public double getLambda() {
         return lambda;
     }
 
     /**
      * Gets the amount of outer iterations of SCDA algorithm.
      *
-     * @return The parameter value.
+     * @return The property value.
      */
-    public int amountOfIterations() {
+    public int getAmountOfIterations() {
         return amountOfIterations;
     }
 
@@ -152,7 +220,7 @@
      * @param amountOfIterations The parameter value.
      * @return Trainer with new amountOfIterations parameter value.
      */
-    public SVMLinearMultiClassClassificationTrainer  withAmountOfIterations(int amountOfIterations) {
+    public SVMLinearMultiClassClassificationTrainer withAmountOfIterations(int amountOfIterations) {
         this.amountOfIterations = amountOfIterations;
         return this;
     }
@@ -160,9 +228,9 @@
     /**
      * Gets the amount of local iterations of SCDA algorithm.
      *
-     * @return The parameter value.
+     * @return The property value.
      */
-    public int amountOfLocIterations() {
+    public int getAmountOfLocIterations() {
         return amountOfLocIterations;
     }
 
@@ -172,8 +240,28 @@
      * @param amountOfLocIterations The parameter value.
      * @return Trainer with new amountOfLocIterations parameter value.
      */
-    public SVMLinearMultiClassClassificationTrainer  withAmountOfLocIterations(int amountOfLocIterations) {
+    public SVMLinearMultiClassClassificationTrainer withAmountOfLocIterations(int amountOfLocIterations) {
         this.amountOfLocIterations = amountOfLocIterations;
         return this;
     }
+
+    /**
+     * Gets the seed number.
+     *
+     * @return The property value.
+     */
+    public long getSeed() {
+        return seed;
+    }
+
+    /**
+     * Set up the seed.
+     *
+     * @param seed The parameter value.
+     * @return Model with new seed parameter value.
+     */
+    public SVMLinearMultiClassClassificationTrainer withSeed(long seed) {
+        this.seed = seed;
+        return this;
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/trainers/DatasetTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/trainers/DatasetTrainer.java
index 2f5d5d6..490c53d 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/trainers/DatasetTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/trainers/DatasetTrainer.java
@@ -26,8 +26,10 @@
 import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder;
 import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
 import org.apache.ignite.ml.environment.LearningEnvironment;
+import org.apache.ignite.ml.environment.logging.MLLogger;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.jetbrains.annotations.NotNull;
 
 /**
  * Interface for trainers. Trainer is just a function which produces model from the data.
@@ -53,6 +55,71 @@
         IgniteBiFunction<K, V, L> lbExtractor);
 
     /**
+     * Gets state of model in arguments, compare it with training parameters of trainer and if they are fit then
+     * trainer updates model in according to new data and return new model. In other case trains new model.
+     *
+     * @param mdl Learned model.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    public <K,V> M update(M mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, L> lbExtractor) {
+
+        if(mdl != null) {
+            if(checkState(mdl)) {
+                return updateModel(mdl, datasetBuilder, featureExtractor, lbExtractor);
+            } else {
+                environment.logger(getClass()).log(
+                    MLLogger.VerboseLevel.HIGH,
+                    "Model cannot be updated because of initial state of " +
+                        "it doesn't corresponds to trainer parameters"
+                );
+            }
+        }
+
+        return fit(datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /**
+     * @param mdl Model.
+     * @return true if current critical for training parameters correspond to parameters from last training.
+     */
+    protected abstract boolean checkState(M mdl);
+
+    /**
+     * Used on update phase when given dataset is empty.
+     * If last trained model exist then method returns it. In other case throws IllegalArgumentException.
+     *
+     * @param lastTrainedMdl Model.
+     */
+    @NotNull protected M getLastTrainedModelOrThrowEmptyDatasetException(M lastTrainedMdl) {
+        String msg = "Cannot train model on empty dataset";
+        if (lastTrainedMdl != null) {
+            environment.logger(getClass()).log(MLLogger.VerboseLevel.HIGH, msg);
+            return lastTrainedMdl;
+        } else
+            throw new EmptyDatasetException();
+    }
+
+    /**
+     * Gets state of model in arguments, update in according to new data and return new model.
+     *
+     * @param mdl Learned model.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    protected abstract <K, V> M updateModel(M mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, L> lbExtractor);
+
+    /**
      * Trains model based on the specified data.
      *
      * @param ignite Ignite instance.
@@ -73,6 +140,27 @@
     }
 
     /**
+     * Gets state of model in arguments, update in according to new data and return new model.
+     *
+     * @param mdl Learned model.
+     * @param ignite Ignite instance.
+     * @param cache Ignite cache.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    public <K, V> M update(M mdl, Ignite ignite, IgniteCache<K, V> cache,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, L> lbExtractor) {
+        return update(
+            mdl, new CacheBasedDatasetBuilder<>(ignite, cache),
+            featureExtractor,
+            lbExtractor
+        );
+    }
+
+    /**
      * Trains model based on the specified data.
      *
      * @param ignite Ignite instance.
@@ -94,6 +182,28 @@
     }
 
     /**
+     * Gets state of model in arguments, update in according to new data and return new model.
+     *
+     * @param mdl Learned model.
+     * @param ignite Ignite instance.
+     * @param cache Ignite cache.
+     * @param filter Filter for {@code upstream} data.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    public <K, V> M update(M mdl, Ignite ignite, IgniteCache<K, V> cache, IgniteBiPredicate<K, V> filter,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, L> lbExtractor) {
+        return update(
+            mdl, new CacheBasedDatasetBuilder<>(ignite, cache, filter),
+            featureExtractor,
+            lbExtractor
+        );
+    }
+
+    /**
      * Trains model based on the specified data.
      *
      * @param data Data.
@@ -114,6 +224,27 @@
     }
 
     /**
+     * Gets state of model in arguments, update in according to new data and return new model.
+     *
+     * @param mdl Learned model.
+     * @param data Data.
+     * @param parts Number of partitions.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    public <K, V> M update(M mdl, Map<K, V> data, int parts, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, L> lbExtractor) {
+        return update(
+            mdl, new LocalDatasetBuilder<>(data, parts),
+            featureExtractor,
+            lbExtractor
+        );
+    }
+
+    /**
      * Trains model based on the specified data.
      *
      * @param data Data.
@@ -136,10 +267,47 @@
     }
 
     /**
+     * Gets state of model in arguments, update in according to new data and return new model.
+     *
+     * @param data Data.
+     * @param filter Filter for {@code upstream} data.
+     * @param parts Number of partitions.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return Updated model.
+     */
+    public <K, V> M update(M mdl, Map<K, V> data, IgniteBiPredicate<K, V> filter, int parts,
+        IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, L> lbExtractor) {
+        return update(
+            mdl, new LocalDatasetBuilder<>(data, filter, parts),
+            featureExtractor,
+            lbExtractor
+        );
+    }
+
+    /**
      * Sets learning Environment
      * @param environment Environment.
      */
     public void setEnvironment(LearningEnvironment environment) {
         this.environment = environment;
     }
+
+    /**
+     * EmptyDataset exception.
+     */
+    public static class EmptyDatasetException extends IllegalArgumentException {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 6914650522523293521L;
+
+        /**
+         * Constructs an instance of EmptyDatasetException.
+         */
+        public EmptyDatasetException() {
+            super("Cannot train model on empty dataset");
+        }
+    }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java
index de8994a..45774cb 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTree.java
@@ -54,7 +54,7 @@
     private final DecisionTreeLeafBuilder decisionTreeLeafBuilder;
 
     /** Use index structure instead of using sorting while learning. */
-    protected boolean useIndex = true;
+    protected boolean usingIdx = true;
 
     /**
      * Constructs a new distributed decision tree trainer.
@@ -77,7 +77,7 @@
         IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
         try (Dataset<EmptyContext, DecisionTreeData> dataset = datasetBuilder.build(
             new EmptyContextBuilder<>(),
-            new DecisionTreeDataBuilder<>(featureExtractor, lbExtractor, useIndex)
+            new DecisionTreeDataBuilder<>(featureExtractor, lbExtractor, usingIdx)
         )) {
             return fit(dataset);
         }
@@ -86,6 +86,29 @@
         }
     }
 
+    /**
+     * Trains new model based on dataset because there is no valid approach to update decision trees.
+     *
+     * @param mdl Learned model.
+     * @param datasetBuilder Dataset builder.
+     * @param featureExtractor Feature extractor.
+     * @param lbExtractor Label extractor.
+     * @param <K> Type of a key in {@code upstream} data.
+     * @param <V> Type of a value in {@code upstream} data.
+     * @return New model based on new dataset.
+     */
+    @Override public <K, V> DecisionTreeNode updateModel(DecisionTreeNode mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        return fit(datasetBuilder, featureExtractor, lbExtractor);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(DecisionTreeNode mdl) {
+        return true;
+    }
+
+    /** */
     public <K,V> DecisionTreeNode fit(Dataset<EmptyContext, DecisionTreeData> dataset) {
         return split(dataset, e -> true, 0, getImpurityMeasureCalculator(dataset));
     }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainer.java
index f8fc769..91ec8e1 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainer.java
@@ -91,7 +91,7 @@
      * @return Decision tree trainer.
      */
     public DecisionTreeClassificationTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
+        this.usingIdx = useIndex;
         return this;
     }
 
@@ -127,6 +127,6 @@
         for (Double lb : labels)
             encoder.put(lb, idx++);
 
-        return new GiniImpurityMeasureCalculator(encoder, useIndex);
+        return new GiniImpurityMeasureCalculator(encoder, usingIdx);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java
index 4c9aac9..ea57bcc 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainer.java
@@ -53,13 +53,13 @@
     }
 
     /**
-     * Sets useIndex parameter and returns trainer instance.
+     * Sets usingIdx parameter and returns trainer instance.
      *
-     * @param useIndex Use index.
+     * @param usingIdx Use index.
      * @return Decision tree trainer.
      */
-    public DecisionTreeRegressionTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
+    public DecisionTreeRegressionTrainer withUsingIdx(boolean usingIdx) {
+        this.usingIdx = usingIdx;
         return this;
     }
 
@@ -67,6 +67,6 @@
     @Override protected ImpurityMeasureCalculator<MSEImpurityMeasure> getImpurityMeasureCalculator(
         Dataset<EmptyContext, DecisionTreeData> dataset) {
 
-        return new MSEImpurityMeasureCalculator(useIndex);
+        return new MSEImpurityMeasureCalculator(usingIdx);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBBinaryClassifierOnTreesTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBBinaryClassifierOnTreesTrainer.java
index 4d87b47..b99dc2f 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBBinaryClassifierOnTreesTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBBinaryClassifierOnTreesTrainer.java
@@ -27,13 +27,13 @@
  */
 public class GDBBinaryClassifierOnTreesTrainer extends GDBBinaryClassifierTrainer {
     /** Max depth. */
-    private final int maxDepth;
+    private int maxDepth;
 
     /** Min impurity decrease. */
-    private final double minImpurityDecrease;
+    private double minImpurityDecrease;
 
-    /** Use index structure instead of using sorting while learning. */
-    private boolean useIndex = true;
+    /** Use index structure instead of using sorting during the learning process. */
+    private boolean usingIdx = true;
 
     /**
      * Constructs instance of GDBBinaryClassifierOnTreesTrainer.
@@ -53,22 +53,71 @@
 
     /** {@inheritDoc} */
     @NotNull @Override protected DecisionTreeRegressionTrainer buildBaseModelTrainer() {
-        return new DecisionTreeRegressionTrainer(maxDepth, minImpurityDecrease).withUseIndex(useIndex);
-    }
-
-    /**
-     * Sets useIndex parameter and returns trainer instance.
-     *
-     * @param useIndex Use index.
-     * @return Decision tree trainer.
-     */
-    public GDBBinaryClassifierOnTreesTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
-        return this;
+        return new DecisionTreeRegressionTrainer(maxDepth, minImpurityDecrease).withUsingIdx(usingIdx);
     }
 
     /** {@inheritDoc} */
     @Override protected GDBLearningStrategy getLearningStrategy() {
-        return new GDBOnTreesLearningStrategy(useIndex);
+        return new GDBOnTreesLearningStrategy(usingIdx);
+    }
+
+    /**
+     * Set useIndex parameter and returns trainer instance.
+     *
+     * @param usingIdx Use index.
+     * @return Decision tree trainer.
+     */
+    public GDBBinaryClassifierOnTreesTrainer withUsingIdx(boolean usingIdx) {
+        this.usingIdx = usingIdx;
+        return this;
+    }
+
+    /**
+     * Get the max depth.
+     *
+     * @return The property value.
+     */
+    public int getMaxDepth() {
+        return maxDepth;
+    }
+
+    /**
+     * Set up the max depth.
+     *
+     * @param maxDepth The parameter value.
+     * @return Decision tree trainer.
+     */
+    public GDBBinaryClassifierOnTreesTrainer setMaxDepth(int maxDepth) {
+        this.maxDepth = maxDepth;
+        return this;
+    }
+
+    /**
+     * Get the min impurity decrease.
+     *
+     * @return The property value.
+     */
+    public double getMinImpurityDecrease() {
+        return minImpurityDecrease;
+    }
+
+    /**
+     * Set up the min impurity decrease.
+     *
+     * @param minImpurityDecrease The parameter value.
+     * @return Decision tree trainer.
+     */
+    public GDBBinaryClassifierOnTreesTrainer setMinImpurityDecrease(double minImpurityDecrease) {
+        this.minImpurityDecrease = minImpurityDecrease;
+        return this;
+    }
+
+    /**
+     * Get the using index structure property instead of using sorting during the learning process.
+     *
+     * @return The property value.
+     */
+    public boolean isUsingIdx() {
+        return usingIdx;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java
index 8589a79..6ebbda1 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBOnTreesLearningStrategy.java
@@ -17,12 +17,13 @@
 
 package org.apache.ignite.ml.tree.boosting;
 
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.boosting.GDBLearningStrategy;
+import org.apache.ignite.ml.composition.boosting.GDBTrainer;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
 import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
@@ -54,22 +55,30 @@
     }
 
     /** {@inheritDoc} */
-    @Override public <K, V> List<Model<Vector, Double>> learnModels(DatasetBuilder<K, V> datasetBuilder,
-        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+    @Override public <K, V> List<Model<Vector, Double>> update(GDBTrainer.GDBModel mdlToUpdate,
+        DatasetBuilder<K, V> datasetBuilder, IgniteBiFunction<K, V, Vector> featureExtractor,
+        IgniteBiFunction<K, V, Double> lbExtractor) {
 
         DatasetTrainer<? extends Model<Vector, Double>, Double> trainer = baseMdlTrainerBuilder.get();
         assert trainer instanceof DecisionTree;
         DecisionTree decisionTreeTrainer = (DecisionTree) trainer;
 
-        List<Model<Vector, Double>> models = new ArrayList<>();
+        List<Model<Vector, Double>> models = initLearningState(mdlToUpdate);
+
+        ConvergenceChecker<K,V> convCheck = checkConvergenceStgyFactory.create(sampleSize,
+            externalLbToInternalMapping, loss, datasetBuilder, featureExtractor, lbExtractor);
+
         try (Dataset<EmptyContext, DecisionTreeData> dataset = datasetBuilder.build(
             new EmptyContextBuilder<>(),
             new DecisionTreeDataBuilder<>(featureExtractor, lbExtractor, useIndex)
         )) {
             for (int i = 0; i < cntOfIterations; i++) {
-                double[] weights = Arrays.copyOf(compositionWeights, i);
+                double[] weights = Arrays.copyOf(compositionWeights, models.size());
                 WeightedPredictionsAggregator aggregator = new WeightedPredictionsAggregator(weights, meanLabelValue);
-                Model<Vector, Double> currComposition = new ModelsComposition(models, aggregator);
+                ModelsComposition currComposition = new ModelsComposition(models, aggregator);
+
+                if(convCheck.isConverged(dataset, currComposition))
+                    break;
 
                 dataset.compute(part -> {
                     if(part.getCopyOfOriginalLabels() == null)
@@ -78,7 +87,7 @@
                     for(int j = 0; j < part.getLabels().length; j++) {
                         double mdlAnswer = currComposition.apply(VectorUtils.of(part.getFeatures()[j]));
                         double originalLbVal = externalLbToInternalMapping.apply(part.getCopyOfOriginalLabels()[j]);
-                        part.getLabels()[j] = -lossGradient.apply(sampleSize, originalLbVal, mdlAnswer);
+                        part.getLabels()[j] = -loss.gradient(sampleSize, originalLbVal, mdlAnswer);
                     }
                 });
 
@@ -92,6 +101,7 @@
             throw new RuntimeException(e);
         }
 
+        compositionWeights = Arrays.copyOf(compositionWeights, models.size());
         return models;
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBRegressionOnTreesTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBRegressionOnTreesTrainer.java
index e2a183c..b6c0b48 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBRegressionOnTreesTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/boosting/GDBRegressionOnTreesTrainer.java
@@ -27,13 +27,13 @@
  */
 public class GDBRegressionOnTreesTrainer extends GDBRegressionTrainer {
     /** Max depth. */
-    private final int maxDepth;
+    private int maxDepth;
 
     /** Min impurity decrease. */
-    private final double minImpurityDecrease;
+    private double minImpurityDecrease;
 
     /** Use index structure instead of using sorting while learning. */
-    private boolean useIndex = true;
+    private boolean usingIdx = true;
 
     /**
      * Constructs instance of GDBRegressionOnTreesTrainer.
@@ -53,22 +53,71 @@
 
     /** {@inheritDoc} */
     @NotNull @Override protected DecisionTreeRegressionTrainer buildBaseModelTrainer() {
-        return new DecisionTreeRegressionTrainer(maxDepth, minImpurityDecrease).withUseIndex(useIndex);
+        return new DecisionTreeRegressionTrainer(maxDepth, minImpurityDecrease).withUsingIdx(usingIdx);
     }
 
     /**
-     * Sets useIndex parameter and returns trainer instance.
+     * Set useIndex parameter and returns trainer instance.
      *
-     * @param useIndex Use index.
+     * @param usingIdx Use index.
      * @return Decision tree trainer.
      */
-    public GDBRegressionOnTreesTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
+    public GDBRegressionOnTreesTrainer withUsingIdx(boolean usingIdx) {
+        this.usingIdx = usingIdx;
         return this;
     }
 
+    /**
+     * Get the max depth.
+     *
+     * @return The property value.
+     */
+    public int getMaxDepth() {
+        return maxDepth;
+    }
+
+    /**
+     * Set up the max depth.
+     *
+     * @param maxDepth The parameter value.
+     * @return Decision tree trainer.
+     */
+    public GDBRegressionOnTreesTrainer setMaxDepth(int maxDepth) {
+        this.maxDepth = maxDepth;
+        return this;
+    }
+
+    /**
+     * Get the min impurity decrease.
+     *
+     * @return The property value.
+     */
+    public double getMinImpurityDecrease() {
+        return minImpurityDecrease;
+    }
+
+    /**
+     * Set up the min impurity decrease.
+     *
+     * @param minImpurityDecrease The parameter value.
+     * @return Decision tree trainer.
+     */
+    public GDBRegressionOnTreesTrainer setMinImpurityDecrease(double minImpurityDecrease) {
+        this.minImpurityDecrease = minImpurityDecrease;
+        return this;
+    }
+
+    /**
+     * Get the using index structure property instead of using sorting during the learning process.
+     *
+     * @return The property value.
+     */
+    public boolean isUsingIdx() {
+        return usingIdx;
+    }
+
     /** {@inheritDoc} */
     @Override protected GDBLearningStrategy getLearningStrategy() {
-        return new GDBOnTreesLearningStrategy(useIndex);
+        return new GDBOnTreesLearningStrategy(usingIdx);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/data/DecisionTreeData.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/data/DecisionTreeData.java
index d5750ea..b8a16dc 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/data/DecisionTreeData.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/data/DecisionTreeData.java
@@ -19,18 +19,14 @@
 
 import java.util.ArrayList;
 import java.util.List;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
 import org.apache.ignite.ml.tree.TreeFilter;
 
 /**
- * A partition {@code data} of the containing matrix of features and vector of labels stored in heap.
+ * A partition {@code data} of the containing matrix of features and vector of labels stored in heap
+ * with index on features.
  */
-public class DecisionTreeData implements AutoCloseable {
-    /** Matrix with features. */
-    private final double[][] features;
-
-    /** Vector with labels. */
-    private final double[] labels;
-
+public class DecisionTreeData extends FeatureMatrixWithLabelsOnHeapData implements AutoCloseable {
     /** Copy of vector with original labels. Auxiliary for Gradient Boosting on Trees.*/
     private double[] copyOfOriginalLabels;
 
@@ -48,10 +44,7 @@
      * @param buildIdx Build index.
      */
     public DecisionTreeData(double[][] features, double[] labels, boolean buildIdx) {
-        assert features.length == labels.length : "Features and labels have to be the same length";
-
-        this.features = features;
-        this.labels = labels;
+        super(features, labels);
         this.buildIndex = buildIdx;
 
         indexesCache = new ArrayList<>();
@@ -68,6 +61,8 @@
     public DecisionTreeData filter(TreeFilter filter) {
         int size = 0;
 
+        double[][] features = getFeatures();
+        double[] labels = getLabels();
         for (int i = 0; i < features.length; i++)
             if (filter.test(features[i]))
                 size++;
@@ -95,12 +90,15 @@
      * @param col Column.
      */
     public void sort(int col) {
-        sort(col, 0, features.length - 1);
+        sort(col, 0, getFeatures().length - 1);
     }
 
     /** */
     private void sort(int col, int from, int to) {
         if (from < to) {
+            double[][] features = getFeatures();
+            double[] labels = getLabels();
+
             double pivot = features[(from + to) / 2][col];
 
             int i = from, j = to;
@@ -131,19 +129,11 @@
     }
 
     /** */
-    public double[][] getFeatures() {
-        return features;
-    }
-
-    /** */
-    public double[] getLabels() {
-        return labels;
-    }
-
     public double[] getCopyOfOriginalLabels() {
         return copyOfOriginalLabels;
     }
 
+    /** */
     public void setCopyOfOriginalLabels(double[] copyOfOriginalLabels) {
         this.copyOfOriginalLabels = copyOfOriginalLabels;
     }
@@ -170,7 +160,7 @@
 
         if (depth == indexesCache.size()) {
             if (depth == 0)
-                indexesCache.add(new TreeDataIndex(features, labels));
+                indexesCache.add(new TreeDataIndex(getFeatures(), getLabels()));
             else {
                 TreeDataIndex lastIndex = indexesCache.get(depth - 1);
                 indexesCache.add(lastIndex.filter(filter));
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java
index bbbb2a9..7832584 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainer.java
@@ -17,72 +17,97 @@
 
 package org.apache.ignite.ml.tree.randomforest;
 
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator;
-import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator;
-import org.apache.ignite.ml.trainers.DatasetTrainer;
-import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer;
-import org.apache.ignite.ml.tree.DecisionTreeNode;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.feature.ObjectHistogram;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.tree.randomforest.data.TreeRoot;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogram;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniHistogramsComputer;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.ClassifierLeafValuesComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.LeafValuesComputer;
 
 /**
- * Random forest classifier trainer.
+ * Classifier trainer based on RandomForest algorithm.
  */
-public class RandomForestClassifierTrainer extends RandomForestTrainer {
-    /**
-     * Constructs new instance of RandomForestClassifierTrainer.
-     *
-     * @param predictionsAggregator Predictions aggregator.
-     * @param featureVectorSize Feature vector size.
-     * @param maximumFeaturesCntPerMdl Number of features to draw from original features vector to train each model.
-     * @param ensembleSize Ensemble size.
-     * @param samplePartSizePerMdl Size of sample part in percent to train one model.
-     * @param maxDeep Max decision tree deep.
-     * @param minImpurityDecrease Min impurity decrease.
-     */
-    public RandomForestClassifierTrainer(PredictionsAggregator predictionsAggregator,
-        int featureVectorSize,
-        int maximumFeaturesCntPerMdl,
-        int ensembleSize,
-        double samplePartSizePerMdl,
-        int maxDeep,
-        double minImpurityDecrease) {
-
-        super(predictionsAggregator, featureVectorSize, maximumFeaturesCntPerMdl,
-            ensembleSize, samplePartSizePerMdl, maxDeep, minImpurityDecrease);
-    }
+public class RandomForestClassifierTrainer
+    extends RandomForestTrainer<ObjectHistogram<BootstrappedVector>, GiniHistogram, RandomForestClassifierTrainer> {
+    /** Label mapping. */
+    private Map<Double, Integer> lblMapping = new HashMap<>();
 
     /**
-     * Constructs new instance of RandomForestClassifierTrainer.
+     * Constructs an instance of RandomForestClassifierTrainer.
      *
-     * @param featureVectorSize Feature vector size.
-     * @param maximumFeaturesCntPerMdl Number of features to draw from original features vector to train each model.
-     * @param ensembleSize Ensemble size.
-     * @param samplePartSizePerMdl Size of sample part in percent to train one model.
-     * @param maxDeep Max decision tree deep.
-     * @param minImpurityDecrease Min impurity decrease.
+     * @param meta Features meta.
      */
-    public RandomForestClassifierTrainer(int featureVectorSize,
-        int maximumFeaturesCntPerMdl,
-        int ensembleSize,
-        double samplePartSizePerMdl,
-        int maxDeep, double minImpurityDecrease) {
-
-        this(new OnMajorityPredictionsAggregator(), featureVectorSize, maximumFeaturesCntPerMdl,
-            ensembleSize, samplePartSizePerMdl, maxDeep, minImpurityDecrease);
+    public RandomForestClassifierTrainer(List<FeatureMeta> meta) {
+        super(meta);
     }
 
     /** {@inheritDoc} */
-    @Override protected DatasetTrainer<DecisionTreeNode, Double> buildDatasetTrainerForModel() {
-        return new DecisionTreeClassificationTrainer(maxDeep, minImpurityDecrease).withUseIndex(useIndex);
+    @Override protected RandomForestClassifierTrainer instance() {
+        return this;
     }
 
     /**
-     * Sets useIndex parameter and returns trainer instance.
+     * Aggregates all unique labels from dataset and assigns integer id value for each label.
+     * This id can be used as index in arrays or lists.
      *
-     * @param useIndex Use index.
-     * @return Decision tree trainer.
+     * @param dataset Dataset.
+     * @return true if initialization was done.
      */
-    public RandomForestClassifierTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
-        return this;
+    @Override protected boolean init(Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+        Set<Double> uniqLabels = dataset.compute(
+            x -> {
+                Set<Double> labels = new HashSet<>();
+                for (int i = 0; i < x.getRowsCount(); i++)
+                    labels.add(x.getRow(i).label());
+                return labels;
+            },
+            (l, r) -> {
+                if (l == null)
+                    return r;
+                if (r == null)
+                    return l;
+                Set<Double> lbls = new HashSet<>();
+                lbls.addAll(l);
+                lbls.addAll(r);
+                return lbls;
+            }
+        );
+
+        if(uniqLabels == null)
+            return false;
+
+        int i = 0;
+        for (Double label : uniqLabels)
+            lblMapping.put(label, i++);
+
+        return super.init(dataset);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ModelsComposition buildComposition(List<TreeRoot> models) {
+        return new ModelsComposition(models, new OnMajorityPredictionsAggregator());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ImpurityHistogramsComputer<GiniHistogram> createImpurityHistogramsComputer() {
+        return new GiniHistogramsComputer(lblMapping);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected LeafValuesComputer<ObjectHistogram<BootstrappedVector>> createLeafStatisticsAggregator() {
+        return new ClassifierLeafValuesComputer(lblMapping);
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java
index 009fff2..ab1d036 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainer.java
@@ -17,73 +17,49 @@
 
 package org.apache.ignite.ml.tree.randomforest;
 
+import java.util.List;
+import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator;
-import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator;
-import org.apache.ignite.ml.trainers.DatasetTrainer;
-import org.apache.ignite.ml.tree.DecisionTreeNode;
-import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.tree.randomforest.data.TreeRoot;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogram;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogramComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.LeafValuesComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.MeanValueStatistic;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.RegressionLeafValuesComputer;
 
 /**
- * Random forest regression trainer.
+ * Regression trainer based on RandomForest algorithm.
  */
-public class RandomForestRegressionTrainer extends RandomForestTrainer {
+public class RandomForestRegressionTrainer
+    extends RandomForestTrainer<MeanValueStatistic, MSEHistogram, RandomForestRegressionTrainer> {
     /**
-     * Constructs new instance of RandomForestRegressionTrainer.
+     * Constructs an instance of RandomForestRegressionTrainer.
      *
-     * @param predictionsAggregator Predictions aggregator.
-     * @param featureVectorSize Feature vector size.
-     * @param maximumFeaturesCntPerMdl Number of features to draw from original features vector to train each model.
-     * @param ensembleSize Ensemble size.
-     * @param samplePartSizePerMdl Size of sample part in percent to train one model.
-     * @param maxDeep Max decision tree deep.
-     * @param minImpurityDecrease Min impurity decrease.
+     * @param meta Features meta.
      */
-    public RandomForestRegressionTrainer(PredictionsAggregator predictionsAggregator,
-        int featureVectorSize,
-        int maximumFeaturesCntPerMdl,
-        int ensembleSize,
-        double samplePartSizePerMdl,
-        int maxDeep,
-        double minImpurityDecrease) {
-
-        super(predictionsAggregator, featureVectorSize, maximumFeaturesCntPerMdl,
-            ensembleSize, samplePartSizePerMdl, maxDeep, minImpurityDecrease);
-    }
-
-    /**
-     * Constructs new instance of RandomForestRegressionTrainer.
-     *
-     * @param featureVectorSize Feature vector size.
-     * @param maximumFeaturesCntPerMdl Number of features to draw from original features vector to train each model.
-     * @param ensembleSize Ensemble size.
-     * @param samplePartSizePerMdl Size of sample part in percent to train one model.
-     * @param maxDeep Max decision tree deep.
-     * @param minImpurityDecrease Min impurity decrease.
-     */
-    public RandomForestRegressionTrainer(int featureVectorSize,
-        int maximumFeaturesCntPerMdl,
-        int ensembleSize,
-        double samplePartSizePerMdl,
-        int maxDeep,
-        double minImpurityDecrease) {
-
-        this(new MeanValuePredictionsAggregator(), featureVectorSize, maximumFeaturesCntPerMdl,
-            ensembleSize, samplePartSizePerMdl, maxDeep, minImpurityDecrease);
+    public RandomForestRegressionTrainer(List<FeatureMeta> meta) {
+        super(meta);
     }
 
     /** {@inheritDoc} */
-    @Override protected DatasetTrainer<DecisionTreeNode, Double> buildDatasetTrainerForModel() {
-        return new DecisionTreeRegressionTrainer(maxDeep, minImpurityDecrease).withUseIndex(useIndex);
+    @Override protected RandomForestRegressionTrainer instance() {
+        return this;
     }
 
-    /**
-     * Sets useIndex parameter and returns trainer instance.
-     *
-     * @param useIndex Use index.
-     * @return Decision tree trainer.
-     */
-    public RandomForestRegressionTrainer withUseIndex(boolean useIndex) {
-        this.useIndex = useIndex;
-        return this;
+    /** {@inheritDoc} */
+    @Override protected ModelsComposition buildComposition(List<TreeRoot> models) {
+        return new ModelsComposition(models, new MeanValuePredictionsAggregator());
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ImpurityHistogramsComputer<MSEHistogram> createImpurityHistogramsComputer() {
+        return new MSEHistogramComputer();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected LeafValuesComputer<MeanValueStatistic> createLeafStatisticsAggregator() {
+        return new RegressionLeafValuesComputer();
     }
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java
index 8608f09..c617d8d 100644
--- a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/RandomForestTrainer.java
@@ -17,45 +17,383 @@
 
 package org.apache.ignite.ml.tree.randomforest;
 
-import org.apache.ignite.ml.composition.BaggingModelTrainer;
-import org.apache.ignite.ml.composition.predictionsaggregator.PredictionsAggregator;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Queue;
+import java.util.Random;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetBuilder;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.trainers.DatasetTrainer;
+import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies;
+import org.apache.ignite.ml.tree.randomforest.data.NodeId;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+import org.apache.ignite.ml.tree.randomforest.data.TreeNode;
+import org.apache.ignite.ml.tree.randomforest.data.TreeRoot;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityComputer;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.ImpurityHistogramsComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.LeafValuesComputer;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.NormalDistributionStatistics;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.NormalDistributionStatisticsComputer;
 
 /**
- * Abstract random forest trainer.
+ * Class represents a realization of Random Forest algorithm. Main idea of this realization is that at each learning
+ * iteration it tries to aggregate statistics on impurity for each corner nodes (leaves that may be splitted on a new
+ * iteration) in each trees in random forest. It requires one map-reduce operation over learning dataset. After such
+ * aggregation the algorithm selects split points for each corner nodes of create leaf nodes. The algorithm stops when
+ * there is no splitting for nodes in trees. At last stage the algorithm aggregates statistics on labels for leaf nodes
+ * in one map-reduce step and sets values to leafs based these statistics.
+ *
+ * @param <L> Type of statistics aggregator for leaf values computing.
+ * @param <S> Type of impurity computer specific for algorithm.
+ * @param <T> Type of child of RandomForestTrainer using in with-methods.
  */
-public abstract class RandomForestTrainer extends BaggingModelTrainer {
-    /** Max decision tree deep. */
-    protected final int maxDeep;
+public abstract class RandomForestTrainer<L, S extends ImpurityComputer<BootstrappedVector, S>,
+    T extends RandomForestTrainer<L, S, T>> extends DatasetTrainer<ModelsComposition, Double> {
+    /** Bucket size factor. */
+    private static final double BUCKET_SIZE_FACTOR = (1 / 10.0);
 
-    /** Min impurity decrease. */
-    protected final double minImpurityDecrease;
+    /** Count of trees. */
+    private int cntOfTrees = 1;
 
-    /** Use index structure instead of using sorting while decision tree learning. */
-    protected boolean useIndex = false;
+    /** Subsample size. */
+    private double subSampleSize = 1.0;
+
+    /** Max depth. */
+    private int maxDepth = 5;
+
+    /** Min impurity delta. */
+    private double minImpurityDelta = 0.0;
+
+    /** Features Meta. */
+    private List<FeatureMeta> meta;
+
+    /** Features per tree. */
+    private int featuresPerTree = 5;
+
+    /** Seed. */
+    private long seed = 1234L;
+
+    /** Random generator. */
+    private Random random = new Random(seed);
+
+    /** Nodes to learn selection strategy. */
+    private Function<Queue<TreeNode>, List<TreeNode>> nodesToLearnSelectionStrgy = this::defaultNodesToLearnSelectionStrgy;
 
     /**
-     * Constructs new instance of BaggingModelTrainer.
+     * Create an instance of RandomForestTrainer.
      *
-     * @param predictionsAggregator Predictions aggregator.
-     * @param featureVectorSize Feature vector size.
-     * @param maximumFeaturesCntPerMdl Number of features to draw from original features vector to train each model.
-     * @param ensembleSize Ensemble size.
-     * @param samplePartSizePerMdl Size of sample part in percent to train one model.
-     * @param maxDeep Max decision tree deep.
-     * @param minImpurityDecrease Min impurity decrease.
+     * @param meta Features Meta.
      */
-    public RandomForestTrainer(PredictionsAggregator predictionsAggregator,
-        int featureVectorSize,
-        int maximumFeaturesCntPerMdl,
-        int ensembleSize,
-        double samplePartSizePerMdl,
-        int maxDeep,
-        double minImpurityDecrease) {
-
-        super(predictionsAggregator, featureVectorSize, maximumFeaturesCntPerMdl,
-            ensembleSize, samplePartSizePerMdl);
-
-        this.maxDeep = maxDeep;
-        this.minImpurityDecrease = minImpurityDecrease;
+    public RandomForestTrainer(List<FeatureMeta> meta) {
+        this.meta = meta;
+        this.featuresPerTree = FeaturesCountSelectionStrategies.ALL.apply(meta);
     }
+
+    /** {@inheritDoc} */
+    @Override public <K, V> ModelsComposition fit(DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+        List<TreeRoot> models = null;
+        try (Dataset<EmptyContext, BootstrappedDatasetPartition> dataset = datasetBuilder.build(
+            new EmptyContextBuilder<>(),
+            new BootstrappedDatasetBuilder<>(featureExtractor, lbExtractor, cntOfTrees, subSampleSize))) {
+
+            if(!init(dataset))
+                return buildComposition(Collections.emptyList());
+            models = fit(dataset);
+        }
+        catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+
+        assert models != null;
+        return buildComposition(models);
+    }
+
+    /**
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    protected abstract T instance();
+
+    /**
+     * @param cntOfTrees Count of trees.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withCountOfTrees(int cntOfTrees) {
+        this.cntOfTrees = cntOfTrees;
+        return instance();
+    }
+
+    /**
+     * @param subSampleSize Subsample size.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withSubSampleSize(double subSampleSize) {
+        this.subSampleSize = subSampleSize;
+        return instance();
+    }
+
+    /**
+     * @param maxDepth Max depth.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withMaxDepth(int maxDepth) {
+        this.maxDepth = maxDepth;
+        return instance();
+    }
+
+    /**
+     * @param minImpurityDelta Min impurity delta.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withMinImpurityDelta(double minImpurityDelta) {
+        this.minImpurityDelta = minImpurityDelta;
+        return instance();
+    }
+
+    /**
+     * @param strgy Strgy.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withFeaturesCountSelectionStrgy(Function<List<FeatureMeta>, Integer> strgy) {
+        this.featuresPerTree = strgy.apply(meta);
+        return instance();
+    }
+
+    /**
+     * Sets strategy for selection nodes from learning queue in each iteration.
+     *
+     * @param strgy Strgy.
+     */
+    public T withNodesToLearnSelectionStrgy(Function<Queue<TreeNode>, List<TreeNode>> strgy) {
+        this.nodesToLearnSelectionStrgy = strgy;
+        return instance();
+    }
+
+    /**
+     * @param seed Seed.
+     * @return an instance of current object with valid type in according to inheritance.
+     */
+    public T withSeed(long seed) {
+        this.seed = seed;
+        this.random = new Random(seed);
+        return instance();
+    }
+
+    /**
+     * Init-step before learning. It may be useful collecting labels statistics step for classification.
+     *
+     * @param dataset Dataset.
+     */
+    protected boolean init(Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+        return true;
+    }
+
+    /**
+     * Trains model based on the specified data.
+     *
+     * @param dataset Dataset.
+     * @return list of decision trees.
+     */
+    private List<TreeRoot> fit(Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+        Queue<TreeNode> treesQueue = createRootsQueue();
+        ArrayList<TreeRoot> roots = initTrees(treesQueue);
+        Map<Integer, BucketMeta> histMeta = computeHistogramMeta(meta, dataset);
+        if(histMeta.isEmpty())
+            return Collections.emptyList();
+
+        ImpurityHistogramsComputer<S> histogramsComputer = createImpurityHistogramsComputer();
+        while (!treesQueue.isEmpty()) {
+            Map<NodeId, TreeNode> nodesToLearn = getNodesToLearn(treesQueue);
+            Map<NodeId, ImpurityHistogramsComputer.NodeImpurityHistograms<S>> nodesImpHists = histogramsComputer
+                .aggregateImpurityStatistics(roots, histMeta, nodesToLearn, dataset);
+            if (nodesToLearn.size() != nodesImpHists.size())
+                throw new IllegalStateException();
+
+            for (NodeId nodeId : nodesImpHists.keySet())
+                split(treesQueue, nodesToLearn, nodesImpHists.get(nodeId));
+        }
+
+        createLeafStatisticsAggregator().setValuesForLeaves(roots, dataset);
+        return roots;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected boolean checkState(ModelsComposition mdl) {
+        ModelsComposition fakeComposition = buildComposition(Collections.emptyList());
+        return mdl.getPredictionsAggregator().getClass() == fakeComposition.getPredictionsAggregator().getClass();
+    }
+
+    /** {@inheritDoc} */
+    @Override protected <K, V> ModelsComposition updateModel(ModelsComposition mdl, DatasetBuilder<K, V> datasetBuilder,
+        IgniteBiFunction<K, V, Vector> featureExtractor, IgniteBiFunction<K, V, Double> lbExtractor) {
+
+        ArrayList<Model<Vector, Double>> oldModels = new ArrayList<>(mdl.getModels());
+        ModelsComposition newModels = fit(datasetBuilder, featureExtractor, lbExtractor);
+        oldModels.addAll(newModels.getModels());
+
+        return new ModelsComposition(oldModels, mdl.getPredictionsAggregator());
+    }
+
+    /**
+     * Split node with NodeId if need.
+     *
+     * @param learningQueue Learning queue.
+     * @param nodesToLearn Nodes to learn at current iteration.
+     * @param nodeImpurityHistograms Impurity histograms on current iteration.
+     */
+    private void split(Queue<TreeNode> learningQueue, Map<NodeId, TreeNode> nodesToLearn,
+        ImpurityHistogramsComputer.NodeImpurityHistograms<S> nodeImpurityHistograms) {
+
+        TreeNode cornerNode = nodesToLearn.get(nodeImpurityHistograms.getNodeId());
+        Optional<NodeSplit> bestSplit = nodeImpurityHistograms.findBestSplit();
+
+        if (needSplit(cornerNode, bestSplit)) {
+            List<TreeNode> children = bestSplit.get().split(cornerNode);
+            learningQueue.addAll(children);
+        }
+        else {
+            if (bestSplit.isPresent())
+                bestSplit.get().createLeaf(cornerNode);
+            else {
+                cornerNode.setImpurity(Double.NEGATIVE_INFINITY);
+                cornerNode.toLeaf(0.0);
+            }
+        }
+    }
+
+    /**
+     * Creates an instance of Histograms Computer corresponding to RF implementation.
+     */
+    protected abstract ImpurityHistogramsComputer<S> createImpurityHistogramsComputer();
+
+    /**
+     * Creates an instance of Leaf Statistics Aggregator corresponding to RF implementation.
+     */
+    protected abstract LeafValuesComputer<L> createLeafStatisticsAggregator();
+
+    /**
+     * Creates list of trees.
+     *
+     * @param treesQueue Trees queue.
+     * @return List of trees.
+     */
+    protected ArrayList<TreeRoot> initTrees(Queue<TreeNode> treesQueue) {
+        assert featuresPerTree > 0;
+        ArrayList<TreeRoot> roots = new ArrayList<>();
+
+        List<Integer> allFeatureIds = IntStream.range(0, meta.size()).boxed().collect(Collectors.toList());
+        for (TreeNode node : treesQueue) {
+            Collections.shuffle(allFeatureIds, random);
+            Set<Integer> featuresSubspace = allFeatureIds.stream()
+                .limit(featuresPerTree).collect(Collectors.toSet());
+            roots.add(new TreeRoot(node, featuresSubspace));
+        }
+
+        return roots;
+    }
+
+    /**
+     * Compute bucket metas based on feature metas and learning dataset.
+     *
+     * @param meta Features meta.
+     * @param dataset Dataset.
+     * @return bucket metas.
+     */
+    private Map<Integer, BucketMeta> computeHistogramMeta(List<FeatureMeta> meta,
+        Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+
+        List<NormalDistributionStatistics> stats = new NormalDistributionStatisticsComputer()
+            .computeStatistics(meta, dataset);
+        if(stats == null)
+            return Collections.emptyMap();
+
+        Map<Integer, BucketMeta> bucketsMeta = new HashMap<>();
+        for (int i = 0; i < stats.size(); i++) {
+            BucketMeta bucketMeta = new BucketMeta(meta.get(i));
+            if (!bucketMeta.getFeatureMeta().isCategoricalFeature()) {
+                NormalDistributionStatistics stat = stats.get(i);
+                bucketMeta.setMinVal(stat.min());
+                bucketMeta.setBucketSize(stat.std() * BUCKET_SIZE_FACTOR);
+            }
+            bucketsMeta.put(i, bucketMeta);
+        }
+        return bucketsMeta;
+    }
+
+    /**
+     * Creates an initial nodes queue for learning based on countOfTrees. Each of these nodes represents a root decision
+     * trees in random forest.
+     *
+     * @return initial nodes queue.
+     */
+    private Queue<TreeNode> createRootsQueue() {
+        Queue<TreeNode> roots = new LinkedList<>();
+        for (int i = 0; i < cntOfTrees; i++)
+            roots.add(new TreeNode(1, i));
+        return roots;
+    }
+
+    /**
+     * Select set of nodes for leaning from queue based in nodesToLearnSelectionStrategy.
+     *
+     * @param queue Learning queue.
+     * @return collection of nodes for learning iterations.
+     */
+    private Map<NodeId, TreeNode> getNodesToLearn(Queue<TreeNode> queue) {
+        return nodesToLearnSelectionStrgy.apply(queue).stream()
+            .collect(Collectors.toMap(TreeNode::getId, node -> node));
+    }
+
+    /**
+     * Default nodesToLearnSelectionStrategy that returns all nodes from queue.
+     *
+     * @param queue Queue.
+     * @return List of nodes to learn.
+     */
+    private List<TreeNode> defaultNodesToLearnSelectionStrgy(Queue<TreeNode> queue) {
+        List<TreeNode> res = new ArrayList<>(queue);
+        queue.clear();
+        return res;
+    }
+
+    /**
+     * Check current note for the need for splitting.
+     *
+     * @param parentNode Parent node.
+     * @param split Best node split.
+     * @return true if split is needed.
+     */
+    boolean needSplit(TreeNode parentNode, Optional<NodeSplit> split) {
+        return split.isPresent() && parentNode.getImpurity() - split.get().getImpurity() > minImpurityDelta &&
+            parentNode.getDepth() < (maxDepth + 1);
+    }
+
+    /**
+     * Returns composition of built trees.
+     *
+     * @param models Models.
+     * @return composition of built trees.
+     */
+    protected abstract ModelsComposition buildComposition(List<TreeRoot> models);
+
 }
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/FeaturesCountSelectionStrategies.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/FeaturesCountSelectionStrategies.java
new file mode 100644
index 0000000..a246277
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/FeaturesCountSelectionStrategies.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import java.util.List;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+
+/**
+ * Class contains a default implementations of some features count selection strategies for random forest.
+ */
+public class FeaturesCountSelectionStrategies {
+    /** */
+    public static IgniteFunction<List<FeatureMeta>, Integer> SQRT = (List<FeatureMeta> meta) -> {
+        return (int)Math.sqrt(meta.size());
+    };
+
+    /** */
+    public static IgniteFunction<List<FeatureMeta>, Integer> ALL = (List<FeatureMeta> meta) -> {
+        return meta.size();
+    };
+
+    /** */
+    public static IgniteFunction<List<FeatureMeta>, Integer> LOG2 = (List<FeatureMeta> meta) -> {
+        return (int)(Math.log(meta.size()) / Math.log(2));
+    };
+
+    /** */
+    public static IgniteFunction<List<FeatureMeta>, Integer> ONE_THIRD = (List<FeatureMeta> meta) -> {
+        return (int)(meta.size() / 3);
+    };
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java
new file mode 100644
index 0000000..2f40af3
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeId.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import org.apache.ignite.lang.IgniteBiTuple;
+
+/**
+ * Class represents Node id in Random Forest consisting of tree id and node id in tree in according to
+ * breadth-first search in tree.
+ */
+public class NodeId extends IgniteBiTuple<Integer, Long> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 4400852013136423333L;
+
+    /**
+     * Create an instance of NodeId.
+     *
+     * @param treeId Tree id.
+     * @param nodeId Node id.
+     */
+    public NodeId(Integer treeId, Long nodeId) {
+        super(treeId, nodeId);
+    }
+
+    /**
+     *
+     * @return Tree id.
+     */
+    public int treeId() {
+        return get1();
+    }
+
+    /**
+     *
+     * @return Node id.
+     */
+    public long nodeId() {
+        return get2();
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java
new file mode 100644
index 0000000..52d0b74
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/NodeSplit.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import java.util.List;
+
+/**
+ * Class represents a split point for decision tree.
+ */
+public class NodeSplit {
+    /** Feature id in feature vector. */
+    private final int featureId;
+
+    /** Feature split value. */
+    private final double value;
+
+    /** Impurity at this split point. */
+    private final double impurity;
+
+    /**
+     * Creates an instance of NodeSplit.
+     *
+     * @param featureId Feature id.
+     * @param value Feature split value.
+     * @param impurity Impurity value.
+     */
+    public NodeSplit(int featureId, double value, double impurity) {
+        this.featureId = featureId;
+        this.value = value;
+        this.impurity = impurity;
+    }
+
+    /**
+     * Split node from parameter onto two children nodes.
+     *
+     * @param node Node.
+     * @return list of children.
+     */
+    public List<TreeNode> split(TreeNode node) {
+        List<TreeNode> children = node.toConditional(featureId, value);
+        node.setImpurity(impurity);
+        return children;
+    }
+
+    /**
+     * Convert node to leaf.
+     *
+     * @param node Node.
+     */
+    public void createLeaf(TreeNode node) {
+        node.setImpurity(impurity);
+        node.toLeaf(0.0); //values will be set in last stage if training
+    }
+
+    /** */
+    public double getImpurity() {
+        return impurity;
+    }
+
+    /** */
+    public double getValue() {
+        return value;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java
new file mode 100644
index 0000000..eb06143
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeNode.java
@@ -0,0 +1,200 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Decision tree node class.
+ */
+public class TreeNode implements Model<Vector, Double>, Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -8546263332508653661L;
+
+    /**
+     * Type of node.
+     */
+    public enum Type {
+        /** Unknown. */
+        UNKNOWN,
+
+        /** Leaf node. */
+        LEAF,
+
+        /** Conditional node. */
+        CONDITIONAL
+    }
+
+    /** Id. */
+    private final NodeId id;
+
+    /** Feature id. */
+    private int featureId;
+
+    /** Value. */
+    private double value;
+
+    /** Type. */
+    private Type type;
+
+    /** Impurity. */
+    private double impurity;
+
+    /** Depth. */
+    private int depth;
+
+    /** Left branch. */
+    private TreeNode left;
+
+    /** Right branch. */
+    private TreeNode right;
+
+    /**
+     * Create an instance of TreeNode.
+     *
+     * @param id Id in according to breadth-first search ordering.
+     * @param treeId Tree id.
+     */
+    public TreeNode(long id, int treeId) {
+        this.id = new NodeId(treeId, id);
+        this.value = -1;
+        this.type = Type.UNKNOWN;
+        this.impurity = Double.POSITIVE_INFINITY;
+        this.depth = 1;
+    }
+
+    /** {@inheritDoc} */
+    public Double apply(Vector features) {
+        assert type != Type.UNKNOWN;
+
+        if (type == Type.LEAF)
+            return value;
+        else {
+            if (features.get(featureId) <= value)
+                return left.apply(features);
+            else
+                return right.apply(features);
+        }
+    }
+
+    /**
+     * Returns leaf node for feature vector in according to decision tree.
+     *
+     * @param features Features.
+     * @return Node.
+     */
+    public  NodeId predictNextNodeKey(Vector features) {
+        switch (type) {
+            case UNKNOWN:
+                return id;
+            case LEAF:
+                return id;
+            default:
+                if (features.get(featureId) <= value)
+                    return left.predictNextNodeKey(features);
+                else
+                    return right.predictNextNodeKey(features);
+        }
+    }
+
+    /**
+     * Convert node to conditional node.
+     *
+     * @param featureId Feature id.
+     * @param value Value.
+     */
+    public List<TreeNode> toConditional(int featureId, double value) {
+        assert type == Type.UNKNOWN;
+
+        toLeaf(value);
+        left = new TreeNode(2 * id.nodeId(), id.treeId());
+        right = new TreeNode(2 * id.nodeId() + 1, id.treeId());
+        this.type = Type.CONDITIONAL;
+        this.featureId = featureId;
+
+        left.depth = right.depth = depth + 1;
+        return Arrays.asList(left, right);
+    }
+
+    /**
+     * Convert node to leaf.
+     *
+     * @param value Value.
+     */
+    public void toLeaf(double value) {
+        assert type == Type.UNKNOWN;
+
+        this.value = value;
+        this.type = Type.LEAF;
+
+        this.left = null;
+        this.right = null;
+    }
+
+    /** */
+    public NodeId getId() {
+        return id;
+    }
+
+    /** */
+    public void setValue(double value) {
+        this.value = value;
+    }
+
+    /** */
+    public Type getType() {
+        return type;
+    }
+
+    /** */
+    public void setImpurity(double impurity) {
+        this.impurity = impurity;
+    }
+
+    /**
+     * @return impurity in current node.
+     */
+    public double getImpurity() {
+        return impurity;
+    }
+
+    /**
+     * @return depth of current node.
+     */
+    public int getDepth() {
+        return depth;
+    }
+
+    /**
+     * @return right subtree.
+     */
+    public TreeNode getLeft() {
+        return left;
+    }
+
+    /**
+     * @return left subtree.
+     */
+    public TreeNode getRight() {
+        return right;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java
new file mode 100644
index 0000000..e47868d
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/TreeRoot.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import org.apache.ignite.ml.Model;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Tree root class.
+ */
+public class TreeRoot implements Model<Vector, Double> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 531797299171329057L;
+
+    /** Root node. */
+    private TreeNode node;
+
+    /** Used features. */
+    private Set<Integer> usedFeatures;
+
+    /**
+     * Create an instance of TreeRoot.
+     *
+     * @param root Root.
+     * @param usedFeatures Used features.
+     */
+    public TreeRoot(TreeNode root, Set<Integer> usedFeatures) {
+        this.node = root;
+        this.usedFeatures = usedFeatures;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Double apply(Vector vector) {
+        return node.apply(vector);
+    }
+
+    /** */
+    public Set<Integer> getUsedFeatures() {
+        return usedFeatures;
+    }
+
+    /** */
+    public TreeNode getRootNode() {
+        return node;
+    }
+
+    /**
+     * @return all leafs in tree.
+     */
+    public List<TreeNode> getLeafs() {
+        List<TreeNode> res = new ArrayList<>();
+        getLeafs(node, res);
+        return res;
+    }
+
+    /**
+     * @param root Root.
+     * @param res Result list.
+     */
+    private void getLeafs(TreeNode root, List<TreeNode> res) {
+        if (root.getType() == TreeNode.Type.LEAF)
+            res.add(root);
+        else {
+            getLeafs(root.getLeft(), res);
+            getLeafs(root.getRight(), res);
+        }
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogram.java
new file mode 100644
index 0000000..3ca2a93
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogram.java
@@ -0,0 +1,224 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.feature.ObjectHistogram;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+
+/**
+ * Class contains implementation of splitting point finding algorithm based on Gini metric (see
+ * https://en.wikipedia.org/wiki/Gini_coefficient) and represents a set of histograms in according to this metric.
+ */
+public class GiniHistogram extends ImpurityHistogram implements ImpurityComputer<BootstrappedVector, GiniHistogram> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 5780670356098827667L;
+
+    /** Bucket meta. */
+    private final BucketMeta bucketMeta;
+
+    /** Sample id. */
+    private final int sampleId;
+
+    /** Hists of counters for each labels. */
+    private final ArrayList<ObjectHistogram<BootstrappedVector>> hists;
+
+    /** Label mapping to internal representation. */
+    private final Map<Double, Integer> lblMapping;
+
+    /** Bucket ids. */
+    private final Set<Integer> bucketIds;
+
+    /**
+     * Creates an instance of GiniHistogram.
+     *
+     * @param sampleId Sample id.
+     * @param lblMapping Label mapping.
+     * @param bucketMeta Bucket meta.
+     */
+    public GiniHistogram(int sampleId, Map<Double, Integer> lblMapping, BucketMeta bucketMeta) {
+        super(bucketMeta.getFeatureMeta().getFeatureId());
+        this.hists = new ArrayList<>(lblMapping.size());
+        this.sampleId = sampleId;
+        this.bucketMeta = bucketMeta;
+        this.lblMapping = lblMapping;
+
+        for (int i = 0; i < lblMapping.size(); i++)
+            hists.add(new ObjectHistogram<>(this::bucketMap, this::counterMap));
+
+        this.bucketIds = new TreeSet<>();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addElement(BootstrappedVector vector) {
+        Integer lblId = lblMapping.get(vector.label());
+        hists.get(lblId).addElement(vector);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Optional<Double> getValue(Integer bucketId) {
+        throw new IllegalStateException("Gini histogram doesn't support 'getValue' method");
+    }
+
+    /** {@inheritDoc} */
+    @Override public GiniHistogram plus(GiniHistogram other) {
+        GiniHistogram res = new GiniHistogram(sampleId, lblMapping, bucketMeta);
+        res.bucketIds.addAll(this.bucketIds);
+        res.bucketIds.addAll(other.bucketIds);
+        for (int i = 0; i < hists.size(); i++)
+            res.hists.set(i, this.hists.get(i).plus(other.hists.get(i)));
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Optional<NodeSplit> findBestSplit() {
+        if (bucketIds.size() < 2)
+            return Optional.empty();
+
+        double bestImpurity = Double.POSITIVE_INFINITY;
+        double bestSplitVal = Double.NEGATIVE_INFINITY;
+        int bestBucketId = -1;
+
+        List<TreeMap<Integer, Double>> countersDistribPerCls = hists.stream()
+            .map(ObjectHistogram::computeDistributionFunction)
+            .collect(Collectors.toList());
+
+        double[] totalSampleCntPerLb = countersDistribPerCls.stream()
+            .mapToDouble(x -> x.isEmpty() ? 0.0 : x.lastEntry().getValue())
+            .toArray();
+
+        Map<Integer, Double> lastLeftValues = new HashMap<>();
+        for (int i = 0; i < lblMapping.size(); i++)
+            lastLeftValues.put(i, 0.0);
+
+        for (Integer bucketId : bucketIds) {
+            double totalToleftCnt = 0;
+            double totalToRightCnt = 0;
+
+            double leftImpurity = 0;
+            double rightImpurity = 0;
+
+            //Compute number of samples left and right in according to split by bucketId
+            for (int lbId = 0; lbId < lblMapping.size(); lbId++) {
+                Double left = countersDistribPerCls.get(lbId).get(bucketId);
+                if (left == null)
+                    left = lastLeftValues.get(lbId);
+
+                totalToleftCnt += left;
+                totalToRightCnt += totalSampleCntPerLb[lbId] - left;
+
+                lastLeftValues.put(lbId, left);
+            }
+
+            for (int lbId = 0; lbId < lblMapping.size(); lbId++) {
+                //count of samples with label [corresponding lblId] to the left of bucket
+                Double toLeftCnt = countersDistribPerCls.get(lbId).getOrDefault(bucketId, lastLeftValues.get(lbId));
+
+                if (toLeftCnt > 0)
+                    leftImpurity += Math.pow(toLeftCnt, 2) / totalToleftCnt;
+
+                //number of samples to the right of bucket = total samples count - toLeftCnt
+                double toRightCnt = totalSampleCntPerLb[lbId] - toLeftCnt;
+                if (toRightCnt > 0)
+                    rightImpurity += (Math.pow(toRightCnt, 2)) / totalToRightCnt;
+            }
+
+            double impurityInBucket = -(leftImpurity + rightImpurity);
+            if (impurityInBucket <= bestImpurity) {
+                bestImpurity = impurityInBucket;
+                bestSplitVal = bucketMeta.bucketIdToValue(bucketId);
+                bestBucketId = bucketId;
+            }
+        }
+
+        return checkAndReturnSplitValue(bestBucketId, bestSplitVal, bestImpurity);
+    }
+
+    /** {@inheritDoc} */
+    @Override public Set<Integer> buckets() {
+        return bucketIds;
+    }
+
+    /**
+     * Returns counters histogram for class-label.
+     *
+     * @param lbl Label.
+     * @return counters histogram for class-label.
+     */
+    ObjectHistogram<BootstrappedVector> getHistForLabel(Double lbl) {
+        return hists.get(lblMapping.get(lbl));
+    }
+
+    /**
+     * Maps vector to counter value.
+     *
+     * @param vec Vector.
+     * @return Counter value.
+     */
+    private Double counterMap(BootstrappedVector vec) {
+        return (double)vec.counters()[sampleId];
+    }
+
+    /**
+     * Maps vector to bucket id.
+     *
+     * @param vec Vector.
+     * @return Bucket id.
+     */
+    private Integer bucketMap(BootstrappedVector vec) {
+        int bucketId = bucketMeta.getBucketId(vec.features().get(featureId));
+        this.bucketIds.add(bucketId);
+        return bucketId;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isEqualTo(GiniHistogram other) {
+        HashSet<Integer> unionBuckets = new HashSet<>(buckets());
+        unionBuckets.addAll(other.bucketIds);
+        if (unionBuckets.size() != bucketIds.size())
+            return false;
+
+        HashSet<Double> unionMappings = new HashSet<>(lblMapping.keySet());
+        unionMappings.addAll(other.lblMapping.keySet());
+        if (unionMappings.size() != lblMapping.size())
+            return false;
+
+        for (Double lbl : unionMappings) {
+            if (lblMapping.get(lbl) != other.lblMapping.get(lbl))
+                return false;
+
+            ObjectHistogram<BootstrappedVector> thisHist = getHistForLabel(lbl);
+            ObjectHistogram<BootstrappedVector> otherHist = other.getHistForLabel(lbl);
+            if (!thisHist.isEqualTo(otherHist))
+                return false;
+        }
+
+        return true;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogramsComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogramsComputer.java
new file mode 100644
index 0000000..8c9dc95
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniHistogramsComputer.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.Map;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+
+/**
+ * Implementation of {@link ImpurityHistogramsComputer} for classification task.
+ */
+public class GiniHistogramsComputer extends ImpurityHistogramsComputer<GiniHistogram> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 3672921182944932748L;
+
+    /** Label mapping. */
+    private final Map<Double, Integer> lblMapping;
+
+    /**
+     * Creates an instance of GiniHistogramsComputer.
+     *
+     * @param lblMapping Lbl mapping.
+     */
+    public GiniHistogramsComputer(Map<Double, Integer> lblMapping) {
+        this.lblMapping = lblMapping;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected GiniHistogram createImpurityComputerForFeature(int sampleId, BucketMeta meta) {
+        return new GiniHistogram(sampleId, lblMapping, meta);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityComputer.java
new file mode 100644
index 0000000..0684415
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityComputer.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.Optional;
+import org.apache.ignite.ml.dataset.feature.Histogram;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+
+/**
+ * Interface represents an object that can compute best splitting point using features histograms.
+ *
+ * @param <T> Base object type for histogram.
+ * @param <H> Type of histogram that can be used in math operations with this object.
+ */
+public interface ImpurityComputer<T, H extends Histogram<T, H>> extends Histogram<T, H> {
+    /**
+     * Returns best split point computed on histogram if it exists.
+     * Split point may be absent when there is no data in histograms or split point lay in last bucket in histogram.
+     *
+     * @return Splitting point for decision tree.
+     */
+    public Optional<NodeSplit> findBestSplit();
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogram.java
new file mode 100644
index 0000000..296d862
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogram.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.Optional;
+import java.util.Set;
+import java.util.TreeSet;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+
+/**
+ * Helper class for ImpurityHistograms.
+ */
+public abstract class ImpurityHistogram {
+    /** Bucket ids. */
+    protected final Set<Integer> bucketIds = new TreeSet<>();
+
+    /** Feature id. */
+    protected final int featureId;
+
+    /**
+     * Creates an instance of ImpurityHistogram.
+     *
+     * @param featureId Feature id.
+     */
+    public ImpurityHistogram(int featureId) {
+        this.featureId = featureId;
+    }
+
+    /**
+     * Checks split value validity and return Optional-wrap of it.
+     * In other case returns Optional.empty
+     *
+     * @param bestBucketId Best bucket id.
+     * @param bestSplitVal Best split value.
+     * @param bestImpurity Best impurity.
+     * @return best split value.
+     */
+    protected Optional<NodeSplit> checkAndReturnSplitValue(int bestBucketId, double bestSplitVal, double bestImpurity) {
+        if (isLastBucket(bestBucketId))
+            return Optional.empty();
+        else
+            return Optional.of(new NodeSplit(featureId, bestSplitVal, bestImpurity));
+    }
+
+    /**
+     * @param bestBucketId Best bucket id.
+     * @return true if best found bucket is last within all bucketIds.
+     */
+    private boolean isLastBucket(int bestBucketId) {
+        int minBucketId = Integer.MAX_VALUE;
+        int maxBucketId = Integer.MIN_VALUE;
+        for (Integer bucketId : bucketIds) {
+            minBucketId = Math.min(minBucketId, bucketId);
+            maxBucketId = Math.max(maxBucketId, bucketId);
+        }
+
+        return bestBucketId == maxBucketId;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java
new file mode 100644
index 0000000..d1ed87f
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramsComputer.java
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.tree.randomforest.data.NodeId;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+import org.apache.ignite.ml.tree.randomforest.data.TreeNode;
+import org.apache.ignite.ml.tree.randomforest.data.TreeRoot;
+
+/**
+ * Class containing logic of aggregation impurity statistics within learning dataset.
+ *
+ * @param <S> Type of basic impurity computer for feature.
+ */
+public abstract class ImpurityHistogramsComputer<S extends ImpurityComputer<BootstrappedVector, S>> implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -4984067145908187508L;
+
+    /**
+     * Computes histograms for each features.
+     *
+     * @param roots Random forest roots.
+     * @param histMeta Histograms meta.
+     * @param nodesToLearn Nodes to learn.
+     * @param dataset Dataset.
+     */
+    public Map<NodeId, NodeImpurityHistograms<S>> aggregateImpurityStatistics(ArrayList<TreeRoot> roots,
+        Map<Integer, BucketMeta> histMeta, Map<NodeId, TreeNode> nodesToLearn,
+        Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+
+        return dataset.compute(
+            x -> aggregateImpurityStatisticsOnPartition(x, roots, histMeta, nodesToLearn),
+            this::reduceImpurityStatistics
+        );
+    }
+
+    /**
+     * Aggregates statistics for impurity computing for each corner nodes for each trees in random forest. This
+     * algorithm predict corner node in decision tree for learning vector and stocks it to correspond histogram.
+     *
+     * @param dataset Dataset.
+     * @param roots Trees.
+     * @param histMeta Histogram buckets meta.
+     * @param part Partition.
+     * @return Leaf statistics for impurity computing.
+     */
+    private Map<NodeId, NodeImpurityHistograms<S>> aggregateImpurityStatisticsOnPartition(
+        BootstrappedDatasetPartition dataset, ArrayList<TreeRoot> roots,
+        Map<Integer, BucketMeta> histMeta,
+        Map<NodeId, TreeNode> part) {
+
+        Map<NodeId, NodeImpurityHistograms<S>> res = part.keySet().stream()
+            .collect(Collectors.toMap(n -> n, NodeImpurityHistograms::new));
+
+        dataset.forEach(vector -> {
+            for (int sampleId = 0; sampleId < vector.counters().length; sampleId++) {
+                if (vector.counters()[sampleId] == 0)
+                    continue;
+
+                TreeRoot root = roots.get(sampleId);
+                NodeId key = root.getRootNode().predictNextNodeKey(vector.features());
+                if (!part.containsKey(key)) //if we didn't take all nodes from learning queue
+                    continue;
+
+                NodeImpurityHistograms<S> statistics = res.get(key);
+                for (Integer featureId : root.getUsedFeatures()) {
+                    BucketMeta meta = histMeta.get(featureId);
+                    if (!statistics.perFeatureStatistics.containsKey(featureId))
+                        statistics.perFeatureStatistics.put(featureId, createImpurityComputerForFeature(sampleId, meta));
+                    S impurityComputer = statistics.perFeatureStatistics.get(featureId);
+                    impurityComputer.addElement(vector);
+                }
+            }
+        });
+        return res;
+    }
+
+    /**
+     * Merge leaf statistics from several data partitions.
+     *
+     * @param left Left.
+     * @param right Right.
+     * @return merged leaf impurity statistics.
+     */
+    private Map<NodeId, NodeImpurityHistograms<S>> reduceImpurityStatistics(Map<NodeId, NodeImpurityHistograms<S>> left,
+        Map<NodeId, NodeImpurityHistograms<S>> right) {
+
+        if (left == null)
+            return right;
+        if (right == null)
+            return left;
+
+        Map<NodeId, NodeImpurityHistograms<S>> res = new HashMap<>(left);
+        for (NodeId key : right.keySet()) {
+            NodeImpurityHistograms<S> rightVal = right.get(key);
+            if (!res.containsKey(key))
+                res.put(key, rightVal);
+            else
+                res.put(key, left.get(key).plus(rightVal));
+        }
+
+        return res;
+    }
+
+    /**
+     * Creates impurity computer in according to specific algorithm based on random forest (for example {@link
+     * GiniHistogram} for classification).
+     *
+     * @param sampleId Sample id.
+     * @param meta Bucket Meta.
+     * @return impurity computer
+     */
+    protected abstract S createImpurityComputerForFeature(int sampleId, BucketMeta meta);
+
+    /**
+     * Class represents per feature statistics for impurity computing.
+     */
+    public static class NodeImpurityHistograms<S extends ImpurityComputer<BootstrappedVector, S>> implements Serializable {
+        /** Serial version uid. */
+        private static final long serialVersionUID = 2700045747590421768L;
+
+        /** Node id. */
+        private final NodeId nodeId;
+
+        /** Per feature statistics. */
+        private final Map<Integer, S> perFeatureStatistics = new HashMap<>();
+
+        /**
+         * Create an instance of NodeImpurityHistograms.
+         *
+         * @param nodeId Node id.
+         */
+        public NodeImpurityHistograms(NodeId nodeId) {
+            this.nodeId = nodeId;
+        }
+
+        /**
+         * Store features statistics from other instance.
+         *
+         * @param other Other instance.
+         */
+        public NodeImpurityHistograms<S> plus(NodeImpurityHistograms<S> other) {
+            assert nodeId == other.nodeId;
+            NodeImpurityHistograms<S> res = new NodeImpurityHistograms<>(nodeId);
+            addTo(this.perFeatureStatistics, res.perFeatureStatistics);
+            addTo(other.perFeatureStatistics, res.perFeatureStatistics);
+            return res;
+        }
+
+        /**
+         * Adds all statistics to target.
+         *
+         * @param from From.
+         * @param to To.
+         */
+        private void addTo(Map<Integer, S> from, Map<Integer, S> to) {
+            from.forEach((key, hist) -> {
+                if(!to.containsKey(key)) {
+                    to.put(key, hist);
+                } else {
+                    S sumOfHists = to.get(key).plus(hist);
+                    to.put(key, sumOfHists);
+                }
+            });
+        }
+
+        /** */
+        public NodeId getNodeId() {
+            return nodeId;
+        }
+
+        /**
+         * Find best split point, based on feature statistics.
+         *
+         * @return Best split point if it exists.
+         */
+        public Optional<NodeSplit> findBestSplit() {
+            return perFeatureStatistics.values().stream()
+                .flatMap(x -> x.findBestSplit().map(Stream::of).orElse(Stream.empty()))
+                .min(Comparator.comparingDouble(NodeSplit::getImpurity));
+        }
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogram.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogram.java
new file mode 100644
index 0000000..c00b1c1
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogram.java
@@ -0,0 +1,234 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.HashSet;
+import java.util.Optional;
+import java.util.Set;
+import java.util.TreeMap;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.feature.ObjectHistogram;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+
+/**
+ * Class contains implementation of splitting point finding algorithm based on MSE metric (see https://en.wikipedia.org/wiki/Mean_squared_error)
+ * and represents a set of histograms in according to this metric.
+ */
+public class MSEHistogram extends ImpurityHistogram implements ImpurityComputer<BootstrappedVector, MSEHistogram> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 9175485616887867623L;
+
+    /** Bucket meta. */
+    private final BucketMeta bucketMeta;
+
+    /** Sample id. */
+    private final int sampleId;
+
+    /** Counters. */
+    private ObjectHistogram<BootstrappedVector> counters;
+
+    /** Sums of label values. */
+    private ObjectHistogram<BootstrappedVector> sumOfLabels;
+
+    /** Sums of squared label values. */
+    private ObjectHistogram<BootstrappedVector> sumOfSquaredLabels;
+
+    /**
+     * Creates an instance of MSEHistogram.
+     *
+     * @param sampleId Sample id.
+     * @param bucketMeta Bucket meta.
+     */
+    public MSEHistogram(int sampleId, BucketMeta bucketMeta) {
+        super(bucketMeta.getFeatureMeta().getFeatureId());
+        this.bucketMeta = bucketMeta;
+        this.sampleId = sampleId;
+
+        counters = new ObjectHistogram<>(this::bucketMap, this::counterMap);
+        sumOfLabels = new ObjectHistogram<>(this::bucketMap, this::ysMap);
+        sumOfSquaredLabels = new ObjectHistogram<>(this::bucketMap, this::y2sMap);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void addElement(BootstrappedVector vector) {
+        counters.addElement(vector);
+        sumOfLabels.addElement(vector);
+        sumOfSquaredLabels.addElement(vector);
+    }
+
+    /** {@inheritDoc} */
+    @Override public MSEHistogram plus(MSEHistogram other) {
+        MSEHistogram res = new MSEHistogram(sampleId, bucketMeta);
+        res.counters = this.counters.plus(other.counters);
+        res.sumOfLabels = this.sumOfLabels.plus(other.sumOfLabels);
+        res.sumOfSquaredLabels = this.sumOfSquaredLabels.plus(other.sumOfSquaredLabels);
+        res.bucketIds.addAll(this.bucketIds);
+        res.bucketIds.addAll(bucketIds);
+        return res;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Set<Integer> buckets() {
+        return bucketIds;
+    }
+
+    /** {@inheritDoc} */
+    @Override public Optional<Double> getValue(Integer bucketId) {
+        throw new IllegalStateException("MSE histogram doesn't support 'getValue' method");
+    }
+
+    /** {@inheritDoc} */
+    @Override public Optional<NodeSplit> findBestSplit() {
+        double bestImpurity = Double.POSITIVE_INFINITY;
+        double bestSplitVal = Double.NEGATIVE_INFINITY;
+        int bestBucketId = -1;
+
+        //counter corresponds to number of samples
+        //ys corresponds to sumOfLabels
+        //y2s corresponds to sumOfSquaredLabels
+        TreeMap<Integer, Double> cntrDistrib = counters.computeDistributionFunction();
+        TreeMap<Integer, Double> ysDistrib = sumOfLabels.computeDistributionFunction();
+        TreeMap<Integer, Double> y2sDistrib = sumOfSquaredLabels.computeDistributionFunction();
+
+        double cntrMax = cntrDistrib.lastEntry().getValue();
+        double ysMax = ysDistrib.lastEntry().getValue();
+        double y2sMax = y2sDistrib.lastEntry().getValue();
+
+        double lastLeftCntrVal = 0.0;
+        double lastLeftYVal = 0.0;
+        double lastLeftY2Val = 0.0;
+
+        for (Integer bucketId : bucketIds) {
+            //values for impurity computing to the left of bucket value
+            double leftCnt = cntrDistrib.getOrDefault(bucketId, lastLeftCntrVal);
+            double leftY = ysDistrib.getOrDefault(bucketId, lastLeftYVal);
+            double leftY2 = y2sDistrib.getOrDefault(bucketId, lastLeftY2Val);
+
+            //values for impurity computing to the right of bucket value
+            double rightCnt = cntrMax - leftCnt;
+            double rightY = ysMax - leftY;
+            double rightY2 = y2sMax - leftY2;
+
+            double impurity = 0.0;
+
+            if (leftCnt > 0)
+                impurity += impurity(leftCnt, leftY, leftY2);
+            if (rightCnt > 0)
+                impurity += impurity(rightCnt, rightY, rightY2);
+
+            if (impurity < bestImpurity) {
+                bestImpurity = impurity;
+                bestSplitVal = bucketMeta.bucketIdToValue(bucketId);
+                bestBucketId = bucketId;
+            }
+        }
+
+        return checkAndReturnSplitValue(bestBucketId, bestSplitVal, bestImpurity);
+    }
+
+    /**
+     * Computes impurity function value.
+     *
+     * @param cnt Counter value.
+     * @param ys plus of Ys.
+     * @param y2s plus of Y^2 s.
+     * @return impurity value.
+     */
+    private double impurity(double cnt, double ys, double y2s) {
+        return y2s - 2.0 * ys / cnt * ys + Math.pow(ys / cnt, 2) * cnt;
+    }
+
+    /**
+     * Maps vector to bucket id.
+     *
+     * @param vec Vector.
+     * @return Bucket id.
+     */
+    private Integer bucketMap(BootstrappedVector vec) {
+        int bucketId = bucketMeta.getBucketId(vec.features().get(featureId));
+        this.bucketIds.add(bucketId);
+        return bucketId;
+    }
+
+    /**
+     * Maps vector to counter value.
+     *
+     * @param vec Vector.
+     * @return Counter value.
+     */
+    private Double counterMap(BootstrappedVector vec) {
+        return (double)vec.counters()[sampleId];
+    }
+
+    /**
+     * Maps vector to Y-value.
+     *
+     * @param vec Vector.
+     * @return Y value.
+     */
+    private Double ysMap(BootstrappedVector vec) {
+        return vec.counters()[sampleId] * vec.label();
+    }
+
+    /**
+     * Maps vector to Y^2 value.
+     *
+     * @param vec Vec.
+     * @return Y^2 value.
+     */
+    private Double y2sMap(BootstrappedVector vec) {
+        return vec.counters()[sampleId] * Math.pow(vec.label(), 2);
+    }
+
+    /**
+     * @return Counters histogram.
+     */
+    ObjectHistogram<BootstrappedVector> getCounters() {
+        return counters;
+    }
+
+    /**
+     * @return Ys histogram.
+     */
+    ObjectHistogram<BootstrappedVector> getSumOfLabels() {
+        return sumOfLabels;
+    }
+
+    /**
+     * @return Y^2s histogram.
+     */
+    ObjectHistogram<BootstrappedVector> getSumOfSquaredLabels() {
+        return sumOfSquaredLabels;
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean isEqualTo(MSEHistogram other) {
+        HashSet<Integer> unionBuckets = new HashSet<>(buckets());
+        unionBuckets.addAll(other.bucketIds);
+        if(unionBuckets.size() != bucketIds.size())
+            return false;
+
+        if(!this.counters.isEqualTo(other.counters))
+            return false;
+        if(!this.sumOfLabels.isEqualTo(other.sumOfLabels))
+            return false;
+
+        return this.sumOfSquaredLabels.isEqualTo(other.sumOfSquaredLabels);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramComputer.java
new file mode 100644
index 0000000..412cfc9
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramComputer.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+
+/**
+ * Histogram computer realization for MSE impurity metric.
+ */
+public class MSEHistogramComputer extends ImpurityHistogramsComputer<MSEHistogram> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -1544823437437088334L;
+
+    /** {@inheritDoc} */
+    @Override protected MSEHistogram createImpurityComputerForFeature(int sampleId, BucketMeta meta) {
+        return new MSEHistogram(sampleId, meta);
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/package-info.java
new file mode 100644
index 0000000..62e2259
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/impurity/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains implementation of impurity computers based on histograms.
+ */
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/package-info.java
new file mode 100644
index 0000000..0311845
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Package contains helper data structures for random forest implementation.
+ */
+package org.apache.ignite.ml.tree.randomforest.data;
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/ClassifierLeafValuesComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/ClassifierLeafValuesComputer.java
new file mode 100644
index 0000000..64297ff
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/ClassifierLeafValuesComputer.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.util.Comparator;
+import java.util.Map;
+import org.apache.ignite.ml.dataset.feature.ObjectHistogram;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+
+/**
+ * Implementation of {@link LeafValuesComputer} for classification task.
+ */
+public class ClassifierLeafValuesComputer extends LeafValuesComputer<ObjectHistogram<BootstrappedVector>> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = 420416095877577599L;
+
+    /** Label mapping. */
+    private final Map<Double, Integer> lblMapping;
+
+    /**
+     * Creates an instance of ClassifierLeafValuesComputer.
+     *
+     * @param lblMapping Label mapping.
+     */
+    public ClassifierLeafValuesComputer(Map<Double, Integer> lblMapping) {
+        this.lblMapping = lblMapping;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void addElementToLeafStatistic(ObjectHistogram<BootstrappedVector> leafStatAggr, BootstrappedVector vec, int sampleId) {
+        leafStatAggr.addElement(vec);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ObjectHistogram<BootstrappedVector> mergeLeafStats(ObjectHistogram<BootstrappedVector> leftStats,
+        ObjectHistogram<BootstrappedVector> rightStats) {
+
+        return leftStats.plus(rightStats);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ObjectHistogram<BootstrappedVector> createLeafStatsAggregator(int sampleId) {
+        return new ObjectHistogram<>(
+            x -> lblMapping.get(x.label()),
+            x -> (double)x.counters()[sampleId]
+        );
+    }
+
+    /**
+     * Returns the most frequent value in according to statistic.
+     *
+     * @param stat Leaf statistics.
+     */
+    @Override protected double computeLeafValue(ObjectHistogram<BootstrappedVector> stat) {
+        Integer bucketId = stat.buckets().stream()
+            .max(Comparator.comparing(b -> stat.getValue(b).orElse(0.0)))
+            .orElse(-1);
+
+        if(bucketId == -1)
+            return Double.NaN;
+
+        return lblMapping.entrySet().stream()
+            .filter(x -> x.getValue().equals(bucketId))
+            .findFirst()
+            .get().getKey();
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java
new file mode 100644
index 0000000..056eece
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/LeafValuesComputer.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.tree.randomforest.data.NodeId;
+import org.apache.ignite.ml.tree.randomforest.data.TreeNode;
+import org.apache.ignite.ml.tree.randomforest.data.TreeRoot;
+
+/**
+ * Class containing logic of leaf values computing after building of all trees in random forest.
+ *
+ * @param <T> Type of leaf statistic.
+ */
+public abstract class LeafValuesComputer<T> implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -429848953091775832L;
+
+    /**
+     * Takes a list of all built trees and in one map-reduceImpurityStatistics step collect statistics for evaluating
+     * leaf-values for each tree and sets values for leaves.
+     *
+     * @param roots Learned trees.
+     * @param dataset Dataset.
+     */
+    public void setValuesForLeaves(ArrayList<TreeRoot> roots,
+        Dataset<EmptyContext, BootstrappedDatasetPartition> dataset) {
+
+        Map<NodeId, TreeNode> leafs = roots.stream()
+            .flatMap(r -> r.getLeafs().stream())
+            .collect(Collectors.toMap(TreeNode::getId, Function.identity()));
+
+        Map<NodeId, T> stats = dataset.compute(
+            data -> computeLeafsStatisticsInPartition(roots, leafs, data),
+            this::mergeLeafStatistics
+        );
+
+        leafs.forEach((id, leaf) -> {
+            T stat = stats.get(id);
+            if(stat != null) {
+                double leafVal = computeLeafValue(stat);
+                leaf.setValue(leafVal);
+            }
+        });
+    }
+
+    /**
+     * Aggregates statistics on labels from learning dataset for each leaf nodes.
+     *
+     * @param roots Learned trees.
+     * @param leafs List of all leafs.
+     * @param data Data.
+     * @return statistics on labels for each leaf nodes.
+     */
+    private Map<NodeId, T> computeLeafsStatisticsInPartition(ArrayList<TreeRoot> roots,
+        Map<NodeId, TreeNode> leafs, BootstrappedDatasetPartition data) {
+
+        Map<NodeId, T> res = new HashMap<>();
+        for (int sampleId = 0; sampleId < roots.size(); sampleId++) {
+            final int sampleIdConst = sampleId;
+
+            data.forEach(vec -> {
+                NodeId leafId = roots.get(sampleIdConst).getRootNode().predictNextNodeKey(vec.features());
+                if (!leafs.containsKey(leafId))
+                    throw new IllegalStateException();
+
+                if (!res.containsKey(leafId))
+                    res.put(leafId, createLeafStatsAggregator(sampleIdConst));
+
+                addElementToLeafStatistic(res.get(leafId), vec, sampleIdConst);
+            });
+        }
+
+        return res;
+    }
+
+    /**
+     * Merges statistics on labels from several partitions.
+     *
+     * @param left first partition.
+     * @param right second partition.
+     * @return merged statistics.
+     */
+    private Map<NodeId, T> mergeLeafStatistics(Map<NodeId, T> left, Map<NodeId, T> right) {
+        if (left == null)
+            return right;
+        if (right == null)
+            return left;
+
+        Set<NodeId> keys = new HashSet<>(left.keySet());
+        keys.addAll(right.keySet());
+        for (NodeId key : keys) {
+            if (!left.containsKey(key))
+                left.put(key, right.get(key));
+            else if (right.containsKey(key))
+                left.put(key, mergeLeafStats(left.get(key), right.get(key)));
+        }
+
+        return left;
+    }
+
+    /**
+     * Save vector to leaf statistic.
+     *
+     * @param leafStatAggr Leaf statistics aggregator.
+     * @param vec Vector.
+     * @param sampleId Sample id.
+     */
+    protected abstract void addElementToLeafStatistic(T leafStatAggr, BootstrappedVector vec, int sampleId);
+
+    /**
+     * Merge statistics for same leafs.
+     *
+     * @param leftStats First leaf stat aggregator.
+     * @param rightStats Second leaf stat aggregator.
+     */
+    protected abstract T mergeLeafStats(T leftStats, T rightStats);
+
+    /**
+     * Creates an instance of leaf statistics aggregator in according to concrete algorithm based on RandomForest.
+     *
+     * @param sampleId Sample id.
+     */
+    protected abstract T createLeafStatsAggregator(int sampleId);
+
+    /**
+     * Compute value from leaf based on statistics on labels corresponds to leaf.
+     *
+     * @param stat Leaf statistics.
+     */
+    protected abstract double computeLeafValue(T stat);
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/MeanValueStatistic.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/MeanValueStatistic.java
new file mode 100644
index 0000000..c2c61d7
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/MeanValueStatistic.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.io.Serializable;
+
+/**
+ * Statistics for mean value computing container.
+ */
+public class MeanValueStatistic implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -6265792209142062174L;
+
+    /** Sum of values. */
+    private double sumOfValues;
+
+    /** Count of values. */
+    private long cntOfValues;
+
+    /**
+     * Creates an instance of MeanValueStatistic.
+     *
+     * @param sumOfValues Sum of values.
+     * @param cntOfValues Count of values.
+     */
+    public MeanValueStatistic(double sumOfValues, long cntOfValues) {
+        this.sumOfValues = sumOfValues;
+        this.cntOfValues = cntOfValues;
+    }
+
+    /**
+     * @return mean value.
+     */
+    public double mean() {
+        return sumOfValues / cntOfValues;
+    }
+
+    /** */
+    public double getSumOfValues() {
+        return sumOfValues;
+    }
+
+    /** */
+    public void setSumOfValues(double sumOfValues) {
+        this.sumOfValues = sumOfValues;
+    }
+
+    /** */
+    public long getCntOfValues() {
+        return cntOfValues;
+    }
+
+    /** */
+    public void setCntOfValues(long cntOfValues) {
+        this.cntOfValues = cntOfValues;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatistics.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatistics.java
new file mode 100644
index 0000000..8341204
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatistics.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.io.Serializable;
+
+/**
+ * Aggregator of normal distribution statistics for continual features.
+ */
+public class NormalDistributionStatistics implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -5422805289301484436L;
+
+    /** Min value. */
+    private final double min;
+
+    /** Max value. */
+    private final double max;
+
+    /** Sum of value squares. */
+    private final double sumOfSquares;
+
+    /** Sum of values. */
+    private final double sumOfValues;
+
+    /** Count of objects. */
+    private final long n;
+
+    /**
+     * Creates an instance of NormalDistributionStatistics.
+     *
+     * @param min Min.
+     * @param max Max.
+     * @param sumOfSquares Sum of squares.
+     * @param sumOfValues Sum of values.
+     * @param n N.
+     */
+    public NormalDistributionStatistics(double min, double max, double sumOfSquares, double sumOfValues, long n) {
+        this.min = min;
+        this.max = max;
+        this.sumOfSquares = sumOfSquares;
+        this.sumOfValues = sumOfValues;
+        this.n = n;
+    }
+
+    /**
+     * Returns plus of normal distribution statistics.
+     *
+     * @param stats Stats.
+     * @return plus of normal distribution statistics.
+     */
+    public NormalDistributionStatistics plus(NormalDistributionStatistics stats) {
+        return new NormalDistributionStatistics(
+            Math.min(this.min, stats.min),
+            Math.max(this.max, stats.max),
+            this.sumOfSquares + stats.sumOfSquares,
+            this.sumOfValues + stats.sumOfValues,
+            this.n + stats.n
+        );
+    }
+
+    /**
+     * @return mean value.
+     */
+    public double mean() {
+        return sumOfValues / n;
+    }
+
+    /**
+     * @return variance value.
+     */
+    public double variance() {
+        double mean = mean();
+        return (sumOfSquares / n) - mean * mean;
+    }
+
+    /**
+     * @return standard deviation value.
+     */
+    public double std() {
+        return Math.sqrt(variance());
+    }
+
+    /**
+     * @return min value.
+     */
+    public double min() {
+        return min;
+    }
+
+    /**
+     * @return max value.
+     */
+    public double max() {
+        return max;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputer.java
new file mode 100644
index 0000000..d972631
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputer.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import org.apache.ignite.ml.dataset.Dataset;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+
+/**
+ * Normal distribution parameters computer logic.
+ */
+public class NormalDistributionStatisticsComputer implements Serializable {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -3699071003012595743L;
+
+    /**
+     * Computes statistics of normal distribution on features in dataset.
+     *
+     * @param meta Meta.
+     * @param dataset Dataset.
+     */
+    public List<NormalDistributionStatistics> computeStatistics(List<FeatureMeta> meta, Dataset<EmptyContext,
+        BootstrappedDatasetPartition> dataset) {
+
+        return dataset.compute(
+            x -> computeStatsOnPartition(x, meta),
+            (l, r) -> reduceStats(l, r, meta)
+        );
+    }
+
+    /**
+     * Aggregates normal distribution statistics for continual features in dataset partition.
+     *
+     * @param part Partition.
+     * @param meta Meta.
+     * @return Statistics for each feature.
+     */
+    public List<NormalDistributionStatistics> computeStatsOnPartition(BootstrappedDatasetPartition part,
+        List<FeatureMeta> meta) {
+
+        double[] sumOfValues = new double[meta.size()];
+        double[] sumOfSquares = new double[sumOfValues.length];
+        double[] min = new double[sumOfValues.length];
+        double[] max = new double[sumOfValues.length];
+        Arrays.fill(min, Double.POSITIVE_INFINITY);
+        Arrays.fill(max, Double.NEGATIVE_INFINITY);
+
+        for (int i = 0; i < part.getRowsCount(); i++) {
+            Vector vec = part.getRow(i).features();
+            for (int featureId = 0; featureId < vec.size(); featureId++) {
+                if (!meta.get(featureId).isCategoricalFeature()) {
+                    double featureVal = vec.get(featureId);
+                    sumOfValues[featureId] += featureVal;
+                    sumOfSquares[featureId] += Math.pow(featureVal, 2);
+                    min[featureId] = Math.min(min[featureId], featureVal);
+                    max[featureId] = Math.max(max[featureId], featureVal);
+                }
+            }
+        }
+
+        ArrayList<NormalDistributionStatistics> res = new ArrayList<>();
+        for (int featureId = 0; featureId < sumOfSquares.length; featureId++) {
+            res.add(new NormalDistributionStatistics(
+                min[featureId], max[featureId],
+                sumOfSquares[featureId], sumOfValues[featureId],
+                part.getRowsCount())
+            );
+        }
+        return res;
+    }
+
+    /**
+     * Merges statistics on features from two partitions.
+     *
+     * @param left Left.
+     * @param right Right.
+     * @param meta Features meta.
+     * @return plus of statistics for each features.
+     */
+    public List<NormalDistributionStatistics> reduceStats(List<NormalDistributionStatistics> left,
+        List<NormalDistributionStatistics> right,
+        List<FeatureMeta> meta) {
+
+        if (left == null)
+            return right;
+        if (right == null)
+            return left;
+
+        assert meta.size() == left.size() && meta.size() == right.size();
+        List<NormalDistributionStatistics> res = new ArrayList<>();
+        for (int featureId = 0; featureId < meta.size(); featureId++) {
+            NormalDistributionStatistics leftStat = left.get(featureId);
+            NormalDistributionStatistics rightStat = right.get(featureId);
+            res.add(leftStat.plus(rightStat));
+        }
+        return res;
+    }
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/RegressionLeafValuesComputer.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/RegressionLeafValuesComputer.java
new file mode 100644
index 0000000..7dcb025
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/RegressionLeafValuesComputer.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+
+/**
+ * Implementation of {@link LeafValuesComputer} for regression task.
+ */
+public class RegressionLeafValuesComputer extends LeafValuesComputer<MeanValueStatistic> {
+    /** Serial version uid. */
+    private static final long serialVersionUID = -1898031675220962125L;
+
+    /** {@inheritDoc} */
+    @Override protected void addElementToLeafStatistic(MeanValueStatistic leafStatAggr,
+        BootstrappedVector vec, int sampleId) {
+
+        int numOfRepetitions = vec.counters()[sampleId];
+        leafStatAggr.setSumOfValues(leafStatAggr.getSumOfValues() + vec.label() * numOfRepetitions);
+        leafStatAggr.setCntOfValues(leafStatAggr.getCntOfValues() + numOfRepetitions);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected MeanValueStatistic mergeLeafStats(MeanValueStatistic leftStats,
+        MeanValueStatistic rightStats) {
+
+        return new MeanValueStatistic(
+            leftStats.getSumOfValues() + rightStats.getSumOfValues(),
+            leftStats.getCntOfValues() + rightStats.getCntOfValues()
+        );
+    }
+
+    /** {@inheritDoc} */
+    @Override protected MeanValueStatistic createLeafStatsAggregator(int sampleId) {
+        return new MeanValueStatistic(0.0, 0);
+    }
+
+    /**
+     * Returns the mean value in according to statistic.
+     *
+     * @param stat Leaf statistics.
+     */
+    @Override protected double computeLeafValue(MeanValueStatistic stat) {
+        return stat.mean();
+    }
+
+}
diff --git a/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/package-info.java b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/package-info.java
new file mode 100644
index 0000000..dff89704
--- /dev/null
+++ b/modules/ml/src/main/java/org/apache/ignite/ml/tree/randomforest/data/statistics/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * <!-- Package description. -->
+ * Contains implementation of statistics computers for Random Forest.
+ */
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/IgniteMLTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/IgniteMLTestSuite.java
index 9f60c48..481e1fa 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/IgniteMLTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/IgniteMLTestSuite.java
@@ -18,21 +18,27 @@
 package org.apache.ignite.ml;
 
 import org.apache.ignite.ml.clustering.ClusteringTestSuite;
+import org.apache.ignite.ml.common.CommonTestSuite;
+import org.apache.ignite.ml.composition.CompositionTestSuite;
 import org.apache.ignite.ml.dataset.DatasetTestSuite;
+import org.apache.ignite.ml.environment.EnvironmentTestSuite;
 import org.apache.ignite.ml.genetic.GAGridTestSuite;
 import org.apache.ignite.ml.knn.KNNTestSuite;
 import org.apache.ignite.ml.math.MathImplMainTestSuite;
 import org.apache.ignite.ml.nn.MLPTestSuite;
+import org.apache.ignite.ml.pipeline.PipelineTestSuite;
 import org.apache.ignite.ml.preprocessing.PreprocessingTestSuite;
 import org.apache.ignite.ml.regressions.RegressionsTestSuite;
 import org.apache.ignite.ml.selection.SelectionTestSuite;
+import org.apache.ignite.ml.structures.StructuresTestSuite;
 import org.apache.ignite.ml.svm.SVMTestSuite;
 import org.apache.ignite.ml.tree.DecisionTreeTestSuite;
 import org.junit.runner.RunWith;
 import org.junit.runners.Suite;
 
 /**
- * Test suite for all module tests.
+ * Test suite for all module tests. IMPL NOTE tests in {@code org.apache.ignite.ml.tree.performance} are not
+ * included here because these are intended only for manual execution.
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
@@ -42,13 +48,17 @@
     ClusteringTestSuite.class,
     DecisionTreeTestSuite.class,
     KNNTestSuite.class,
-    LocalModelsTest.class,
     MLPTestSuite.class,
     DatasetTestSuite.class,
+    PipelineTestSuite.class,
     PreprocessingTestSuite.class,
     GAGridTestSuite.class,
-    SelectionTestSuite.class
+    SelectionTestSuite.class,
+    CompositionTestSuite.class,
+    EnvironmentTestSuite.class,
+    StructuresTestSuite.class,
+    CommonTestSuite.class
 })
 public class IgniteMLTestSuite {
     // No-op.
-}
\ No newline at end of file
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/LocalModelsTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/LocalModelsTest.java
deleted file mode 100644
index 3e3bab5..0000000
--- a/modules/ml/src/test/java/org/apache/ignite/ml/LocalModelsTest.java
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.function.Function;
-import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
-import org.apache.ignite.ml.clustering.kmeans.KMeansModelFormat;
-import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer;
-import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
-import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
-import org.apache.ignite.ml.knn.classification.KNNModelFormat;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
-import org.apache.ignite.ml.math.distances.EuclideanDistance;
-import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
-import org.apache.ignite.ml.regressions.linear.LinearRegressionModel;
-import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationModel;
-import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationModel;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Tests for models import/export functionality.
- */
-public class LocalModelsTest {
-    /** */
-    @Test
-    public void importExportKMeansModelTest() throws IOException {
-        executeModelTest(mdlFilePath -> {
-            KMeansModel mdl = getClusterModel();
-
-            Exporter<KMeansModelFormat, String> exporter = new FileExporter<>();
-
-            mdl.saveModel(exporter, mdlFilePath);
-
-            KMeansModelFormat load = exporter.load(mdlFilePath);
-
-            Assert.assertNotNull(load);
-
-            KMeansModel importedMdl = new KMeansModel(load.getCenters(), load.getDistance());
-
-            Assert.assertTrue("", mdl.equals(importedMdl));
-
-            return null;
-        });
-    }
-
-    /** */
-    @Test
-    public void importExportLinearRegressionModelTest() throws IOException {
-        executeModelTest(mdlFilePath -> {
-            LinearRegressionModel mdl = new LinearRegressionModel(new DenseVector(new double[]{1, 2}), 3);
-            Exporter<LinearRegressionModel, String> exporter = new FileExporter<>();
-            mdl.saveModel(exporter, mdlFilePath);
-
-            LinearRegressionModel load = exporter.load(mdlFilePath);
-
-            Assert.assertNotNull(load);
-            Assert.assertEquals("", mdl, load);
-
-            return null;
-        });
-    }
-
-    /** */
-    @Test
-    public void importExportSVMBinaryClassificationModelTest() throws IOException {
-        executeModelTest(mdlFilePath -> {
-            SVMLinearBinaryClassificationModel mdl = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{1, 2}), 3);
-            Exporter<SVMLinearBinaryClassificationModel, String> exporter = new FileExporter<>();
-            mdl.saveModel(exporter, mdlFilePath);
-
-            SVMLinearBinaryClassificationModel load = exporter.load(mdlFilePath);
-
-            Assert.assertNotNull(load);
-            Assert.assertEquals("", mdl, load);
-
-            return null;
-        });
-    }
-
-    /** */
-    @Test
-    public void importExportSVMMulticlassClassificationModelTest() throws IOException {
-        executeModelTest(mdlFilePath -> {
-            SVMLinearBinaryClassificationModel binaryMdl1 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{1, 2}), 3);
-            SVMLinearBinaryClassificationModel binaryMdl2 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{2, 3}), 4);
-            SVMLinearBinaryClassificationModel binaryMdl3 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{3, 4}), 5);
-
-            SVMLinearMultiClassClassificationModel mdl = new SVMLinearMultiClassClassificationModel();
-            mdl.add(1, binaryMdl1);
-            mdl.add(2, binaryMdl2);
-            mdl.add(3, binaryMdl3);
-
-            Exporter<SVMLinearMultiClassClassificationModel, String> exporter = new FileExporter<>();
-            mdl.saveModel(exporter, mdlFilePath);
-
-            SVMLinearMultiClassClassificationModel load = exporter.load(mdlFilePath);
-
-            Assert.assertNotNull(load);
-            Assert.assertEquals("", mdl, load);
-
-            return null;
-        });
-    }
-
-    /** */
-    private void executeModelTest(Function<String, Void> code) throws IOException {
-        Path mdlPath = Files.createTempFile(null, null);
-
-        Assert.assertNotNull(mdlPath);
-
-        try {
-            String mdlFilePath = mdlPath.toAbsolutePath().toString();
-
-            Assert.assertTrue(String.format("File %s not found.", mdlFilePath), Files.exists(mdlPath));
-
-            code.apply(mdlFilePath);
-        }
-        finally {
-            Files.deleteIfExists(mdlPath);
-        }
-    }
-
-    /** */
-    private KMeansModel getClusterModel() {
-        Map<Integer, double[]> data = new HashMap<>();
-        data.put(0, new double[] {1.0, 1959, 325100});
-        data.put(1, new double[] {1.0, 1960, 373200});
-
-        KMeansTrainer trainer = new KMeansTrainer()
-            .withK(1);
-
-        KMeansModel knnMdl = trainer.fit(
-            new LocalDatasetBuilder<>(data, 2),
-            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
-            (k, v) -> v[2]
-        );
-
-        return knnMdl;
-    }
-
-    /** */
-    @Test
-    public void importExportKNNModelTest() throws IOException {
-        executeModelTest(mdlFilePath -> {
-            KNNClassificationModel mdl = new KNNClassificationModel(null)
-                .withK(3)
-                .withDistanceMeasure(new EuclideanDistance())
-                .withStrategy(KNNStrategy.SIMPLE);
-
-            Exporter<KNNModelFormat, String> exporter = new FileExporter<>();
-            mdl.saveModel(exporter, mdlFilePath);
-
-            KNNModelFormat load = exporter.load(mdlFilePath);
-
-            Assert.assertNotNull(load);
-
-            KNNClassificationModel importedMdl = new KNNClassificationModel(null)
-                .withK(load.getK())
-                .withDistanceMeasure(load.getDistanceMeasure())
-                .withStrategy(load.getStgy());
-
-            Assert.assertTrue("", mdl.equals(importedMdl));
-
-            return null;
-        });
-    }
-}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/TestUtils.java b/modules/ml/src/test/java/org/apache/ignite/ml/TestUtils.java
index a4591fb..4b472cc 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/TestUtils.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/TestUtils.java
@@ -18,7 +18,6 @@
 package org.apache.ignite.ml;
 
 import java.util.stream.IntStream;
-import org.apache.ignite.ml.math.Precision;
 import org.apache.ignite.ml.math.primitives.matrix.Matrix;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.junit.Assert;
@@ -205,44 +204,6 @@
             Assert.fail(out.toString());
     }
 
-    /**
-     * Verifies that two float arrays are close (sup norm).
-     *
-     * @param msg The identifying message for the assertion error.
-     * @param exp Expected array.
-     * @param actual Actual array.
-     * @param tolerance Comparison tolerance value.
-     */
-    public static void assertEquals(String msg, float[] exp, float[] actual, float tolerance) {
-        StringBuilder out = new StringBuilder(msg);
-
-        if (exp.length != actual.length) {
-            out.append("\n Arrays not same length. \n");
-            out.append("expected has length ");
-            out.append(exp.length);
-            out.append(" observed length = ");
-            out.append(actual.length);
-            Assert.fail(out.toString());
-        }
-
-        boolean failure = false;
-
-        for (int i = 0; i < exp.length; i++)
-            if (!Precision.equalsIncludingNaN(exp[i], actual[i], tolerance)) {
-                failure = true;
-                out.append("\n Elements at index ");
-                out.append(i);
-                out.append(" differ. ");
-                out.append(" expected = ");
-                out.append(exp[i]);
-                out.append(" observed = ");
-                out.append(actual[i]);
-            }
-
-        if (failure)
-            Assert.fail(out.toString());
-    }
-
     /** */
     public static double maximumAbsoluteRowSum(Matrix mtx) {
         return IntStream.range(0, mtx.rowSize()).mapToObj(mtx::viewRow).map(v -> Math.abs(v.sum())).reduce(Math::max).get();
@@ -271,4 +232,97 @@
 
         return true;
     }
+
+    /** */
+    private static class Precision {
+        /** Offset to order signed double numbers lexicographically. */
+        private static final long SGN_MASK = 0x8000000000000000L;
+
+        /** Positive zero bits. */
+        private static final long POSITIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(+0.0);
+
+        /** Negative zero bits. */
+        private static final long NEGATIVE_ZERO_DOUBLE_BITS = Double.doubleToRawLongBits(-0.0);
+
+        /**
+         * Returns true if the arguments are both NaN, are equal or are within the range
+         * of allowed error (inclusive).
+         *
+         * @param x first value
+         * @param y second value
+         * @param eps the amount of absolute error to allow.
+         * @return {@code true} if the values are equal or within range of each other, or both are NaN.
+         * @since 2.2
+         */
+        static boolean equalsIncludingNaN(double x, double y, double eps) {
+            return equalsIncludingNaN(x, y) || (Math.abs(y - x) <= eps);
+        }
+
+        /**
+         * Returns true if the arguments are both NaN or they are
+         * equal as defined by {@link #equals(double, double, int) equals(x, y, 1)}.
+         *
+         * @param x first value
+         * @param y second value
+         * @return {@code true} if the values are equal or both are NaN.
+         * @since 2.2
+         */
+        private static boolean equalsIncludingNaN(double x, double y) {
+            return (x != x || y != y) ? !(x != x ^ y != y) : equals(x, y, 1);
+        }
+
+        /**
+         * Returns true if the arguments are equal or within the range of allowed
+         * error (inclusive).
+         * <p>
+         * Two float numbers are considered equal if there are {@code (maxUlps - 1)}
+         * (or fewer) floating point numbers between them, i.e. two adjacent
+         * floating point numbers are considered equal.
+         * </p>
+         * <p>
+         * Adapted from <a
+         * href="http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/">
+         * Bruce Dawson</a>. Returns {@code false} if either of the arguments is NaN.
+         * </p>
+         *
+         * @param x first value
+         * @param y second value
+         * @param maxUlps {@code (maxUlps - 1)} is the number of floating point values between {@code x} and {@code y}.
+         * @return {@code true} if there are fewer than {@code maxUlps} floating point values between {@code x} and {@code
+         * y}.
+         */
+        private static boolean equals(final double x, final double y, final int maxUlps) {
+
+            final long xInt = Double.doubleToRawLongBits(x);
+            final long yInt = Double.doubleToRawLongBits(y);
+
+            final boolean isEqual;
+            if (((xInt ^ yInt) & SGN_MASK) == 0L) {
+                // number have same sign, there is no risk of overflow
+                isEqual = Math.abs(xInt - yInt) <= maxUlps;
+            }
+            else {
+                // number have opposite signs, take care of overflow
+                final long deltaPlus;
+                final long deltaMinus;
+                if (xInt < yInt) {
+                    deltaPlus = yInt - POSITIVE_ZERO_DOUBLE_BITS;
+                    deltaMinus = xInt - NEGATIVE_ZERO_DOUBLE_BITS;
+                }
+                else {
+                    deltaPlus = xInt - POSITIVE_ZERO_DOUBLE_BITS;
+                    deltaMinus = yInt - NEGATIVE_ZERO_DOUBLE_BITS;
+                }
+
+                if (deltaPlus > maxUlps)
+                    isEqual = false;
+                else
+                    isEqual = deltaMinus <= (maxUlps - deltaPlus);
+
+            }
+
+            return isEqual && !Double.isNaN(x) && !Double.isNaN(y);
+
+        }
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java
index 0d95d05..f71b7b3 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansModelTest.java
@@ -46,13 +46,15 @@
 
         KMeansModel mdl = new KMeansModel(centers, distanceMeasure);
 
+        Assert.assertTrue(mdl.toString().contains("KMeansModel"));
+
         Assert.assertEquals(mdl.apply(new DenseVector(new double[]{1.1, 1.1})), 0.0, PRECISION);
         Assert.assertEquals(mdl.apply(new DenseVector(new double[]{-1.1, 1.1})), 1.0, PRECISION);
         Assert.assertEquals(mdl.apply(new DenseVector(new double[]{1.1, -1.1})), 2.0, PRECISION);
         Assert.assertEquals(mdl.apply(new DenseVector(new double[]{-1.1, -1.1})), 3.0, PRECISION);
 
         Assert.assertEquals(mdl.distanceMeasure(), distanceMeasure);
-        Assert.assertEquals(mdl.amountOfClusters(), 4);
-        Assert.assertArrayEquals(mdl.centers(), centers);
+        Assert.assertEquals(mdl.getAmountOfClusters(), 4);
+        Assert.assertArrayEquals(mdl.getCenters(), centers);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansTrainerTest.java
index 8d2c341..74ff8f1 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/clustering/KMeansTrainerTest.java
@@ -27,9 +27,11 @@
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.jetbrains.annotations.NotNull;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Tests for {@link KMeansTrainer}.
@@ -38,27 +40,25 @@
     /** Precision in test checks. */
     private static final double PRECISION = 1e-2;
 
-    /**
-     * A few points, one cluster, one iteration
-     */
-    @Test
-    public void findOneClusters() {
+    /** Data. */
+    private static final Map<Integer, double[]> data = new HashMap<>();
 
-        Map<Integer, double[]> data = new HashMap<>();
+    static {
         data.put(0, new double[] {1.0, 1.0, 1.0});
         data.put(1, new double[] {1.0, 2.0, 1.0});
         data.put(2, new double[] {2.0, 1.0, 1.0});
         data.put(3, new double[] {-1.0, -1.0, 2.0});
         data.put(4, new double[] {-1.0, -2.0, 2.0});
         data.put(5, new double[] {-2.0, -1.0, 2.0});
+    }
 
-        KMeansTrainer trainer = new KMeansTrainer()
-            .withDistance(new EuclideanDistance())
-            .withK(1)
-            .withMaxIterations(1)
-            .withEpsilon(PRECISION);
-
-        KMeansModel knnMdl = trainer.fit(
+    /**
+     * A few points, one cluster, one iteration
+     */
+    @Test
+    public void findOneClusters() {
+        KMeansTrainer trainer = createAndCheckTrainer();
+        KMeansModel knnMdl = trainer.withAmountOfClusters(1).fit(
             new LocalDatasetBuilder<>(data, 2),
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
             (k, v) -> v[2]
@@ -71,4 +71,48 @@
         assertEquals(trainer.getMaxIterations(), 1);
         assertEquals(trainer.getEpsilon(), PRECISION, PRECISION);
     }
+
+    /** */
+    @Test
+    public void testUpdateMdl() {
+        KMeansTrainer trainer = createAndCheckTrainer();
+        KMeansModel originalMdl = trainer.withAmountOfClusters(1).fit(
+            new LocalDatasetBuilder<>(data, 2),
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+        KMeansModel updatedMdlOnSameDataset = trainer.update(
+            originalMdl,
+            new LocalDatasetBuilder<>(data, 2),
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+        KMeansModel updatedMdlOnEmptyDataset = trainer.update(
+            originalMdl,
+            new LocalDatasetBuilder<>(new HashMap<Integer, double[]>(), 2),
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+
+        Vector firstVector = new DenseVector(new double[] {2.0, 2.0});
+        Vector secondVector = new DenseVector(new double[] {-2.0, -2.0});
+        assertEquals(originalMdl.apply(firstVector), updatedMdlOnSameDataset.apply(firstVector), PRECISION);
+        assertEquals(originalMdl.apply(secondVector), updatedMdlOnSameDataset.apply(secondVector), PRECISION);
+        assertEquals(originalMdl.apply(firstVector), updatedMdlOnEmptyDataset.apply(firstVector), PRECISION);
+        assertEquals(originalMdl.apply(secondVector), updatedMdlOnEmptyDataset.apply(secondVector), PRECISION);
+    }
+
+    /** */
+    @NotNull private KMeansTrainer createAndCheckTrainer() {
+        KMeansTrainer trainer = new KMeansTrainer()
+            .withDistance(new EuclideanDistance())
+            .withAmountOfClusters(10)
+            .withMaxIterations(1)
+            .withEpsilon(PRECISION)
+            .withSeed(2);
+        assertEquals(10, trainer.getAmountOfClusters());
+        assertEquals(2, trainer.getSeed());
+        assertTrue(trainer.getDistance() instanceof EuclideanDistance);
+        return trainer;
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/CollectionsTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/CollectionsTest.java
new file mode 100644
index 0000000..745eac9
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/CollectionsTest.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModelFormat;
+import org.apache.ignite.ml.knn.ann.ANNClassificationModel;
+import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer;
+import org.apache.ignite.ml.knn.ann.ANNModelFormat;
+import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
+import org.apache.ignite.ml.knn.classification.KNNModelFormat;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.distances.HammingDistance;
+import org.apache.ignite.ml.math.distances.ManhattanDistance;
+import org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.math.primitives.vector.impl.VectorizedViewMatrix;
+import org.apache.ignite.ml.regressions.linear.LinearRegressionModel;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
+import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassModel;
+import org.apache.ignite.ml.structures.Dataset;
+import org.apache.ignite.ml.structures.DatasetRow;
+import org.apache.ignite.ml.structures.FeatureMetadata;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationModel;
+import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationModel;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+
+/**
+ * Tests for equals and hashCode methods in classes that provide own implementations of these.
+ */
+public class CollectionsTest {
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void test() {
+        test(new VectorizedViewMatrix(new DenseMatrix(2, 2), 1, 1, 1, 1),
+            new VectorizedViewMatrix(new DenseMatrix(3, 2), 2, 1, 1, 1));
+
+        specialTest(new ManhattanDistance(), new ManhattanDistance());
+
+        specialTest(new HammingDistance(), new HammingDistance());
+
+        specialTest(new EuclideanDistance(), new EuclideanDistance());
+
+        FeatureMetadata data = new FeatureMetadata("name2");
+        data.setName("name1");
+        test(data, new FeatureMetadata("name2"));
+
+        test(new DatasetRow<>(new DenseVector()), new DatasetRow<>(new DenseVector(1)));
+
+        test(new LabeledVector<>(new DenseVector(), null), new LabeledVector<>(new DenseVector(1), null));
+
+        test(new Dataset<DatasetRow<Vector>>(new DatasetRow[] {}, new FeatureMetadata[] {}),
+            new Dataset<DatasetRow<Vector>>(new DatasetRow[] {new DatasetRow()},
+                new FeatureMetadata[] {new FeatureMetadata()}));
+
+        test(new LogisticRegressionModel(new DenseVector(), 1.0),
+            new LogisticRegressionModel(new DenseVector(), 0.5));
+
+        test(new KMeansModelFormat(new Vector[] {}, new ManhattanDistance()),
+            new KMeansModelFormat(new Vector[] {}, new HammingDistance()));
+
+        test(new KMeansModel(new Vector[] {}, new ManhattanDistance()),
+            new KMeansModel(new Vector[] {}, new HammingDistance()));
+
+        test(new KNNModelFormat(1, new ManhattanDistance(), NNStrategy.SIMPLE),
+            new KNNModelFormat(2, new ManhattanDistance(), NNStrategy.SIMPLE));
+
+        test(new KNNClassificationModel(null).withK(1), new KNNClassificationModel(null).withK(2));
+
+        LogRegressionMultiClassModel mdl = new LogRegressionMultiClassModel();
+        mdl.add(1, new LogisticRegressionModel(new DenseVector(), 1.0));
+        test(mdl, new LogRegressionMultiClassModel());
+
+        test(new LinearRegressionModel(null, 1.0), new LinearRegressionModel(null, 0.5));
+
+        SVMLinearMultiClassClassificationModel mdl1 = new SVMLinearMultiClassClassificationModel();
+        mdl1.add(1, new SVMLinearBinaryClassificationModel(new DenseVector(), 1.0));
+        test(mdl1, new SVMLinearMultiClassClassificationModel());
+
+        test(new SVMLinearBinaryClassificationModel(null, 1.0), new SVMLinearBinaryClassificationModel(null, 0.5));
+
+        test(new ANNClassificationModel(new LabeledVectorSet<>(), new ANNClassificationTrainer.CentroidStat()),
+            new ANNClassificationModel(new LabeledVectorSet<>(1, 1, true), new ANNClassificationTrainer.CentroidStat()));
+
+        test(new ANNModelFormat(1, new ManhattanDistance(), NNStrategy.SIMPLE, new LabeledVectorSet<>(), new ANNClassificationTrainer.CentroidStat()),
+            new ANNModelFormat(2, new ManhattanDistance(), NNStrategy.SIMPLE, new LabeledVectorSet<>(), new ANNClassificationTrainer.CentroidStat()));
+    }
+
+    /** Test classes that have all instances equal (eg, metrics). */
+    private <T> void specialTest(T o1, T o2) {
+        assertEquals(o1, o2);
+
+        test(o1, new Object());
+    }
+
+    /** */
+    private <T> void test(T o1, T o2) {
+        assertNotEquals(o1, null);
+        assertNotEquals(o2, null);
+
+        assertEquals(o1, o1);
+        assertEquals(o2, o2);
+
+        assertNotEquals(o1, o2);
+
+        Set<T> set = new HashSet<>();
+        set.add(o1);
+        set.add(o1);
+        assertEquals(1, set.size());
+
+        set.add(o2);
+        set.add(o2);
+        assertEquals(2, set.size());
+
+        set.remove(o1);
+        assertEquals(1, set.size());
+
+        set.remove(o2);
+        assertEquals(0, set.size());
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/CommonTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/CommonTestSuite.java
new file mode 100644
index 0000000..e3e1d2b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/CommonTestSuite.java
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for all tests located in org.apache.ignite.ml.trees package.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    LocalModelsTest.class,
+    CollectionsTest.class,
+    ExternalizeTest.class,
+    ModelTest.class
+})
+public class CommonTestSuite {
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/ExternalizeTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/ExternalizeTest.java
new file mode 100644
index 0000000..dc37ee8
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/ExternalizeTest.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import org.apache.ignite.ml.math.Destroyable;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.distances.HammingDistance;
+import org.apache.ignite.ml.math.distances.ManhattanDistance;
+import org.apache.ignite.ml.math.primitives.MathTestConstants;
+import org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DelegatingVector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.math.primitives.vector.impl.VectorizedViewMatrix;
+import org.apache.ignite.ml.structures.Dataset;
+import org.apache.ignite.ml.structures.DatasetRow;
+import org.apache.ignite.ml.structures.FeatureMetadata;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+/**
+ * Tests for externalizable classes.
+ */
+public class ExternalizeTest {
+    /** */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void test() {
+        externalizeTest(new DelegatingVector(new DenseVector(1)));
+
+        externalizeTest(new VectorizedViewMatrix(new DenseMatrix(2, 2), 1, 1, 1, 1));
+
+        externalizeTest(new ManhattanDistance());
+
+        externalizeTest(new HammingDistance());
+
+        externalizeTest(new EuclideanDistance());
+
+        externalizeTest(new FeatureMetadata());
+
+        externalizeTest(new VectorizedViewMatrix(new DenseMatrix(2, 2), 1, 1, 1, 1));
+
+        externalizeTest(new DatasetRow<>(new DenseVector()));
+
+        externalizeTest(new LabeledVector<>(new DenseVector(), null));
+
+        externalizeTest(new Dataset<DatasetRow<Vector>>(new DatasetRow[] {}, new FeatureMetadata[] {}));
+    }
+
+    /** */
+    @SuppressWarnings("unchecked")
+    private <T> void externalizeTest(T initObj) {
+        T objRestored = null;
+
+        try {
+            ByteArrayOutputStream byteArrOutputStream = new ByteArrayOutputStream();
+            ObjectOutputStream objOutputStream = new ObjectOutputStream(byteArrOutputStream);
+
+            objOutputStream.writeObject(initObj);
+
+            ByteArrayInputStream byteArrInputStream = new ByteArrayInputStream(byteArrOutputStream.toByteArray());
+            ObjectInputStream objInputStream = new ObjectInputStream(byteArrInputStream);
+
+            objRestored = (T)objInputStream.readObject();
+
+            assertEquals(MathTestConstants.VAL_NOT_EQUALS, initObj, objRestored);
+
+           assertEquals(MathTestConstants.VAL_NOT_EQUALS, 0, Integer.compare(initObj.hashCode(), objRestored.hashCode()));
+        }
+        catch (ClassNotFoundException | IOException e) {
+            fail(e + " [" + e.getMessage() + "]");
+        }
+        finally {
+            if (objRestored instanceof Destroyable)
+                ((Destroyable)objRestored).destroy();
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/LocalModelsTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/LocalModelsTest.java
new file mode 100644
index 0000000..ca3f0b5
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/LocalModelsTest.java
@@ -0,0 +1,264 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.function.Function;
+import org.apache.ignite.ml.Exporter;
+import org.apache.ignite.ml.FileExporter;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModel;
+import org.apache.ignite.ml.clustering.kmeans.KMeansModelFormat;
+import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.knn.NNClassificationModel;
+import org.apache.ignite.ml.knn.ann.ANNClassificationModel;
+import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer;
+import org.apache.ignite.ml.knn.ann.ANNModelFormat;
+import org.apache.ignite.ml.knn.ann.ProbableLabel;
+import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
+import org.apache.ignite.ml.knn.classification.KNNModelFormat;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.distances.ManhattanDistance;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.regressions.linear.LinearRegressionModel;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
+import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassModel;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationModel;
+import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationModel;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests for models import/export functionality.
+ */
+public class LocalModelsTest {
+    /** */
+    @Test
+    public void importExportKMeansModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            KMeansModel mdl = getClusterModel();
+
+            Exporter<KMeansModelFormat, String> exporter = new FileExporter<>();
+
+            mdl.saveModel(exporter, mdlFilePath);
+
+            KMeansModelFormat load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+
+            KMeansModel importedMdl = new KMeansModel(load.getCenters(), load.getDistance());
+
+            Assert.assertEquals("", mdl, importedMdl);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportLinearRegressionModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            LinearRegressionModel mdl = new LinearRegressionModel(new DenseVector(new double[]{1, 2}), 3);
+            Exporter<LinearRegressionModel, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            LinearRegressionModel load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+            Assert.assertEquals("", mdl, load);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportSVMBinaryClassificationModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            SVMLinearBinaryClassificationModel mdl = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{1, 2}), 3);
+            Exporter<SVMLinearBinaryClassificationModel, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            SVMLinearBinaryClassificationModel load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+            Assert.assertEquals("", mdl, load);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportSVMMultiClassClassificationModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            SVMLinearBinaryClassificationModel binaryMdl1 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{1, 2}), 3);
+            SVMLinearBinaryClassificationModel binaryMdl2 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{2, 3}), 4);
+            SVMLinearBinaryClassificationModel binaryMdl3 = new SVMLinearBinaryClassificationModel(new DenseVector(new double[]{3, 4}), 5);
+
+            SVMLinearMultiClassClassificationModel mdl = new SVMLinearMultiClassClassificationModel();
+            mdl.add(1, binaryMdl1);
+            mdl.add(2, binaryMdl2);
+            mdl.add(3, binaryMdl3);
+
+            Exporter<SVMLinearMultiClassClassificationModel, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            SVMLinearMultiClassClassificationModel load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+            Assert.assertEquals("", mdl, load);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportLogisticRegressionModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            LogisticRegressionModel mdl = new LogisticRegressionModel(new DenseVector(new double[]{1, 2}), 3);
+            Exporter<LogisticRegressionModel, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            LogisticRegressionModel load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+            Assert.assertEquals("", mdl, load);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportLogRegressionMultiClassModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            LogRegressionMultiClassModel mdl = new LogRegressionMultiClassModel();
+            Exporter<LogRegressionMultiClassModel, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            LogRegressionMultiClassModel load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+            Assert.assertEquals("", mdl, load);
+
+            return null;
+        });
+    }
+
+    /** */
+    private void executeModelTest(Function<String, Void> code) throws IOException {
+        Path mdlPath = Files.createTempFile(null, null);
+
+        Assert.assertNotNull(mdlPath);
+
+        try {
+            String mdlFilePath = mdlPath.toAbsolutePath().toString();
+
+            Assert.assertTrue(String.format("File %s not found.", mdlFilePath), Files.exists(mdlPath));
+
+            code.apply(mdlFilePath);
+        }
+        finally {
+            Files.deleteIfExists(mdlPath);
+        }
+    }
+
+    /** */
+    private KMeansModel getClusterModel() {
+        Map<Integer, double[]> data = new HashMap<>();
+        data.put(0, new double[] {1.0, 1959, 325100});
+        data.put(1, new double[] {1.0, 1960, 373200});
+
+        KMeansTrainer trainer = new KMeansTrainer()
+            .withAmountOfClusters(1);
+
+        return trainer.fit(
+            new LocalDatasetBuilder<>(data, 2),
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+    }
+
+    /** */
+    @Test
+    public void importExportKNNModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            NNClassificationModel mdl = new KNNClassificationModel(null)
+                .withK(3)
+                .withDistanceMeasure(new EuclideanDistance())
+                .withStrategy(NNStrategy.SIMPLE);
+
+            Exporter<KNNModelFormat, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            KNNModelFormat load = exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+
+            NNClassificationModel importedMdl = new KNNClassificationModel(null)
+                .withK(load.getK())
+                .withDistanceMeasure(load.getDistanceMeasure())
+                .withStrategy(load.getStgy());
+
+            Assert.assertEquals("", mdl, importedMdl);
+
+            return null;
+        });
+    }
+
+    /** */
+    @Test
+    public void importExportANNModelTest() throws IOException {
+        executeModelTest(mdlFilePath -> {
+            final LabeledVectorSet<ProbableLabel, LabeledVector> centers = new LabeledVectorSet<>();
+
+            NNClassificationModel mdl = new ANNClassificationModel(centers, new ANNClassificationTrainer.CentroidStat())
+                .withK(4)
+                .withDistanceMeasure(new ManhattanDistance())
+                .withStrategy(NNStrategy.WEIGHTED);
+
+            Exporter<KNNModelFormat, String> exporter = new FileExporter<>();
+            mdl.saveModel(exporter, mdlFilePath);
+
+            ANNModelFormat load = (ANNModelFormat) exporter.load(mdlFilePath);
+
+            Assert.assertNotNull(load);
+
+
+            NNClassificationModel importedMdl = new ANNClassificationModel(load.getCandidates(), new ANNClassificationTrainer.CentroidStat())
+                .withK(load.getK())
+                .withDistanceMeasure(load.getDistanceMeasure())
+                .withStrategy(load.getStgy());
+
+            Assert.assertEquals("", mdl, importedMdl);
+
+            return null;
+        });
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/ModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/ModelTest.java
new file mode 100644
index 0000000..cfc081b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/ModelTest.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import org.apache.ignite.ml.Model;
+import org.junit.Test;
+
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Tests for {@link Model} functionality.
+ */
+public class ModelTest {
+    /** */
+    @Test
+    public void testCombine() {
+        Model<Object, Object> mdl = new TestModel<>().combine(new TestModel<>(), (x, y) -> x);
+
+        assertNotNull(mdl.toString(true));
+        assertNotNull(mdl.toString(false));
+    }
+
+    /** */
+    private static class TestModel<T, V> implements Model<T, V> {
+        /** {@inheritDoc} */
+        @Override public V apply(T t) {
+            return null;
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/common/TrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/common/TrainerTest.java
new file mode 100644
index 0000000..678ed44
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/common/TrainerTest.java
@@ -0,0 +1,1161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.common;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+/**
+ * Basic fields and methods for the trainer tests.
+ */
+@RunWith(Parameterized.class)
+public class TrainerTest {
+    /** Number of parts to be tested. */
+    private static final int[] partsToBeTested = new int[]{1, 2, 3, 4, 5, 7, 100};
+
+    /** Parameters. */
+    @Parameterized.Parameters(name = "Data divided on {0} partitions, training with batch size {1}")
+    public static Iterable<Integer[]> data() {
+        List<Integer[]> res = new ArrayList<>();
+
+        for (int part : partsToBeTested)
+            res.add(new Integer[]{part});
+
+        return res;
+    }
+
+    /** Number of partitions. */
+    @Parameterized.Parameter
+    public int parts;
+
+    /** Precision in test checks. */
+    protected static final double PRECISION = 1e-2;
+
+    /** Two-easy clustered data. */
+    protected static final double[][] twoClusters = {
+        {0, 519.9017766224466, 554.4100892224841},
+        {0, 563.5609233456146, 558.5857619285702},
+        {0, 503.5549215892729, 594.3825404658926},
+        {0, 584.7460223841858, 515.2243614011547},
+        {0, 575.095839624477, 590.8556618187845},
+        {0, 594.3592060102463, 554.2221434279162},
+        {0, 583.432820535236, 504.66164764881523},
+        {0, 599.0963460154512, 534.1774623344388},
+        {0, 568.9703081604248, 543.2226391011388},
+        {0, 586.6698629586531, 529.5241964168969},
+        {0, 551.1051323168858, 539.1885401513679},
+        {0, 508.4609024546371, 504.35073029226396},
+        {0, 599.0470661333914, 569.4595846036917},
+        {0, 570.5493551454197, 526.7253349784085},
+        {0, 534.2832458435303, 550.3000463382016},
+        {0, 594.4616179647461, 536.3197487506842},
+        {0, 565.3197172280577, 506.3293991999001},
+        {0, 592.6602122456759, 513.646808538896},
+        {0, 509.8216048850749, 509.4973240875119},
+        {0, 502.3878128815718, 570.9482197992043},
+        {0, 594.6632085763065, 547.9275009326266},
+        {0, 529.6467177083762, 547.9107158851994},
+        {0, 544.9626346641528, 567.3832919235468},
+        {0, 511.4105135690089, 578.1849565872583},
+        {0, 501.01584549257973, 570.6868576016038},
+        {0, 595.8080144542582, 512.03499265368},
+        {0, 528.786843178995, 502.8166496868458},
+        {0, 528.6621082789842, 560.8712577770658},
+        {0, 510.8974224808237, 596.4667253000505},
+        {0, 583.8947380467763, 547.9688139648637},
+        {0, 561.4766784411281, 531.2449896695659},
+        {0, 560.6943663394893, 566.9710095676068},
+        {0, 517.393777179133, 588.7651419118193},
+        {0, 500.4713974957799, 528.0769354138976},
+        {0, 545.8783916658755, 586.1791106273984},
+        {0, 587.1987551324714, 552.7968581692342},
+        {0, 504.14955324617733, 502.9202365190475},
+        {0, 589.118356537786, 567.5453447798067},
+        {0, 581.0404600079042, 524.3383641814191},
+        {0, 578.836850556919, 519.0303628080188},
+        {0, 532.684541905037, 592.0373074571884},
+        {0, 539.631541540315, 500.86701934899133},
+        {0, 585.080559785121, 559.185605736917},
+        {0, 557.6130747490417, 586.9060188494332},
+        {0, 511.4069711786483, 505.20182772247955},
+        {0, 543.3420695017039, 589.0522243776551},
+        {0, 545.7836567392021, 545.9829264066165},
+        {0, 587.4404520697882, 566.2450515524025},
+        {0, 598.0352806197182, 592.9871556855218},
+        {0, 599.1191676869415, 517.072913155282},
+        {0, 598.7990121325806, 542.5922389368699},
+        {0, 567.9157541778169, 508.8637304888606},
+        {0, 516.9141893487038, 504.5333015373364},
+        {0, 528.2650000284832, 592.3618290091457},
+        {0, 577.0877824827497, 572.106440915086},
+        {0, 569.5034479656674, 513.1883531774486},
+        {0, 587.7126777761002, 568.9323649263932},
+        {0, 565.9489368582279, 516.9745616328178},
+        {0, 557.5589060305804, 515.2687667913198},
+        {0, 503.1554198985989, 509.09477188561954},
+        {0, 550.0203572858189, 595.1223421437577},
+        {0, 524.7913631016987, 523.3640528148924},
+        {0, 552.7246513026029, 546.2810129784725},
+        {0, 586.3892191983499, 552.7576239548819},
+        {0, 526.0748315118926, 573.804342015302},
+        {0, 565.1398123093003, 539.6854465576956},
+        {0, 527.0537447563926, 595.2059572407275},
+        {0, 598.4431244531863, 518.7675712573573},
+        {0, 518.1347648644486, 571.2772685572616},
+        {0, 522.0665003535328, 597.4691949058798},
+        {0, 559.3717433904218, 507.63523020707987},
+        {0, 517.7519710704423, 595.9228343205995},
+        {0, 557.1028047052068, 513.67799332853},
+        {0, 527.9783249961056, 596.5923404246605},
+        {0, 508.9548667053109, 583.3851484560171},
+        {0, 597.3054599709918, 572.0492942719156},
+        {0, 506.48170301986886, 545.2749213691201},
+        {0, 569.5215580939445, 552.2362437646713},
+        {0, 530.5232047696994, 517.814585379635},
+        {0, 582.7447646378554, 554.0837636670908},
+        {0, 510.04656659835496, 548.308864572033},
+        {0, 517.0884034675382, 503.6293035255885},
+        {0, 547.4077952612713, 521.8105170207767},
+        {0, 525.2452470246204, 565.7690087891091},
+        {0, 525.726872006642, 592.172865284197},
+        {0, 598.311246268818, 506.29428096115674},
+        {0, 599.4974643204109, 579.8062124124598},
+        {0, 584.7506624741848, 592.2505541944379},
+        {0, 598.7379007956142, 561.8346831647877},
+        {0, 553.9325403298083, 540.4895037718127},
+        {0, 577.4868596401562, 533.9482256583582},
+        {0, 524.7729276101758, 523.3563039535018},
+        {0, 513.6033305233657, 572.2592770048955},
+        {0, 574.5120210087475, 557.5521505158835},
+        {0, 573.951281294893, 527.3670057739082},
+        {0, 548.1326423460839, 551.1839666791825},
+        {0, 508.2214563147455, 521.2342805765958},
+        {0, 515.93448815859, 511.17271820377954},
+        {0, 586.8712784936447, 571.3833808148395},
+        {0, 557.5242762492126, 527.4051948485309},
+        {1, -527.9820655500421, -597.0614987497938},
+        {1, -594.7423576008234, -570.0387215442279},
+        {1, -545.604557338824, -554.0763169557739},
+        {1, -502.35172702595014, -586.8484342087179},
+        {1, -587.293337705269, -588.0796352216714},
+        {1, -587.0516505340747, -517.7300179102016},
+        {1, -597.0360062250987, -547.9934802704281},
+        {1, -540.578489505472, -519.8075273206096},
+        {1, -530.4922286462058, -523.234050745461},
+        {1, -570.1324748254381, -584.3427934817109},
+        {1, -508.71765087148526, -521.2260165247377},
+        {1, -506.10153233039114, -546.0469706912013},
+        {1, -587.6311232069863, -500.8789962962048},
+        {1, -585.9407497123008, -593.6250426349442},
+        {1, -597.7192354774427, -504.3968636076061},
+        {1, -587.6912279656732, -587.810549281485},
+        {1, -567.4906024676383, -529.7889328775241},
+        {1, -510.5883782383144, -564.6056218025714},
+        {1, -545.5877634339324, -503.13342363625316},
+        {1, -595.491952236763, -526.4157102337199},
+        {1, -565.8931103880244, -512.3930396698607},
+        {1, -564.9817304867518, -518.5421568025347},
+        {1, -528.5838433236987, -590.2716385768655},
+        {1, -568.3038165320794, -523.2037657971182},
+        {1, -513.579781599134, -540.7083264768794},
+        {1, -577.5234177434545, -574.4083212880694},
+        {1, -566.4331360533965, -529.8498325039095},
+        {1, -517.1862636590681, -544.9513758919965},
+        {1, -534.6578726508548, -515.7113551681354},
+        {1, -531.5918919225953, -508.0051177928042},
+        {1, -521.335920134657, -549.8508399779365},
+        {1, -587.6565547672371, -500.40617781899505},
+        {1, -502.89297655657947, -550.0462820641452},
+        {1, -565.9529549834383, -570.5296426883887},
+        {1, -539.695184660248, -566.3720803092855},
+        {1, -557.2412994794262, -516.6673702747074},
+        {1, -548.3193140374153, -511.0113251963232},
+        {1, -599.1568790407902, -559.2622714664305},
+        {1, -571.755520275542, -554.0839358749181},
+        {1, -544.964945135059, -564.448243523719},
+        {1, -574.7985361688525, -593.6384131471896},
+        {1, -563.642288502551, -538.3721218790038},
+        {1, -500.4279098845297, -583.9340798923859},
+        {1, -569.917708080877, -550.7162526230916},
+        {1, -549.8345448125123, -565.7759787232027},
+        {1, -527.8248193430064, -562.9256751876678},
+        {1, -508.69265110570973, -544.8174395269017},
+        {1, -561.7662650395065, -534.6799220439667},
+        {1, -510.11351976460816, -567.17615864117},
+        {1, -592.6464340868883, -546.7679954740394},
+        {1, -591.5566687475105, -516.335391669214},
+        {1, -598.1620280980214, -563.5663494736577},
+        {1, -571.6540085024682, -514.5024112396218},
+        {1, -597.0973739353884, -518.6402453320493},
+        {1, -597.971879649216, -541.9911785849602},
+        {1, -502.7804400334985, -527.9041465965335},
+        {1, -502.24013032418287, -596.8646708140396},
+        {1, -598.4180305891012, -535.013864017069},
+        {1, -575.018281589379, -596.0252991207353},
+        {1, -593.1939727679464, -557.8288153478848},
+        {1, -552.9384213856413, -579.3694486320592},
+        {1, -559.9203621818546, -554.0072497905501},
+        {1, -588.3411365623961, -575.9606196770269},
+        {1, -517.0844394937534, -547.9291196136605},
+        {1, -509.32764537741576, -591.737729755405},
+        {1, -557.2674260753181, -543.5864572972603},
+        {1, -565.1475139126333, -559.4796022645727},
+        {1, -556.0025119789701, -572.6261174533101},
+        {1, -590.7960121205607, -517.0840963139137},
+        {1, -580.3696729031607, -541.5331163469414},
+        {1, -519.8369954073894, -599.1883519701099},
+        {1, -590.5570159829517, -587.4602437344656},
+        {1, -502.5275914906194, -540.3454217852702},
+        {1, -584.1282872304774, -593.2194019651928},
+        {1, -557.8446121942737, -558.0626917521755},
+        {1, -580.209165096907, -588.7259851212183},
+        {1, -510.90874302504056, -591.5091481352281},
+        {1, -514.1724729381817, -595.1020401318071},
+        {1, -552.5076612804402, -548.397966879673},
+        {1, -565.2070083573942, -536.1826380211752},
+        {1, -565.9469212749985, -561.5506672108052},
+        {1, -526.4398083538586, -507.1913169678737},
+        {1, -595.2594496218172, -594.98464576562},
+        {1, -530.6904491548875, -519.0678635750138},
+        {1, -547.9945700155467, -597.6557660417575},
+        {1, -554.9468747569997, -591.1678311453294},
+        {1, -593.9678599910096, -518.9397714406934},
+        {1, -580.6827396967085, -541.1770564720399},
+        {1, -526.2991394747967, -595.5353558464069},
+        {1, -532.0567052472832, -547.7555982808492},
+        {1, -506.550640897891, -501.44148884553215},
+        {1, -537.7945174903881, -539.9517392521116},
+        {1, -588.1139279080066, -572.5589261656883},
+        {1, -598.4030676856231, -528.8036722121387},
+        {1, -532.6970859002654, -567.13898500018},
+        {1, -564.8245220213231, -595.6981311004888},
+        {1, -568.8669962693484, -516.5125158739406},
+        {1, -549.1709908638323, -558.8129291840139},
+        {1, -510.85336064345756, -575.3635308154353},
+        {1, -583.9245510800588, -536.793806117792}
+    };
+
+    /** The data is easy separated with classifier by y = x. */
+    protected static final double[][] twoLinearlySeparableClasses = {
+        {0.0, -122.69914721554494, -152.90003228835155},
+        {1.0, -988.7803093110984, 39.64498230320555},
+        {1.0, -721.0342526056645, -167.29469954420483},
+        {1.0, 606.0603250738964, 612.4505657575703},
+        {1.0, -435.7428098964267, 749.26660250907},
+        {0.0, 977.0266542119459, 906.2797731011997},
+        {0.0, 442.79191352401017, 99.68443783203702},
+        {1.0, -984.4696576079481, 98.58983213854299},
+        {0.0, 950.3560064579242, -54.087172588871226},
+        {0.0, 989.1247453182418, -942.9228555672748},
+        {1.0, -950.3830359669219, 720.9427578590175},
+        {0.0, -263.7437828854337, -369.67762228969286},
+        {1.0, -837.771820186008, 966.2671117206883},
+        {1.0, -101.63051923258354, 135.30595977925213},
+        {0.0, 927.4068611376827, 552.576689560276},
+        {1.0, 671.674613544031, 867.0342619845135},
+        {0.0, 489.04809639359723, -371.80622025525497},
+        {1.0, -577.8620591314951, -561.9793202960524},
+        {1.0, -628.699903999805, 746.9179933415019},
+        {0.0, 787.7955413710754, 729.8880998762927},
+        {1.0, -160.9905826731191, 597.1342309929371},
+        {1.0, -661.7582546189365, 294.3559610458383},
+        {0.0, 992.067372280372, -586.7840785767917},
+        {0.0, -229.6963941046797, -860.6481903559245},
+        {1.0, -459.91823406828814, 174.31002243199828},
+        {0.0, 132.09417954527203, -203.6015836943012},
+        {0.0, 458.8315635996389, -109.92869423399452},
+        {1.0, 424.63154498678796, 581.7436424491116},
+        {0.0, 606.7777384705123, 382.51034075942744},
+        {1.0, 133.97732363544492, 810.4293150045719},
+        {1.0, -752.3792672455503, 902.3533215842801},
+        {0.0, 124.02578589031486, -242.0045741962906},
+        {0.0, 65.95100120357665, -362.9563512717997},
+        {1.0, -975.7825688109236, -724.6782664271469},
+        {1.0, -885.3333915784285, -166.8285153252507},
+        {1.0, -242.89869955409756, 878.9999767933075},
+        {0.0, 271.2149993049329, -490.0480096390996},
+        {0.0, -74.16302081043352, -824.0859586265949},
+        {1.0, -520.4108075793048, 751.6954919374432},
+        {0.0, 104.03293413801771, -631.0663974778311},
+        {0.0, 179.4274025610996, -610.9764997543232},
+        {1.0, 291.2686412591502, 892.1178988173092},
+        {0.0, 723.1240938478552, -291.3765504086348},
+        {0.0, 12.575218418479949, -307.36975804125973},
+        {1.0, -397.44825972130786, -295.76021536144117},
+        {1.0, -163.90291786947955, 501.6868597449188},
+        {0.0, 513.9232732684154, -287.4072243396091},
+        {1.0, 146.81987289015547, 293.1152654799746},
+        {1.0, -422.734205503476, 154.09536939552663},
+        {0.0, 293.2607563043757, -141.65822134246525},
+        {1.0, -93.46771747630169, 73.91086927080437},
+        {1.0, -972.6525030120272, -867.0819061818511},
+        {1.0, -636.136018043414, 55.4840372628596},
+        {1.0, -821.240801777343, -750.3407912999469},
+        {0.0, 826.9598934792543, -48.17510971836464},
+        {0.0, -737.5399357047692, -834.168742619978},
+        {0.0, 910.2286110591372, -321.2153303241547},
+        {1.0, -539.8385115026349, -204.624635929521},
+        {0.0, 710.9811829617875, 156.53494004963864},
+        {1.0, -576.1327147891295, -255.98030417689222},
+        {0.0, -406.9117225223731, -568.1674835571359},
+        {1.0, 786.4324782672932, 879.9433045727255},
+        {0.0, 655.1507253229393, -931.0320133380443},
+        {1.0, 920.1359556509667, 975.4010808044634},
+        {0.0, 340.9923780361835, -791.6415124130187},
+        {0.0, 789.0326432258107, 101.45600150894029},
+        {0.0, 301.62354598942807, -263.0383267796972},
+        {0.0, -196.75683699829483, -759.6731432356696},
+        {1.0, 104.36756752228234, 362.6645930627608},
+        {0.0, -110.09892045131369, -522.6327938767872},
+        {0.0, 983.058982063912, -853.6685099856713},
+        {0.0, 853.0396544144112, -373.6430440893963},
+        {0.0, 894.5396176478532, -259.3520478430646},
+        {0.0, -59.540445910742505, -405.2785421154832},
+        {1.0, -195.02204474289272, -98.01099074578019},
+        {1.0, -400.33845881394757, 517.4826371806812},
+        {0.0, 998.8721163227847, 658.7589886248159},
+        {1.0, -739.9839264739526, 281.7808456690698},
+        {0.0, 225.2955438875149, -240.13571797647785},
+        {0.0, 415.36363610958847, 119.2467848060553},
+        {1.0, -430.93611072673775, 953.9339020518189},
+        {0.0, 695.641934652828, -613.3163270715312},
+        {1.0, -977.0662561296275, 44.1566618295617},
+        {0.0, 894.0074404584143, 115.97551230630302},
+        {1.0, -256.65810543256225, 121.31432413171797},
+        {1.0, -745.2570475473517, 144.83266177886867},
+        {0.0, 865.266441371979, -329.08860770412593},
+        {1.0, -262.69924145366974, 196.52256942501003},
+        {0.0, 858.8703536921596, -755.3718265129426},
+        {1.0, -620.7574721811682, 744.695289706485},
+        {1.0, 526.9918067706062, 622.6110941283573},
+        {1.0, 30.51838905352247, 451.84360857486945},
+        {1.0, -886.670070825786, 955.5438997547349},
+        {0.0, -419.85446648529296, -904.4363933507589},
+        {1.0, -19.357361515996104, 288.3545217146416},
+        {1.0, 425.807567480902, 617.3859577708511},
+        {1.0, -369.8197242330872, 428.4625522196195},
+        {1.0, -540.2030619980012, 980.1078500916262},
+        {0.0, 963.0216885940265, -999.6718455904652},
+        {0.0, -36.084390168692494, -930.2210871204579},
+        {0.0, 686.7777019875359, 274.083830555807},
+        {1.0, -798.5755214306325, -292.6360310433025},
+        {0.0, -302.49374189510456, -979.2873514693756},
+        {1.0, -473.88156240514184, 290.3700442022921},
+        {1.0, -619.3422333592813, -203.62900604757556},
+        {1.0, -603.8165620304862, 433.7049783716991},
+        {0.0, -394.9003601369652, -423.49571094476414},
+        {0.0, -297.5499912778255, -379.6966117627778},
+        {0.0, 914.6350307682171, 395.0639307730339},
+        {1.0, 302.2432544019764, 420.03068857885364},
+        {1.0, -486.2192439106092, 504.61160963291354},
+        {0.0, -80.9055582464382, -999.3540019713568},
+        {1.0, -808.7735610468485, -600.3003616235419},
+        {1.0, 559.7216432827174, 573.1410775962665},
+        {0.0, 107.25054731907449, 56.68399536280276},
+        {1.0, -986.8173329580039, 955.5975873551458},
+        {0.0, -28.898975148538057, -764.5914349235939},
+        {0.0, 544.5435587517745, 541.7144224905855},
+        {1.0, -733.3388961452514, 995.0625378143936},
+        {0.0, -424.0376248679678, -808.8197992783022},
+        {0.0, 69.10888994619336, -596.3814493832142},
+        {0.0, 668.7563898645246, -309.5338641095864},
+        {1.0, -664.6829023895461, -421.3131122742957},
+        {0.0, 34.30209430645755, -10.50945210920679},
+        {0.0, -370.6335997213754, -510.2102646234516},
+        {1.0, 430.4223842649294, 947.0324231650752},
+        {1.0, -561.4417521638584, 912.0398180862007},
+        {0.0, -529.1099093762112, -787.9426065835444},
+        {0.0, -784.2287272477402, -950.6749150482902},
+        {1.0, -292.2382923363127, 29.73057963193787},
+        {1.0, 543.8216641288004, 574.9668960406921},
+        {0.0, 492.70797586385834, -508.7411915523603},
+        {0.0, 847.4958582226334, 141.27775112134555},
+        {0.0, -294.9950818964355, -539.6512583592041},
+        {1.0, -731.3440778046363, -194.13179207217638},
+        {0.0, -26.21276485761848, -177.1382736912766},
+        {0.0, 169.10051967522577, -877.8835027096119},
+        {0.0, 869.7338657560076, -216.14439990877327},
+        {0.0, 676.9668800100419, 487.3264255975398},
+        {0.0, 340.2086777131092, -483.69798685778176},
+        {0.0, 177.05787101614578, -187.8731928010908},
+        {0.0, 514.0064634256835, -838.309309799528},
+        {1.0, -945.6616134661633, -892.0662652148447},
+        {0.0, 706.7531607568874, 584.875678987067},
+        {0.0, 996.1691889712217, -381.420741757301},
+        {0.0, 846.3827047328193, 138.5937078747695},
+        {1.0, -579.1773394655615, -551.6157981896823},
+        {1.0, -379.8315393213704, 376.240073123181},
+        {0.0, 416.70241675343345, -762.0460887999392},
+        {0.0, 784.4659593773504, -476.3450292459248},
+        {0.0, -328.2495971471759, -797.0282102006712},
+        {1.0, 427.63385513313506, 691.0529822653089},
+        {0.0, 478.22491887051683, 368.08172770775104},
+        {0.0, -194.5486491952804, -635.7562271928532},
+        {1.0, 462.9118544444739, 546.477694721709},
+        {1.0, -364.33646342640543, -16.525517700831642},
+        {1.0, 191.5538518885253, 534.4886561736935},
+        {1.0, 162.29801970257063, 204.07339353277848},
+        {1.0, 359.87375962515307, 510.4390321509045},
+        {0.0, 906.0920707478278, 518.474366833321},
+        {0.0, -23.926514764001354, -545.5535138792807},
+        {1.0, -457.5490330216003, 462.75697632384026},
+        {1.0, 361.19368061986074, 602.0833438729098},
+        {1.0, 240.82404813916537, 903.8580437547587},
+        {0.0, 682.9887385477937, -575.5748494609797},
+        {0.0, -524.9683035626636, -643.4995281011295},
+        {1.0, -868.3907344133812, 687.0334981662659},
+        {0.0, 483.1046447412375, 425.5242965675352},
+        {0.0, 441.7390582141493, -178.6473657093535},
+        {0.0, 857.9901628015248, -725.079106653412},
+        {1.0, 3.9407370946466926, 501.36916187999213},
+        {0.0, 987.6165576421165, -870.7792926909152},
+        {0.0, 38.550394080002434, -316.2460756905849},
+        {1.0, 259.98559430828277, 779.1704474238529},
+        {1.0, -772.0783930084303, 457.81379891960387},
+        {0.0, 965.2460667816263, -900.5906154928432},
+        {0.0, 435.8488975524808, -807.3179393158829},
+        {1.0, -414.9097308847265, 663.2091519493613},
+        {0.0, -692.3369071358595, -853.7674486529854},
+        {1.0, -527.6968945977544, -89.29268231562753},
+        {0.0, 98.58509375449921, -812.2575242800065},
+        {1.0, -246.4858612821199, 690.7736181778389},
+        {0.0, 306.0413673433336, 50.36342267895475},
+        {0.0, -326.3755954952927, -630.9271581822045},
+        {0.0, 435.3759701541835, -478.72141764190417},
+        {0.0, 150.07627192243012, -126.16495181072969},
+        {0.0, 999.2382522208045, 293.8336213483592},
+        {1.0, -970.7818229850416, 559.8116781984274},
+        {0.0, 321.62133209742956, -446.07065722044115},
+        {1.0, 387.61470906465297, 809.9877801153038},
+        {1.0, 375.48380231362376, 548.1340438996276},
+        {0.0, 198.31962497327982, -841.3407638914643},
+        {0.0, -59.75027524961797, -196.91881794207666},
+        {0.0, 539.4390329297466, 265.73233936446013},
+        {1.0, 161.7769611006779, 420.4911194344545},
+        {1.0, -422.73262266569805, 305.27632230640575},
+        {0.0, 419.7041783295376, 384.4277361814418},
+        {1.0, -384.80122335064925, 128.84723939702212},
+        {0.0, 345.8732451410485, -634.6766931661393},
+        {1.0, -753.0957875425104, 162.043321600848},
+        {1.0, -721.0825943433963, -647.1437151757809},
+        {0.0, 737.8179495142201, -612.9000146979762},
+        {0.0, 165.62609685662937, -209.04556534374638},
+        {1.0, 211.75025757991534, 762.4363190775396},
+        {0.0, -282.0707259050812, -631.5669067165459},
+        {0.0, -10.649387489441551, -11.742073063187377},
+        {0.0, 532.2273317939553, -714.4637938741703},
+        {0.0, 851.6255007653094, -428.168617931829},
+        {0.0, -650.2303513768155, -701.0819971407498},
+        {0.0, 486.19072881419584, 17.642342348021202},
+        {0.0, 937.5878660613639, 253.91073899684488},
+        {1.0, -481.7837261941776, 386.0515070365086},
+        {1.0, 898.8591491398315, 960.3282479515362},
+        {1.0, -795.2119099095994, -52.442255260638944},
+        {1.0, -832.14760576095, 406.48368080778823},
+        {1.0, 317.3610961002403, 475.88090137988934},
+        {1.0, -543.9941239514503, 937.9571974443777},
+        {1.0, -737.7149868841586, 412.02870959820666},
+        {1.0, -86.04799530647608, 764.2717139104996},
+        {1.0, -908.3441434769735, -52.62148904481751},
+        {1.0, -558.4878652128368, 975.5017115797407},
+        {1.0, -120.28961819893993, 58.60059810912276},
+        {0.0, 797.7665926374921, -530.0884822652556},
+        {0.0, -248.62486746176887, -983.5555931167586},
+        {0.0, 910.1931415438364, 35.953135142478914},
+        {1.0, -304.741023136228, 253.0138864886694},
+        {0.0, -510.13133519018925, -642.3600729680307},
+        {0.0, 988.5683650098642, -751.2030447890847},
+        {1.0, -118.0142080751416, 352.20209758019996},
+        {0.0, -638.757222741898, -685.6631975937353},
+        {0.0, 759.5622347453971, -722.2769348273996},
+        {0.0, -740.3498419247273, -974.2677201928796},
+        {0.0, -776.6102763008262, -993.7697826767383},
+        {1.0, -895.9448277148887, -462.29125820523006},
+        {0.0, -311.8810163384071, -318.9742942085709},
+        {0.0, 368.78035230644787, -273.65009131252566},
+        {0.0, 731.1488644867686, -184.2725009666142},
+        {1.0, 240.0262332913362, 544.8792933528591},
+        {1.0, -129.8786600652611, 122.64122390591797},
+        {1.0, -998.8693504661202, -989.3959455521401},
+        {0.0, 358.9021702584721, -372.46195332982563},
+        {0.0, 423.66170839399, -3.6733713507491075},
+        {0.0, 320.08527272511014, -267.49487239617406},
+        {1.0, 628.8557340365153, 716.1736088420723},
+        {1.0, 87.0852622927755, 191.08205494997515},
+        {0.0, -163.5535634273158, -401.43333064263857},
+        {1.0, 241.57291015127043, 354.07473809573935},
+        {0.0, 425.42982178930424, -659.6389818980119},
+        {1.0, -513.057622632338, -150.48805414197307},
+        {0.0, 435.2888705572377, -500.4699931158425},
+        {1.0, -761.2341202466506, 919.1637075257438},
+        {1.0, -254.8539665845866, 711.5522826694619},
+        {0.0, -350.2587997576785, -911.7842377194485},
+        {0.0, 588.5547568621123, -16.003674634160916},
+        {0.0, -557.7880688291352, -939.7740734026603},
+        {0.0, 683.6988697659988, -285.8831419034458},
+        {0.0, 782.8461154585116, 426.91516285206694},
+        {1.0, -792.3388875152918, 361.1342300030676},
+        {1.0, -673.792921360787, 820.8934158286147},
+        {1.0, -15.357504282120772, 15.275909249335541},
+        {0.0, -99.22050275699814, -249.077767711845},
+        {1.0, -820.111231678807, -320.1107983145504},
+        {0.0, 911.7878651586336, 825.2998851049153},
+        {1.0, -750.2941326911656, -629.1546336560141},
+        {1.0, -890.6374102685097, -804.5407239545832},
+        {1.0, -204.75148861468108, 722.1116624961337},
+        {0.0, 519.1714356909579, 154.07772492651725},
+        {0.0, 982.2450336212896, 897.8824490832485},
+        {0.0, 554.4793545664838, 335.7541373769475},
+        {1.0, -339.90247025178235, 47.02715071976445},
+        {0.0, 901.2543768759774, -662.3275399668249},
+        {1.0, -942.3762411246095, -875.0025895092708},
+        {0.0, 418.20256050104604, -414.3102074305251},
+        {0.0, 625.0294460702908, -625.6315794655841},
+        {1.0, -449.74570685873516, 937.185777575773},
+        {0.0, 508.2386960118979, 454.0962431757914},
+        {1.0, 331.4089009636193, 589.2741722009719},
+        {1.0, 99.06469391982864, 187.0394494146019},
+        {1.0, -982.3370248476699, 322.0973186273661},
+        {1.0, 548.6443983489316, 708.7265431968447},
+        {0.0, 918.9454013804204, -383.8602043941679},
+        {1.0, 47.025960736300476, 171.219298464468},
+        {0.0, 378.2597384891858, 163.1492885941102},
+        {0.0, 438.65288112462554, -139.6734662005057},
+        {1.0, -831.8875659762939, 892.6667556591465},
+        {0.0, 883.0433572247841, -405.08376291753257},
+        {0.0, 885.9349479866808, -577.4873262774219},
+        {1.0, -614.7099535083557, -133.06983968843338},
+        {0.0, 111.7257364798395, -585.9016094589116},
+        {1.0, 453.9214560104581, 999.4093349063546},
+        {1.0, -660.6080448479984, -558.4295455433598},
+        {0.0, -466.8209751830958, -591.196870091049},
+        {0.0, -964.7665601618734, -997.9800903796079},
+        {0.0, -236.07763234295055, -450.41129146522917},
+        {0.0, -621.6876241277605, -797.4500041783042},
+        {0.0, -773.3591978507126, -890.0043590247606},
+        {1.0, -41.04699663875965, 822.3779367276668},
+        {0.0, -88.10853803965915, -192.37350885363378},
+        {0.0, 663.981740050287, -508.81572667480236},
+        {0.0, 15.59472374839936, -806.7541810675616},
+        {1.0, -892.7104844234832, -708.5235867565298},
+        {1.0, -484.65491520217245, 386.6430150137869},
+        {0.0, 865.0610549279427, 615.8811284084713},
+        {1.0, -824.4355093837889, 655.3234320109748},
+        {1.0, -274.68139814419976, -239.53727115479273},
+        {0.0, -86.4277464637313, -881.0777192437689},
+        {1.0, -581.4932661460668, 769.3538369247574},
+        {0.0, -432.5850223289913, -577.4260081674186},
+        {1.0, 166.76522990130684, 582.4331818363789},
+        {0.0, 396.8182460459341, 248.34183939490367},
+        {1.0, -509.8701926143476, 368.8796357552451},
+        {1.0, -482.54152901054886, -248.83959837521047},
+        {1.0, -300.50297994358345, 742.4139758199028},
+        {0.0, 163.28493788474384, -61.41706872692157},
+        {0.0, -399.2277405988791, -930.6519043114885},
+        {0.0, 44.13900477801826, -571.5314250642764},
+        {0.0, 457.8794897532496, -505.99693186447195},
+        {0.0, 16.85880382123935, -451.1811783607169},
+        {1.0, -743.4540696447744, 325.39937301862096},
+        {1.0, 57.40459247973081, 106.58399169789641},
+        {1.0, 183.98880310846016, 499.74779967287395},
+        {1.0, 567.0903172389608, 820.4986606446041},
+        {0.0, 672.4806526088855, 300.601012280614},
+        {0.0, -343.8894522407976, -761.4942297431235},
+        {0.0, 870.247864223385, -168.14608036197296},
+        {1.0, 593.005455426467, 673.1630290763387},
+        {0.0, -625.9494316959813, -983.6968015830237},
+        {1.0, 494.1754094118269, 992.2691899024903},
+        {0.0, 61.401789304312615, -773.2837841463802},
+        {1.0, -194.76742246565573, 69.77988116139159},
+        {0.0, 206.82364861578685, 121.15474801344544},
+        {1.0, -265.964495521001, 50.790074285276205},
+        {0.0, 818.3873132702915, 36.49793444927877},
+        {0.0, 99.81409878465752, -628.0274914181116},
+        {0.0, 464.149315901346, -321.29715928735277},
+        {1.0, -164.52462729937565, 952.4896905712137},
+        {0.0, -63.17364851415209, -149.49056773721736},
+        {0.0, 882.9288293898815, 171.00117804059573},
+        {0.0, 473.3733180102365, -689.3426862684687},
+        {0.0, 165.7220875180078, -354.71003889056044},
+        {0.0, 525.5517697849327, 415.84107073078167},
+        {0.0, -38.184721358457864, -99.36030799911896},
+        {0.0, 242.96729902384163, -156.16029387422054},
+        {0.0, 448.4711090805122, -495.01683482080705},
+        {1.0, -80.15226220702493, 970.6850105496733},
+        {0.0, 870.3328249998483, 583.0363909361256},
+        {0.0, -238.61798549246464, -430.95739845768026},
+        {0.0, -153.01230031899092, -482.12077718764306},
+        {1.0, -118.06183953458049, 40.44154430898425},
+        {1.0, -876.8968143885145, -370.6419068924105},
+        {0.0, 989.8165746071368, -943.0636134966381},
+        {0.0, 448.68476431428917, 44.44832374987436},
+        {0.0, -5.562631397638029, -594.7883897866259},
+        {0.0, 880.7175397337289, 786.6444839355895},
+        {0.0, 476.3278235630439, -756.8025350513306},
+        {0.0, -209.1261948306602, -366.9709734757247},
+        {1.0, -1.5342655753494228, 295.69953419777266},
+        {1.0, 98.88194946977887, 709.984198980128},
+        {1.0, -102.4522435336255, 348.55854643990347},
+        {1.0, 431.6422144084929, 488.26608578711966},
+        {1.0, -629.5648689407153, -389.98821373225144},
+        {1.0, -655.6263155228037, 89.12505314113082},
+        {0.0, -201.6475575882739, -902.9470477574147},
+        {1.0, -342.30143560116915, 157.21169053018912},
+        {1.0, -671.4797028289656, -49.48397951858112},
+        {1.0, -993.3541982679827, 428.50119148048657},
+        {0.0, 158.95824836793054, 115.93705315336206},
+        {1.0, -858.292999815246, 946.8912002937116},
+        {1.0, -223.10861890967476, 190.7507270694814},
+        {0.0, -147.9091707330915, -899.2785339400244},
+        {0.0, 254.55648822491457, -260.9331332388332},
+        {0.0, 560.3172529225217, 388.76836664538814},
+        {0.0, 924.1007767093995, 56.69156104001263},
+        {0.0, 62.42705110549082, -888.0360838024912},
+        {0.0, 222.43761905783595, 88.18795871018938},
+        {0.0, 489.8756173625022, 421.3474970424486},
+        {0.0, 246.6646015601891, -506.3175818566548},
+        {0.0, -620.5001534479718, -774.7836865370457},
+        {1.0, -654.0153133260937, -369.1547696738236},
+        {0.0, 853.1429595371762, -87.56985188355861},
+        {1.0, -226.84561483455388, 122.80144293902458},
+        {1.0, 335.09779003775316, 731.0032200516428},
+        {1.0, 87.90214612318391, 724.8989520503376},
+        {0.0, -51.792728592205776, -298.0103777307395},
+        {1.0, -421.181682827191, 41.01565470282776},
+        {1.0, -626.6392286104665, 227.98017875883284},
+        {1.0, -839.0341042344045, 990.7893877153003},
+        {1.0, -9.321936022159207, 125.24249479969853},
+        {0.0, 665.2916192497585, 314.9312297793483},
+        {1.0, -236.71130814979108, 41.56269468081973},
+        {1.0, -695.4935496704909, -364.376100277162},
+        {0.0, 60.90303121087936, -525.9732822401365},
+        {1.0, -740.9211189318623, 328.1577766746841},
+        {0.0, 636.7728693761635, 231.63887313030887},
+        {0.0, 783.8640093145868, -86.94016828207737},
+        {1.0, -122.79445443476675, 446.8427679254348},
+        {0.0, -599.127065456006, -641.9946421169902},
+        {0.0, -133.3932116798295, -715.8087793479069},
+        {0.0, 868.1768857382554, -356.8832640029416},
+        {1.0, -729.5079555062296, 48.18869346933934},
+        {1.0, -323.311327276945, 51.37289795053448},
+        {1.0, -863.9094602749768, -526.3307161874084},
+        {0.0, -172.237643059304, -545.395840196842},
+        {1.0, 379.0803154405653, 860.9286051762328},
+        {0.0, 646.3490077056538, 221.13771257535495},
+        {1.0, -493.2329575593668, 938.8602740452263},
+        {0.0, 852.1508064390962, 186.42129731281898},
+        {0.0, -105.17633183875978, -819.8477185986328},
+        {0.0, 794.7790444633961, 225.19911969860573},
+        {0.0, 306.4485552684148, 290.3991023596727},
+        {1.0, -348.52545404552563, -302.8538669615166},
+        {1.0, -621.5896829696857, -586.764214213187},
+        {0.0, -360.9052184666539, -501.2314262330038},
+        {0.0, 512.0475423578778, -968.4211685736286},
+        {0.0, -1.0553261239787162, -649.1131987920394},
+        {1.0, -353.0059560079317, -343.82940709059096},
+        {0.0, 281.71038662642286, -536.6960537047482},
+        {1.0, -919.2355704939898, 782.9875939766282},
+        {1.0, -554.7648476025646, 670.76664941987},
+        {0.0, 287.54041983444336, 106.2628262971964},
+        {1.0, -71.36414070058743, 481.00905876949264},
+        {1.0, -525.4581932812421, 507.16990298296923},
+        {0.0, 510.1084615227803, -813.3443471544821},
+        {0.0, -515.8000398448883, -551.1523846072581},
+        {1.0, -941.5905835281701, 178.53493537516124},
+        {1.0, -826.4320007540575, -391.32308974320074},
+        {1.0, -362.25207668798646, 711.1776477575349},
+        {1.0, -363.13146140965796, 58.76850122459791},
+        {1.0, -637.0939514034111, -57.18171960880602},
+        {1.0, 811.8537434287423, 893.8406118576338},
+        {1.0, -351.36128471993413, -164.8367432830371},
+        {0.0, -625.8073644486308, -938.5091097468568},
+        {0.0, 131.36904305993585, 59.945922200265386},
+        {1.0, 300.49666138667953, 544.089396622054},
+        {1.0, 150.9533638033147, 943.667562848439},
+        {1.0, -232.3556550990304, 976.0470122102599},
+        {1.0, 135.8097187722467, 262.21166985817695},
+        {0.0, -97.51353115825805, -890.6273287611524},
+        {1.0, -711.4020131465077, -20.13806627790268},
+        {0.0, 917.1543030685937, -872.6562190191934},
+        {1.0, -657.7632592299774, -596.4956657628013},
+        {0.0, 806.7273372492091, 154.3973882475018},
+        {0.0, 371.7932221354017, -847.5721372522485},
+        {0.0, 887.0251089691258, -306.6059397900773},
+        {1.0, -171.52557116367404, 819.6507572581761},
+        {0.0, 632.2374116222845, -635.8014704304069},
+        {0.0, -213.33363068356653, -639.038384943213},
+        {0.0, 737.7847710201636, -843.291366957395},
+        {0.0, -430.7114667797465, -665.7014140302028},
+        {0.0, 18.317432837854085, -309.1307864153605},
+        {0.0, 689.3196508440624, 398.22692583132357},
+        {0.0, 908.6965655126414, -321.7431267700932},
+        {0.0, 604.2361606207025, -174.1208906780612},
+        {1.0, -816.014328616853, -468.5728222442267},
+        {1.0, -124.50677921712554, 439.4225345583168},
+        {0.0, -736.4729915358428, -745.435394454091},
+        {1.0, -201.1314081356761, 132.070557003796},
+        {1.0, -538.2469045343253, 719.2630473774586},
+        {1.0, -579.3039091203984, 961.7644587928542},
+        {1.0, -131.07569768983058, -14.067659190625022},
+        {1.0, -961.9324831150435, 815.7775199747161},
+        {0.0, 959.0805916122792, 210.22031178108682},
+        {0.0, 537.3004634155134, -821.1232504829824},
+        {1.0, -525.577776451393, 523.8546325250404},
+        {1.0, -490.37425007561785, 613.9247103792861},
+        {1.0, 725.2941641152454, 924.7691776631311},
+        {0.0, 850.5191959199387, -911.7156754307339},
+        {1.0, -535.3827552133765, -256.1333041657481},
+        {1.0, 93.24441210512305, 980.899958839474},
+        {1.0, 125.58210878499744, 489.9200659506546},
+        {1.0, -265.0907509361897, -181.36232727265053},
+        {1.0, -805.0528978104943, -774.3428711441273},
+        {0.0, 299.481029365769, 274.2467784888322},
+        {1.0, -872.6432839751412, -724.9692038478101},
+        {0.0, -327.77109720806027, -346.06090524099113},
+        {0.0, -769.9407295518204, -947.4499512111647},
+        {0.0, 708.176001237056, -701.9900242821255},
+        {0.0, 429.7900423607498, -767.8607100772805},
+        {0.0, 514.9666605063433, -252.09527799878242},
+        {1.0, -392.6943024744394, 943.3642876383242},
+        {0.0, -171.97676164837765, -964.9749845719992},
+        {0.0, 25.3949751703301, -761.3459408840288},
+        {0.0, 327.0516125752938, -81.26274312696592},
+        {0.0, -926.4851014957853, -970.9563176084357},
+        {1.0, -985.2416286372801, -758.6127879964147},
+        {0.0, 338.7854869375187, -231.37122411100802},
+        {1.0, -995.9157184785086, -310.8674450540059},
+        {0.0, 485.52790893379097, 7.909018196822899},
+        {1.0, -289.76601009744377, -93.43411467378803},
+        {1.0, -352.91681813664957, 970.6609344632727},
+        {1.0, -634.2596635738871, 478.54324561131875},
+        {1.0, -496.623286353002, 526.7778661797483},
+        {0.0, 837.0404771301767, 671.1823960639354},
+        {0.0, -284.5931069950618, -893.2503900000672},
+        {0.0, 739.6925158457948, -572.886151546864},
+        {1.0, 505.37418939555437, 914.4939776238757},
+        {0.0, 65.79978723030536, -59.26282586191303},
+        {0.0, 775.1318885055389, -698.3367782064498},
+        {1.0, -871.3166585822554, -351.74555670546727}
+    };
+
+    /** 4 sets grouped around of square vertices. */
+    protected static final double[][] fourSetsInSquareVertices = {
+        {0, 9.35096604945605, 9.946073797069054},
+        {0, 9.135109633114403, 9.962676066205383},
+        {0, 9.046654725589521, 9.610699793950662},
+        {0, 9.827221553421282, 9.4176319880153},
+        {0, 9.277441430833566, 9.502990699976},
+        {0, 9.444827307967367, 9.903310367805602},
+        {0, 9.911404997680545, 9.226246217883297},
+        {0, 9.950231642973769, 9.453518533258803},
+        {0, 9.281545278543017, 9.438272102773379},
+        {0, 9.032306746555102, 9.517675092676706},
+        {0, 9.286542956290456, 9.15288903978334},
+        {0, 9.896451632473255, 9.019751070009821},
+        {0, 9.611642481367562, 9.17209652044495},
+        {0, 9.592540623266126, 9.306160678545629},
+        {0, 9.817470117880873, 9.838651444371973},
+        {0, 9.263220850397941, 9.139179322873582},
+        {0, 9.949097640181272, 9.624710378790242},
+        {0, 9.616004097319287, 9.421557303733453},
+        {0, 9.512900976289933, 9.28642137092367},
+        {0, 9.207793663546337, 9.40094289636865},
+        {0, 9.079279410265883, 9.76978559451163},
+        {0, 9.328945661288095, 9.645773710532888},
+        {0, 9.80101696222916, 9.511903913501255},
+        {0, 9.882593127029741, 9.73545127073394},
+        {0, 9.75372887212885, 9.435141350132769},
+        {0, 9.288527674365598, 9.055665753045206},
+        {0, 9.88272159816372, 9.055932205550423},
+        {0, 9.385642321423624, 9.922172934733265},
+        {0, 9.830217517055729, 9.415174260405154},
+        {0, 9.184970761195489, 9.03515483431538},
+        {0, 9.747503155479809, 9.38708759338332},
+        {0, 9.953962908254736, 9.483949174467012},
+        {0, 9.271685731881993, 9.128890010491494},
+        {0, 9.441240324686845, 9.07960435205457},
+        {0, 9.168560731741703, 9.256530860101089},
+        {0, 9.010517147230432, 9.94335328515589},
+        {0, 9.1749227239244, 9.018681913631386},
+        {0, 9.413360501729251, 9.302212703700196},
+        {0, 9.439461439481182, 9.318631395882242},
+        {0, 9.531551691985907, 9.232525664308465},
+        {0, 9.466805772615563, 9.511711890834333},
+        {0, 9.633242901042053, 9.972778102570045},
+        {0, 9.517692290376388, 9.73537462150143},
+        {0, 9.187046049036134, 9.059073377533783},
+        {0, 9.121523234392956, 9.504221886903101},
+        {0, 9.493957951674021, 9.608201135992367},
+        {0, 9.981993764415321, 9.333278989889811},
+        {0, 9.371277571698762, 9.110041365023866},
+        {0, 9.681446270907697, 9.7870063720198},
+        {0, 9.639466883264246, 9.434768030033164},
+        {0, 9.391982858267035, 9.934707093985823},
+        {0, 9.550060071547726, 9.473132681990514},
+        {0, 9.256562054384402, 9.211913854106896},
+        {0, 9.46408385327689, 9.158869250798142},
+        {0, 9.442994981367162, 9.189227375629654},
+        {0, 9.697833866121318, 9.21112449845501},
+        {0, 9.115534908153043, 9.115227178046245},
+        {0, 9.835218474137239, 9.98174155822633},
+        {0, 9.026698146309743, 9.248759846540965},
+        {0, 9.68118581769866, 9.40512628823504},
+        {0, 9.81721640069966, 9.369105145483651},
+        {0, 9.975877208452287, 9.640693828024975},
+        {0, 9.823272242807437, 9.46823993908653},
+        {0, 9.638281188176519, 9.534774307683374},
+        {0, 9.597003178481613, 9.84238115941204},
+        {0, 9.941999007792681, 9.331877359355289},
+        {0, 9.050540877852525, 9.244472301490417},
+        {0, 9.358931306187054, 9.900809398285286},
+        {0, 9.170247599517836, 9.87585551194908},
+        {0, 9.461705027907554, 9.167319400226486},
+        {0, 9.076729207165052, 9.677578134220534},
+        {0, 9.488544686081216, 9.62380634923249},
+        {0, 9.929150661994122, 9.152491122614597},
+        {0, 9.890051482992417, 9.1709621079536},
+        {0, 9.839485513056095, 9.643849781319778},
+        {0, 9.749461922180853, 9.045432748127462},
+        {0, 9.58439542919333, 9.225044809549836},
+        {0, 9.479465134364697, 9.706551666966702},
+        {0, 9.00707492076871, 9.839317970534172},
+        {0, 9.948409701102793, 9.380261430658763},
+        {0, 9.264850115578076, 9.696516344063658},
+        {0, 9.977078194073387, 9.213405339955512},
+        {0, 9.648087669569941, 9.898977891084664},
+        {0, 9.724090075117749, 9.876133066062916},
+        {0, 9.445249316659568, 9.373023119966643},
+        {0, 9.995541563884071, 9.57923804140667},
+        {0, 9.667359233860397, 9.720098746660245},
+        {0, 9.379303845088474, 9.520602789251743},
+        {0, 9.996287800651865, 9.838061655335768},
+        {0, 9.318835567328465, 9.009915558605616},
+        {0, 9.103894679089793, 9.674971708485224},
+        {0, 9.346826400314828, 9.888779618232787},
+        {0, 9.659116962016478, 9.608712473271416},
+        {0, 9.661516337354719, 9.416786365864226},
+        {0, 9.642593770590324, 9.251344999039574},
+        {0, 9.134003475979116, 9.551760245909657},
+        {0, 9.524862003327057, 9.307789887454172},
+        {0, 9.883705581666579, 9.325086464359684},
+        {0, 9.96076863440133, 9.81636527085299},
+        {0, 9.995704158311584, 9.544553004819253},
+        {1, -9.094953387232211, 9.06233128328723},
+        {1, -9.304897363378368, 9.143926554861004},
+        {1, -9.03524958020074, 9.370326522034881},
+        {1, -9.120893310395626, 9.271851530835537},
+        {1, -9.510902040922451, 9.2470398948938},
+        {1, -9.6525973741057, 9.725355730393005},
+        {1, -9.65730261326345, 9.757814601272596},
+        {1, -9.597463454487615, 9.870093256106818},
+        {1, -9.190101362739775, 9.594505054154807},
+        {1, -9.72020516663928, 9.49084494643775},
+        {1, -9.723347588431338, 9.129139508430457},
+        {1, -9.33996314024198, 9.525934956132764},
+        {1, -9.824803485424123, 9.128546700002982},
+        {1, -9.346973220919576, 9.934992542662958},
+        {1, -9.685940369418338, 9.30810392592615},
+        {1, -9.064058121381708, 9.846942888423445},
+        {1, -9.368987058951426, 9.557135466015499},
+        {1, -9.782353308524383, 9.857550405413855},
+        {1, -9.281500887267686, 9.056968941046172},
+        {1, -9.514451522447168, 9.713696846961527},
+        {1, -9.607099689382135, 9.682075033940093},
+        {1, -9.144871412854759, 9.146320338346246},
+        {1, -9.54203309158306, 9.220014377847022},
+        {1, -9.238269645840251, 9.948063795512258},
+        {1, -9.286942806777112, 9.522342489392214},
+        {1, -9.591474157985536, 9.240285207594253},
+        {1, -9.652843973116592, 9.557983695755953},
+        {1, -9.126794849562028, 9.452966323026885},
+        {1, -9.877221229728452, 9.151312939643672},
+        {1, -9.170379066479606, 9.381576400806694},
+        {1, -9.411298671068392, 9.133322302544746},
+        {1, -9.666443924685849, 9.66428867311317},
+        {1, -9.347964494643556, 9.012849397302583},
+        {1, -9.493681117964078, 9.332240464982554},
+        {1, -9.623975723800413, 9.419921503264844},
+        {1, -9.292219487063763, 9.00214102314859},
+        {1, -9.194419464738496, 9.640048387436925},
+        {1, -9.886720923292938, 9.834939723803704},
+        {1, -9.90520284610924, 9.17595267606471},
+        {1, -9.284829868633738, 9.268795876426012},
+        {1, -9.498878372098952, 9.5997098342015},
+        {1, -9.359302922869169, 9.47880701571168},
+        {1, -9.258562740082393, 9.497531680793207},
+        {1, -9.895388929537848, 9.00756585816333},
+        {1, -9.627928477333924, 9.391262771761872},
+        {1, -9.525281129279826, 9.796892255719904},
+        {1, -9.59598592778135, 9.067874949457092},
+        {1, -9.110283105135892, 9.821653780489235},
+        {1, -9.343973780672988, 9.63557812382392},
+        {1, -9.87812414314095, 9.978007969979139},
+        {1, -9.98832246915748, 9.623150872300222},
+        {1, -9.115997082508613, 9.965470531748467},
+        {1, -9.874391718339105, 9.214113577543877},
+        {1, -9.671664494678888, 9.15862012290195},
+        {1, -9.031596433460688, 9.616814958480965},
+        {1, -9.758627761132653, 9.511908952613643},
+        {1, -9.205087108977219, 9.840949306240816},
+        {1, -9.171734592697309, 9.702842939318314},
+        {1, -9.082886085070493, 9.524201651321903},
+        {1, -9.74595864484071, 9.219346103723025},
+        {1, -9.898468941378516, 9.994402484197503},
+        {1, -9.341582531784448, 9.193680038418634},
+        {1, -9.570090524257228, 9.201198104295603},
+        {1, -9.88361320124743, 9.027615263347323},
+        {1, -9.154222720481965, 9.799927021695417},
+        {1, -9.364221227791875, 9.042090834574182},
+        {1, -9.333131749015948, 9.790442620484125},
+        {1, -9.286700941581561, 9.89073867458494},
+        {1, -9.348737197099151, 9.637939060929087},
+        {1, -9.442420524656606, 9.07802294456236},
+        {1, -9.069329135123306, 9.658515489139848},
+        {1, -9.306682910312364, 9.20831776028291},
+        {1, -9.033846541544232, 9.32904963306478},
+        {1, -9.706767953982897, 9.9204656840812},
+        {1, -9.855922299233484, 9.212398390928783},
+        {1, -9.31778377138365, 9.001381041592891},
+        {1, -9.498262395904716, 9.627240779587641},
+        {1, -9.165515191167106, 9.8269942856602},
+        {1, -9.975445549855277, 9.940934989111799},
+        {1, -9.083105286998059, 9.006127740460453},
+        {1, -9.570145038082837, 9.682155599203648},
+        {1, -9.61392195996382, 9.417864984298848},
+        {1, -9.274771331302999, 9.641773516631659},
+        {1, -9.296296304670749, 9.782496135034126},
+        {1, -9.906415110246952, 9.754391405446135},
+        {1, -9.401887484923442, 9.177845637020802},
+        {1, -9.530971211940608, 9.165119804525942},
+        {1, -9.82379861350907, 9.79567065636976},
+        {1, -9.652776399686564, 9.905939382705197},
+        {1, -9.876593047451918, 9.945310791455892},
+        {1, -9.663611565135188, 9.362793091580434},
+        {1, -9.199103361444621, 9.635196006461447},
+        {1, -9.190013322848332, 9.124127000468004},
+        {1, -9.29736354578434, 9.717999298890678},
+        {1, -9.220547853711237, 9.559927412569595},
+        {1, -9.300431356958706, 9.76396216541998},
+        {1, -9.157649670754807, 9.990846988919046},
+        {1, -9.681918677002109, 9.68618286595764},
+        {1, -9.309195235661146, 9.312880801021818},
+        {1, -9.061160475710913, 9.076614202325946},
+        {2, -9.062489260904384, -9.29639290758419},
+        {2, -9.228543182338143, -9.678377216077045},
+        {2, -9.058090832908235, -9.193945883550121},
+        {2, -9.133051729493113, -9.591373007767894},
+        {2, -9.287844094445548, -9.551255004015},
+        {2, -9.007505358739156, -9.364102496975889},
+        {2, -9.573448348548297, -9.721351111009751},
+        {2, -9.839063104064442, -9.913376420693114},
+        {2, -9.009615911555375, -9.726047024128608},
+        {2, -9.101017317976435, -9.704243867142955},
+        {2, -9.982108914119253, -9.16651010251761},
+        {2, -9.446194150458751, -9.254956921695555},
+        {2, -9.189473272816354, -9.810681137049205},
+        {2, -9.118077427599777, -9.540094810610913},
+        {2, -9.771250464767986, -9.523914718655663},
+        {2, -9.66962428717098, -9.363171620624835},
+        {2, -9.312167530669402, -9.343252976723711},
+        {2, -9.464223946364095, -9.030677424225916},
+        {2, -9.360044171938823, -9.307110078788382},
+        {2, -9.011501658023455, -9.36530250968901},
+        {2, -9.775885771959839, -9.99889314514122},
+        {2, -9.674611861667914, -9.258187855592231},
+        {2, -9.738640777018995, -9.111785670315005},
+        {2, -9.246690988432968, -9.721028941948624},
+        {2, -9.390261807995243, -9.588861735182837},
+        {2, -9.291113352727827, -9.269267155328981},
+        {2, -9.851335630543913, -9.706611637556188},
+        {2, -9.585157995064394, -9.405552049981731},
+        {2, -9.918436572526948, -9.16760421314763},
+        {2, -9.845493743518675, -9.355482956823167},
+        {2, -9.731220848845956, -9.225343258111073},
+        {2, -9.222705334863235, -9.494812693860784},
+        {2, -9.981016698450784, -9.905493543993186},
+        {2, -9.46735837748333, -9.4826245649012},
+        {2, -9.244414641225871, -9.747631208358092},
+        {2, -9.055383358563462, -9.531078141057671},
+        {2, -9.769432919539609, -9.61352749756392},
+        {2, -9.369900693663043, -9.108143337018905},
+        {2, -9.607028386780009, -9.114073621581822},
+        {2, -9.777391839524553, -9.011542490337462},
+        {2, -9.006992341646022, -9.807142738339437},
+        {2, -9.268800709859363, -9.64049865255139},
+        {2, -9.675247117678266, -9.59986912340877},
+        {2, -9.64637138569114, -9.373492063216789},
+        {2, -9.107966171477159, -9.89296888054194},
+        {2, -9.844813041035149, -9.265286980535892},
+        {2, -9.741557572466677, -9.332262102684087},
+        {2, -9.877113842998332, -9.236779890169021},
+        {2, -9.717067250147496, -9.064661056318842},
+        {2, -9.621588376526242, -9.877688077281952},
+        {2, -9.517814042484112, -9.540587835450802},
+        {2, -9.301056957528804, -9.825047942369075},
+        {2, -9.571496535251406, -9.7886645523611},
+        {2, -9.720509286872675, -9.391715190333258},
+        {2, -9.440573147395899, -9.788983529514448},
+        {2, -9.26187156355727, -9.6495064067468},
+        {2, -9.658496105019307, -9.56612823492413},
+        {2, -9.380443710902496, -9.68085867523561},
+        {2, -9.942337341048844, -9.051311192273833},
+        {2, -9.078217384202866, -9.916249447505033},
+        {2, -9.040049870218203, -9.034931091928817},
+        {2, -9.38309299369458, -9.652061835126116},
+        {2, -9.2185666133056, -9.230952753648268},
+        {2, -9.556045604713985, -9.68622417688499},
+        {2, -9.763408055045888, -9.879577482698977},
+        {2, -9.4247203087675, -9.639176769093654},
+        {2, -9.87358328609414, -9.895570735983382},
+        {2, -9.819441742886346, -9.8365613475581},
+        {2, -9.658089225310723, -9.489731885421016},
+        {2, -9.943990436893316, -9.452660725226394},
+        {2, -9.499576083220616, -9.936796975306573},
+        {2, -9.209278737078256, -9.515912871664437},
+        {2, -9.822627739746856, -9.208467277950026},
+        {2, -9.250697491903084, -9.388580147580788},
+        {2, -9.499425743259364, -9.350980719673753},
+        {2, -9.275926339651928, -9.617104986484284},
+        {2, -9.1796228747286, -9.600489090237376},
+        {2, -9.349551823375743, -9.006466892950566},
+        {2, -9.894633921415739, -9.68766261225829},
+        {2, -9.65858550958029, -9.981852526887},
+        {2, -9.260496691277194, -9.809097777192473},
+        {2, -9.819512412109138, -9.897278497873733},
+        {2, -9.095722203640902, -9.373361177409254},
+        {2, -9.350211015838992, -9.8070103689666},
+        {2, -9.666932714082296, -9.012476306081684},
+        {2, -9.808494394881976, -9.141856503904373},
+        {2, -9.659369482494562, -9.813220865668578},
+        {2, -9.695328684452264, -9.348824074007899},
+        {2, -9.559852026507784, -9.874175917293163},
+        {2, -9.09372192117967, -9.409697201374975},
+        {2, -9.287303427948462, -9.501710345463191},
+        {2, -9.559530752361578, -9.915461534620048},
+        {2, -9.582664553428488, -9.403076102016477},
+        {2, -9.52173572568699, -9.741375773070464},
+        {2, -9.65354706029232, -9.818082622224445},
+        {2, -9.415838021477068, -9.532580879297706},
+        {2, -9.574004758496413, -9.086286237660188},
+        {2, -9.270611925252807, -9.702167164438746},
+        {2, -9.95686463396123, -9.162427711243494},
+        {2, -9.274599236711888, -9.877754856511778},
+        {3, 9.14689232210878, -9.775341371070157},
+        {3, 9.023355885230728, -9.650091265039629},
+        {3, 9.445914402990603, -9.367844134021585},
+        {3, 9.12739637867819, -9.790557561179597},
+        {3, 9.041303217790349, -9.586261899702581},
+        {3, 9.205210383417626, -9.985844424729768},
+        {3, 9.760747183322884, -9.739749414833623},
+        {3, 9.43601987318095, -9.299718258558077},
+        {3, 9.14356842517825, -9.295462642558103},
+        {3, 9.833809705258039, -9.468846417300268},
+        {3, 9.718477547371677, -9.823866211360837},
+        {3, 9.922658697442182, -9.769889056711964},
+        {3, 9.33663363137869, -9.088267105115708},
+        {3, 9.137230799593524, -9.764401780467223},
+        {3, 9.858088980083506, -9.304992329702712},
+        {3, 9.382828855133841, -9.493306421449871},
+        {3, 9.798884510277261, -9.380868512833228},
+        {3, 9.62129491417874, -9.539240839001467},
+        {3, 9.779444510688629, -9.192918853610157},
+        {3, 9.200804425227417, -9.214343851278091},
+        {3, 9.775531213188497, -9.073023597174036},
+        {3, 9.390609731389022, -9.977531450420052},
+        {3, 9.852766749781729, -9.994823748876888},
+        {3, 9.206238360247045, -9.67091791978384},
+        {3, 9.188602950870685, -9.510463637602879},
+        {3, 9.331589555754434, -9.922823935052168},
+        {3, 9.476697182752012, -9.313064140116326},
+        {3, 9.356805613304504, -9.835977587036306},
+        {3, 9.339818380404573, -9.255810669018475},
+        {3, 9.17366847248557, -9.947584334056048},
+        {3, 9.53360390823212, -9.795041609984915},
+        {3, 9.609560038477422, -9.285015745600694},
+        {3, 9.577553857280723, -9.96914900300197},
+        {3, 9.464374595524664, -9.618239089480822},
+        {3, 9.398719356212853, -9.7406758194444},
+        {3, 9.154688949078198, -9.248998548314239},
+        {3, 9.679073636776373, -9.965328464852867},
+        {3, 9.47893626848198, -9.9671543632786},
+        {3, 9.068547258387513, -9.297377035663166},
+        {3, 9.076923603177063, -9.914463831030272},
+        {3, 9.976578331543791, -9.360722370503666},
+        {3, 9.089452654960278, -9.675962954595512},
+        {3, 9.070526769096297, -9.878206691195288},
+        {3, 9.930847945955737, -9.07583308430197},
+        {3, 9.241217613699337, -9.631175172125698},
+        {3, 9.124100921554351, -9.228953372107389},
+        {3, 9.508344880276217, -9.860603437908713},
+        {3, 9.11156100183317, -9.325392997885503},
+        {3, 9.817235693989044, -9.39425968469714},
+        {3, 9.001600449220064, -9.425174755596974},
+        {3, 9.548114105927628, -9.808330723888258},
+        {3, 9.26226050324015, -9.767116578977086},
+        {3, 9.614597629315545, -9.041844364395292},
+        {3, 9.538354218499835, -9.098393947752555},
+        {3, 9.103392813936214, -9.09952673162608},
+        {3, 9.420097750306217, -9.098700662928707},
+        {3, 9.751668557712422, -9.38794903932924},
+        {3, 9.931275926738792, -9.567768498966414},
+        {3, 9.046080675655736, -9.638494792341994},
+        {3, 9.770315794108765, -9.43037261292599},
+        {3, 9.752980345824852, -9.748629501818872},
+        {3, 9.451157497026747, -9.122294173303064},
+        {3, 9.8842318143802, -9.26500677925286},
+        {3, 9.757618739984443, -9.43137249310142},
+        {3, 9.312628300108653, -9.35390228978602},
+        {3, 9.290443903557156, -9.235565486135597},
+        {3, 9.006123561818931, -9.152783217337547},
+        {3, 9.570084759165916, -9.927282503148907},
+        {3, 9.421900208122063, -9.081045753111953},
+        {3, 9.653736596553786, -9.901709124803725},
+        {3, 9.18417654510616, -9.251983632346962},
+        {3, 9.528620521688604, -9.153806541933662},
+        {3, 9.804333603959915, -9.140503586471738},
+        {3, 9.450969957775413, -9.158071229394206},
+        {3, 9.20163405176059, -9.485982651544383},
+        {3, 9.54780101021909, -9.037382999154193},
+        {3, 9.075028540176401, -9.398829949196564},
+        {3, 9.874199751417624, -9.811808331246274},
+        {3, 9.954958362231842, -9.233648957978966},
+        {3, 9.03258466527879, -9.432021155003355},
+        {3, 9.611337142970585, -9.18050106929956},
+        {3, 9.748186934551581, -9.283395815931486},
+        {3, 9.203357880317562, -9.734448423320636},
+        {3, 9.918736141570848, -9.139927237002235},
+        {3, 9.923607379931829, -9.747509729243488},
+        {3, 9.11966639233584, -9.144799648581206},
+        {3, 9.332910738465808, -9.836313230806809},
+        {3, 9.72782406722705, -9.636979470475252},
+        {3, 9.587733884348484, -9.429388313887008},
+        {3, 9.713001308076503, -9.378155762534723},
+        {3, 9.553879064305177, -9.456119811781296},
+        {3, 9.326716553614768, -9.398273985573583},
+        {3, 9.845721054911849, -9.2609414976378},
+        {3, 9.43826634715752, -9.226109072709436},
+        {3, 9.46721793264904, -9.959943210987339},
+        {3, 9.47560676057465, -9.963482009295927},
+        {3, 9.006435968586619, -9.202759792205478},
+        {3, 9.053062605095485, -9.798289703474865},
+        {3, 9.959296741639132, -9.762961500922069},
+        {3, 9.882357321966778, -9.069477551120192}
+    };
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/CompositionTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/CompositionTestSuite.java
new file mode 100644
index 0000000..8714eb2
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/CompositionTestSuite.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition;
+
+import org.apache.ignite.ml.composition.boosting.GDBTrainerTest;
+import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregatorTest;
+import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregatorTest;
+import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregatorTest;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for all tests located in org.apache.ignite.ml.composition package.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    GDBTrainerTest.class,
+    MeanValuePredictionsAggregatorTest.class,
+    OnMajorityPredictionsAggregatorTest.class,
+    WeightedPredictionsAggregatorTest.class
+})
+public class CompositionTestSuite {
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java
index bef5e9b..4c3655b 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/GDBTrainerTest.java
@@ -19,12 +19,16 @@
 
 import java.util.HashMap;
 import java.util.Map;
+import java.util.function.BiFunction;
 import org.apache.ignite.ml.Model;
 import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.convergence.mean.MeanAbsValueConvergenceCheckerFactory;
+import org.apache.ignite.ml.composition.boosting.convergence.simple.ConvergenceCheckerStubFactory;
 import org.apache.ignite.ml.composition.predictionsaggregator.WeightedPredictionsAggregator;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.trainers.DatasetTrainer;
 import org.apache.ignite.ml.tree.DecisionTreeConditionalNode;
 import org.apache.ignite.ml.tree.boosting.GDBBinaryClassifierOnTreesTrainer;
 import org.apache.ignite.ml.tree.boosting.GDBRegressionOnTreesTrainer;
@@ -52,7 +56,9 @@
             learningSample.put(i, new double[] {xs[i], ys[i]});
         }
 
-        DatasetTrainer<Model<Vector, Double>, Double> trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 3, 0.0);
+        GDBTrainer trainer = new GDBRegressionOnTreesTrainer(1.0, 2000, 3, 0.0)
+            .withUsingIdx(true);
+
         Model<Vector, Double> mdl = trainer.fit(
             learningSample, 1,
             (k, v) -> VectorUtils.of(v[0]),
@@ -70,17 +76,47 @@
 
         assertEquals(0.0, mse, 0.0001);
 
-        assertTrue(mdl instanceof ModelsComposition);
         ModelsComposition composition = (ModelsComposition)mdl;
+        assertTrue(composition.toString().length() > 0);
+        assertTrue(composition.toString(true).length() > 0);
+        assertTrue(composition.toString(false).length() > 0);
+
         composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode));
 
         assertEquals(2000, composition.getModels().size());
         assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator);
+
+        trainer = trainer.withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.1));
+        assertTrue(trainer.fit(
+            learningSample, 1,
+            (k, v) -> VectorUtils.of(v[0]),
+            (k, v) -> v[1]
+        ).getModels().size() < 2000);
     }
 
     /** */
     @Test
     public void testFitClassifier() {
+        testClassifier((trainer, learningSample) -> trainer.fit(
+            learningSample, 1,
+            (k, v) -> VectorUtils.of(v[0]),
+            (k, v) -> v[1]
+        ));
+    }
+
+    /** */
+    @Test
+    public void testFitClassifierWithLearningStrategy() {
+        testClassifier((trainer, learningSample) -> trainer.fit(
+            new LocalDatasetBuilder<>(learningSample, 1),
+            (k, v) -> VectorUtils.of(v[0]),
+            (k, v) -> v[1]
+        ));
+    }
+
+    /** */
+    private void testClassifier(BiFunction<GDBTrainer, Map<Integer, double[]>,
+        Model<Vector, Double>> fitter) {
         int sampleSize = 100;
         double[] xs = new double[sampleSize];
         double[] ys = new double[sampleSize];
@@ -94,19 +130,18 @@
         for (int i = 0; i < sampleSize; i++)
             learningSample.put(i, new double[] {xs[i], ys[i]});
 
-        DatasetTrainer<Model<Vector, Double>, Double> trainer = new GDBBinaryClassifierOnTreesTrainer(0.3, 500, 3, 0.0);
-        Model<Vector, Double> mdl = trainer.fit(
-            learningSample, 1,
-            (k, v) -> VectorUtils.of(v[0]),
-            (k, v) -> v[1]
-        );
+        GDBTrainer trainer = new GDBBinaryClassifierOnTreesTrainer(0.3, 500, 3, 0.0)
+            .withUsingIdx(true)
+            .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.3));
+
+        Model<Vector, Double> mdl = fitter.apply(trainer, learningSample);
 
         int errorsCnt = 0;
         for (int j = 0; j < sampleSize; j++) {
             double x = xs[j];
             double y = ys[j];
             double p = mdl.apply(VectorUtils.of(x));
-            if(p != y)
+            if (p != y)
                 errorsCnt++;
         }
 
@@ -116,7 +151,61 @@
         ModelsComposition composition = (ModelsComposition)mdl;
         composition.getModels().forEach(m -> assertTrue(m instanceof DecisionTreeConditionalNode));
 
-        assertEquals(500, composition.getModels().size());
+        assertTrue(composition.getModels().size() < 500);
         assertTrue(composition.getPredictionsAggregator() instanceof WeightedPredictionsAggregator);
+
+        trainer = trainer.withCheckConvergenceStgyFactory(new ConvergenceCheckerStubFactory());
+        assertEquals(500, ((ModelsComposition)fitter.apply(trainer, learningSample)).getModels().size());
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        int sampleSize = 100;
+        double[] xs = new double[sampleSize];
+        double[] ys = new double[sampleSize];
+
+        for (int i = 0; i < sampleSize; i++) {
+            xs[i] = i;
+            ys[i] = ((int)(xs[i] / 10.0) % 2) == 0 ? -1.0 : 1.0;
+        }
+
+        Map<Integer, double[]> learningSample = new HashMap<>();
+        for (int i = 0; i < sampleSize; i++)
+            learningSample.put(i, new double[] {xs[i], ys[i]});
+        IgniteBiFunction<Integer, double[], Vector> fExtr = (k, v) -> VectorUtils.of(v[0]);
+        IgniteBiFunction<Integer, double[], Double> lExtr = (k, v) -> v[1];
+
+        GDBTrainer classifTrainer = new GDBBinaryClassifierOnTreesTrainer(0.3, 500, 3, 0.0)
+            .withUsingIdx(true)
+            .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.3));
+        GDBTrainer regressTrainer = new GDBRegressionOnTreesTrainer(0.3, 500, 3, 0.0)
+            .withUsingIdx(true)
+            .withCheckConvergenceStgyFactory(new MeanAbsValueConvergenceCheckerFactory(0.3));
+
+        testUpdate(learningSample, fExtr, lExtr, classifTrainer);
+        testUpdate(learningSample, fExtr, lExtr, regressTrainer);
+    }
+
+    /** */
+    private void testUpdate(Map<Integer, double[]> dataset, IgniteBiFunction<Integer, double[], Vector> fExtr,
+        IgniteBiFunction<Integer, double[], Double> lExtr, GDBTrainer trainer) {
+
+        ModelsComposition originalMdl = trainer.fit(dataset, 1, fExtr, lExtr);
+        ModelsComposition updatedOnSameDataset = trainer.update(originalMdl, dataset, 1, fExtr, lExtr);
+
+        LocalDatasetBuilder<Integer, double[]> epmtyDataset = new LocalDatasetBuilder<>(new HashMap<>(), 1);
+        ModelsComposition updatedOnEmptyDataset = trainer.updateModel(originalMdl, epmtyDataset, fExtr, lExtr);
+
+        dataset.forEach((k,v) -> {
+            Vector features = fExtr.apply(k, v);
+
+            Double originalAnswer = originalMdl.apply(features);
+            Double updatedMdlAnswer1 = updatedOnSameDataset.apply(features);
+            Double updatedMdlAnswer2 = updatedOnEmptyDataset.apply(features);
+
+            assertEquals(originalAnswer, updatedMdlAnswer1, 0.01);
+            assertEquals(originalAnswer, updatedMdlAnswer2, 0.01);
+        });
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerTest.java
new file mode 100644
index 0000000..50fdf8b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/ConvergenceCheckerTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.composition.ModelsComposition;
+import org.apache.ignite.ml.composition.boosting.loss.Loss;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Before;
+
+/** */
+public abstract class ConvergenceCheckerTest {
+    /** Not converged model. */
+    protected ModelsComposition notConvergedMdl = new ModelsComposition(Collections.emptyList(), null) {
+        @Override public Double apply(Vector features) {
+            return 2.1 * features.get(0);
+        }
+    };
+
+    /** Converged model. */
+    protected ModelsComposition convergedMdl = new ModelsComposition(Collections.emptyList(), null) {
+        @Override public Double apply(Vector features) {
+            return 2 * (features.get(0) + 1);
+        }
+    };
+
+    /** Features extractor. */
+    protected IgniteBiFunction<double[], Double, Vector> fExtr = (x, y) -> VectorUtils.of(x);
+
+    /** Label extractor. */
+    protected IgniteBiFunction<double[], Double, Double> lbExtr = (x, y) -> y;
+
+    /** Data. */
+    protected Map<double[], Double> data;
+
+    /** */
+    @Before
+    public void setUp() throws Exception {
+        data = new HashMap<>();
+        for(int i = 0; i < 10; i ++)
+            data.put(new double[]{i, i + 1}, (double)(2 * (i + 1)));
+    }
+
+    /** */
+    public ConvergenceChecker<double[], Double> createChecker(ConvergenceCheckerFactory factory,
+        LocalDatasetBuilder<double[], Double> datasetBuilder) {
+
+        return factory.create(data.size(),
+            x -> x,
+            new Loss() {
+                @Override public double error(long sampleSize, double lb, double mdlAnswer) {
+                    return mdlAnswer - lb;
+                }
+
+                @Override public double gradient(long sampleSize, double lb, double mdlAnswer) {
+                    return mdlAnswer - lb;
+                }
+            },
+            datasetBuilder, fExtr, lbExtr
+        );
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerTest.java
new file mode 100644
index 0000000..0b42db8
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/mean/MeanAbsValueConvergenceCheckerTest.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.mean;
+
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerTest;
+import org.apache.ignite.ml.dataset.impl.local.LocalDataset;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapDataBuilder;
+import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** */
+public class MeanAbsValueConvergenceCheckerTest extends ConvergenceCheckerTest {
+    /** */
+    @Test
+    public void testConvergenceChecking() {
+        LocalDatasetBuilder<double[], Double> datasetBuilder = new LocalDatasetBuilder<>(data, 1);
+        ConvergenceChecker<double[], Double> checker = createChecker(
+            new MeanAbsValueConvergenceCheckerFactory(0.1), datasetBuilder);
+
+        double error = checker.computeError(VectorUtils.of(1, 2), 4.0, notConvergedMdl);
+        Assert.assertEquals(1.9, error, 0.01);
+        Assert.assertFalse(checker.isConverged(datasetBuilder, notConvergedMdl));
+        Assert.assertTrue(checker.isConverged(datasetBuilder, convergedMdl));
+
+        try(LocalDataset<EmptyContext, FeatureMatrixWithLabelsOnHeapData> dataset = datasetBuilder.build(
+            new EmptyContextBuilder<>(), new FeatureMatrixWithLabelsOnHeapDataBuilder<>(fExtr, lbExtr))) {
+
+            double onDSError = checker.computeMeanErrorOnDataset(dataset, notConvergedMdl);
+            Assert.assertEquals(1.55, onDSError, 0.01);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+
+    /** Mean error more sensitive to anomalies in data */
+    @Test
+    public void testConvergenceCheckingWithAnomaliesInData() {
+        data.put(new double[]{10, 11}, 100000.0);
+        LocalDatasetBuilder<double[], Double> datasetBuilder = new LocalDatasetBuilder<>(data, 1);
+        ConvergenceChecker<double[], Double> checker = createChecker(
+            new MeanAbsValueConvergenceCheckerFactory(0.1), datasetBuilder);
+
+        try(LocalDataset<EmptyContext, FeatureMatrixWithLabelsOnHeapData> dataset = datasetBuilder.build(
+            new EmptyContextBuilder<>(), new FeatureMatrixWithLabelsOnHeapDataBuilder<>(fExtr, lbExtr))) {
+
+            double onDSError = checker.computeMeanErrorOnDataset(dataset, notConvergedMdl);
+            Assert.assertEquals(9090.41, onDSError, 0.01);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerTest.java
new file mode 100644
index 0000000..d6880b4
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/boosting/convergence/median/MedianOfMedianConvergenceCheckerTest.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.composition.boosting.convergence.median;
+
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceChecker;
+import org.apache.ignite.ml.composition.boosting.convergence.ConvergenceCheckerTest;
+import org.apache.ignite.ml.dataset.impl.local.LocalDataset;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapData;
+import org.apache.ignite.ml.dataset.primitive.FeatureMatrixWithLabelsOnHeapDataBuilder;
+import org.apache.ignite.ml.dataset.primitive.builder.context.EmptyContextBuilder;
+import org.apache.ignite.ml.dataset.primitive.context.EmptyContext;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** */
+public class MedianOfMedianConvergenceCheckerTest extends ConvergenceCheckerTest {
+    /** */
+    @Test
+    public void testConvergenceChecking() {
+        data.put(new double[]{10, 11}, 100000.0);
+        LocalDatasetBuilder<double[], Double> datasetBuilder = new LocalDatasetBuilder<>(data, 1);
+
+        ConvergenceChecker<double[], Double> checker = createChecker(
+            new MedianOfMedianConvergenceCheckerFactory(0.1), datasetBuilder);
+
+        double error = checker.computeError(VectorUtils.of(1, 2), 4.0, notConvergedMdl);
+        Assert.assertEquals(1.9, error, 0.01);
+        Assert.assertFalse(checker.isConverged(datasetBuilder, notConvergedMdl));
+        Assert.assertTrue(checker.isConverged(datasetBuilder, convergedMdl));
+
+        try(LocalDataset<EmptyContext, FeatureMatrixWithLabelsOnHeapData> dataset = datasetBuilder.build(
+            new EmptyContextBuilder<>(), new FeatureMatrixWithLabelsOnHeapDataBuilder<>(fExtr, lbExtr))) {
+
+            double onDSError = checker.computeMeanErrorOnDataset(dataset, notConvergedMdl);
+            Assert.assertEquals(1.6, onDSError, 0.01);
+        } catch (Exception e) {
+            throw new RuntimeException(e);
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregatorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregatorTest.java
index ae0b166..b387d9c 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregatorTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/composition/predictionsaggregator/WeightedPredictionsAggregatorTest.java
@@ -20,6 +20,7 @@
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /** */
 public class WeightedPredictionsAggregatorTest {
@@ -43,4 +44,19 @@
         WeightedPredictionsAggregator aggregator = new WeightedPredictionsAggregator(new double[] {1.0, 0.5, 0.25});
         aggregator.apply(new double[] { });
     }
+
+    /** */
+    @Test
+    public void testToString() {
+        PredictionsAggregator aggr = (PredictionsAggregator)doubles -> null;
+        assertTrue(aggr.toString().length() > 0);
+        assertTrue(aggr.toString(true).length() > 0);
+        assertTrue(aggr.toString(false).length() > 0);
+
+        WeightedPredictionsAggregator aggregator = new WeightedPredictionsAggregator(new double[] {});
+        assertTrue(aggregator.toString().length() > 0);
+        assertTrue(aggregator.toString(true).length() > 0);
+        assertTrue(aggregator.toString(false).length() > 0);
+    }
+
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/DatasetTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/DatasetTestSuite.java
index 3be79a4..babddfb 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/DatasetTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/DatasetTestSuite.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.ml.dataset;
 
+import org.apache.ignite.ml.dataset.feature.ObjectHistogramTest;
 import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilderTest;
 import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetTest;
 import org.apache.ignite.ml.dataset.impl.cache.util.ComputeUtilsTest;
@@ -24,6 +25,8 @@
 import org.apache.ignite.ml.dataset.impl.cache.util.PartitionDataStorageTest;
 import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilderTest;
 import org.apache.ignite.ml.dataset.primitive.DatasetWrapperTest;
+import org.apache.ignite.ml.dataset.primitive.SimpleDatasetTest;
+import org.apache.ignite.ml.dataset.primitive.SimpleLabeledDatasetTest;
 import org.junit.runner.RunWith;
 import org.junit.runners.Suite;
 
@@ -38,7 +41,10 @@
     PartitionDataStorageTest.class,
     CacheBasedDatasetBuilderTest.class,
     CacheBasedDatasetTest.class,
-    LocalDatasetBuilderTest.class
+    LocalDatasetBuilderTest.class,
+    SimpleDatasetTest.class,
+    SimpleLabeledDatasetTest.class,
+    ObjectHistogramTest.class
 })
 public class DatasetTestSuite {
     // No-op.
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/feature/ObjectHistogramTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/feature/ObjectHistogramTest.java
new file mode 100644
index 0000000..131b69b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/feature/ObjectHistogramTest.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.feature;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.TreeMap;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+/** */
+public class ObjectHistogramTest {
+    /** Data first partition. */
+    private double[] dataFirstPart = new double[] {0., 0., 0., 0., 1., 1., 1, 2., 2., 3., 4., 5.};
+    /** Data second partition. */
+    private double[] dataSecondPart = new double[] {0., 1., 0., 1., 0., 1., 0, 1., 0., 1., 0., 5., 6.};
+
+    /** */
+    private ObjectHistogram<Double> hist1;
+    /** */
+    private ObjectHistogram<Double> hist2;
+
+    /**
+     *
+     */
+    @Before
+    public void setUp() throws Exception {
+        hist1 = new ObjectHistogram<>(this::computeBucket, x -> 1.);
+        hist2 = new ObjectHistogram<>(this::computeBucket, x -> 1.);
+
+        fillHist(hist1, dataFirstPart);
+        fillHist(hist2, dataSecondPart);
+    }
+
+    /**
+     * @param hist History.
+     * @param data Data.
+     */
+    private void fillHist(ObjectHistogram<Double> hist, double[] data) {
+        for (int i = 0; i < data.length; i++)
+            hist.addElement(data[i]);
+    }
+
+    /**
+     *
+     */
+    @Test
+    public void testBuckets() {
+        testBuckets(hist1, new int[] {0, 1, 2, 3, 4, 5}, new int[] {4, 3, 2, 1, 1, 1});
+        testBuckets(hist2, new int[] {0, 1, 5, 6}, new int[] {6, 5, 1, 1});
+    }
+
+    /**
+     * @param hist History.
+     * @param expectedBuckets Expected buckets.
+     * @param expectedCounters Expected counters.
+     */
+    private void testBuckets(ObjectHistogram<Double> hist, int[] expectedBuckets, int[] expectedCounters) {
+        int size = hist.buckets().size();
+        int[] buckets = new int[size];
+        int[] counters = new int[size];
+        int ptr = 0;
+        for (int bucket : hist.buckets()) {
+            counters[ptr] = hist.getValue(bucket).get().intValue();
+            buckets[ptr++] = bucket;
+        }
+
+        assertArrayEquals(expectedBuckets, buckets);
+        assertArrayEquals(expectedCounters, counters);
+    }
+
+    /**
+     *
+     */
+    @Test
+    public void testAdd() {
+        double value = 100.;
+        hist1.addElement(value);
+        Optional<Double> counter = hist1.getValue(computeBucket(value));
+
+        assertTrue(counter.isPresent());
+        assertEquals(1, counter.get().intValue());
+    }
+
+    /**
+     *
+     */
+    @Test
+    public void testAddHist() {
+        ObjectHistogram<Double> result = hist1.plus(hist2);
+        testBuckets(result, new int[] {0, 1, 2, 3, 4, 5, 6}, new int[] {10, 8, 2, 1, 1, 2, 1});
+    }
+
+    /**
+     *
+     */
+    @Test
+    public void testDistributionFunction() {
+        TreeMap<Integer, Double> distribution = hist1.computeDistributionFunction();
+
+        int[] buckets = new int[distribution.size()];
+        double[] sums = new double[distribution.size()];
+
+        int ptr = 0;
+        for(int bucket : distribution.keySet()) {
+            sums[ptr] = distribution.get(bucket);
+            buckets[ptr++] = bucket;
+        }
+
+        assertArrayEquals(new int[] {0, 1, 2, 3, 4, 5}, buckets);
+        assertArrayEquals(new double[] {4., 7., 9., 10., 11., 12.}, sums, 0.01);
+    }
+
+    @Test
+    public void testOfSum() {
+        IgniteFunction<Double, Integer> bucketMap = x -> (int) (Math.ceil(x * 100) % 100);
+        IgniteFunction<Double, Double> counterMap = x -> Math.pow(x, 2);
+
+        ObjectHistogram<Double> forAllHistogram = new ObjectHistogram<>(bucketMap, counterMap);
+        Random rnd = new Random();
+        List<ObjectHistogram<Double>> partitions = new ArrayList<>();
+        int cntOfPartitions = rnd.nextInt(100);
+        int sizeOfDataset = rnd.nextInt(10000);
+        for(int i = 0; i < cntOfPartitions; i++)
+            partitions.add(new ObjectHistogram<>(bucketMap, counterMap));
+
+        for(int i = 0; i < sizeOfDataset; i++) {
+            double objVal = rnd.nextDouble();
+            forAllHistogram.addElement(objVal);
+            partitions.get(rnd.nextInt(partitions.size())).addElement(objVal);
+        }
+
+        Optional<ObjectHistogram<Double>> leftSum = partitions.stream().reduce((x,y) -> x.plus(y));
+        Optional<ObjectHistogram<Double>> rightSum = partitions.stream().reduce((x,y) -> y.plus(x));
+        assertTrue(leftSum.isPresent());
+        assertTrue(rightSum.isPresent());
+        assertTrue(forAllHistogram.isEqualTo(leftSum.get()));
+        assertTrue(forAllHistogram.isEqualTo(rightSum.get()));
+        assertTrue(leftSum.get().isEqualTo(rightSum.get()));
+    }
+
+    /**
+     * @param value Value.
+     */
+    private int computeBucket(Double value) {
+        return (int)Math.rint(value);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/cache/CacheBasedDatasetTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/cache/CacheBasedDatasetTest.java
index 2e39e65..d96c935 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/cache/CacheBasedDatasetTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/cache/CacheBasedDatasetTest.java
@@ -63,7 +63,7 @@
     }
 
     /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
+    @Override protected void beforeTest() {
         /* Grid instance. */
         ignite = grid(NODE_COUNT);
         ignite.configuration().setPeerClassLoadingEnabled(true);
@@ -87,6 +87,9 @@
             (upstream, upstreamSize, ctx) -> new SimpleDatasetData(new double[0], 0)
         );
 
+        assertEquals("Upstream cache name from dataset",
+            upstreamCache.getName(), dataset.getUpstreamCache().getName());
+
         assertTrue("Before computation all partitions should not be reserved",
             areAllPartitionsNotReserved(upstreamCache.getName(), dataset.getDatasetCache().getName()));
 
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/local/LocalDatasetBuilderTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/local/LocalDatasetBuilderTest.java
index 8a5eb3a..8dc9354 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/local/LocalDatasetBuilderTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/impl/local/LocalDatasetBuilderTest.java
@@ -21,6 +21,8 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.ml.dataset.PartitionContextBuilder;
+import org.apache.ignite.ml.dataset.PartitionDataBuilder;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
@@ -38,18 +40,10 @@
 
         LocalDatasetBuilder<Integer, Integer> builder = new LocalDatasetBuilder<>(data, 10);
 
-        LocalDataset<Serializable, TestPartitionData> dataset = builder.build(
-            (upstream, upstreamSize) -> null,
-            (upstream, upstreamSize, ctx) -> {
-                int[] arr = new int[Math.toIntExact(upstreamSize)];
+        LocalDataset<Serializable, TestPartitionData> dataset = buildDataset(builder);
 
-                int ptr = 0;
-                while (upstream.hasNext())
-                    arr[ptr++] = upstream.next().getValue();
-
-                return new TestPartitionData(arr);
-            }
-        );
+        assertEquals(10, dataset.getCtx().size());
+        assertEquals(10, dataset.getData().size());
 
         AtomicLong cnt = new AtomicLong();
 
@@ -76,18 +70,7 @@
 
         LocalDatasetBuilder<Integer, Integer> builder = new LocalDatasetBuilder<>(data, (k, v) -> k % 2 == 0,10);
 
-        LocalDataset<Serializable, TestPartitionData> dataset = builder.build(
-            (upstream, upstreamSize) -> null,
-            (upstream, upstreamSize, ctx) -> {
-                int[] arr = new int[Math.toIntExact(upstreamSize)];
-
-                int ptr = 0;
-                while (upstream.hasNext())
-                    arr[ptr++] = upstream.next().getValue();
-
-                return new TestPartitionData(arr);
-            }
-        );
+        LocalDataset<Serializable, TestPartitionData> dataset = buildDataset(builder);
 
         AtomicLong cnt = new AtomicLong();
 
@@ -105,6 +88,28 @@
         assertEquals(10, cnt.intValue());
     }
 
+    /** */
+    private LocalDataset<Serializable, TestPartitionData> buildDataset(
+        LocalDatasetBuilder<Integer, Integer> builder) {
+        PartitionContextBuilder<Integer, Integer, Serializable> partCtxBuilder = (upstream, upstreamSize) -> null;
+
+        PartitionDataBuilder<Integer, Integer, Serializable, TestPartitionData> partDataBuilder
+            = (upstream, upstreamSize, ctx) -> {
+            int[] arr = new int[Math.toIntExact(upstreamSize)];
+
+            int ptr = 0;
+            while (upstream.hasNext())
+                arr[ptr++] = upstream.next().getValue();
+
+            return new TestPartitionData(arr);
+        };
+
+        return builder.build(
+            partCtxBuilder.andThen(x -> null),
+            partDataBuilder.andThen((x, y) -> x)
+        );
+    }
+
     /**
      * Test partition {@code data}.
      */
@@ -122,7 +127,7 @@
         }
 
         /** {@inheritDoc} */
-        @Override public void close() throws Exception {
+        @Override public void close() {
             // Do nothing, GC will clean up.
         }
     }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/DatasetWrapperTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/DatasetWrapperTest.java
index 7fa4b66..2f004bf 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/DatasetWrapperTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/DatasetWrapperTest.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.ml.dataset.Dataset;
 import org.apache.ignite.ml.math.functions.IgniteBiFunction;
 import org.apache.ignite.ml.math.functions.IgniteBinaryOperator;
+import org.apache.ignite.ml.math.functions.IgniteFunction;
 import org.apache.ignite.ml.math.functions.IgniteTriFunction;
 import org.junit.Before;
 import org.junit.Test;
@@ -29,6 +30,7 @@
 import org.mockito.runners.MockitoJUnitRunner;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
@@ -59,25 +61,69 @@
     public void testComputeWithCtx() {
         doReturn(42).when(dataset).computeWithCtx(any(IgniteTriFunction.class), any(), any());
 
-        Integer res = wrapper.computeWithCtx(mock(IgniteTriFunction.class), mock(IgniteBinaryOperator.class),
-            (Integer) null);
+        Integer res = (Integer) wrapper.computeWithCtx(mock(IgniteTriFunction.class), mock(IgniteBinaryOperator.class),
+            null);
 
         assertEquals(42, res.intValue());
+
         verify(dataset, times(1)).computeWithCtx(any(IgniteTriFunction.class), any(), any());
     }
 
+    /** Tests {@code computeWithCtx()} method. */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testComputeWithCtx2() {
+        doReturn(42).when(dataset).computeWithCtx(any(IgniteTriFunction.class), any(), any());
+
+        Integer res = (Integer) wrapper.computeWithCtx(mock(IgniteBiFunction.class), mock(IgniteBinaryOperator.class),
+            null);
+
+        assertEquals(42, res.intValue());
+
+        verify(dataset, times(1)).computeWithCtx(any(IgniteTriFunction.class), any(), any());
+    }
+
+    /** Tests {@code computeWithCtx()} method. */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testComputeWithCtx3() {
+        wrapper.computeWithCtx((ctx, data) -> {
+            assertNotNull(ctx);
+            assertNotNull(data);
+        });
+
+        verify(dataset, times(1)).computeWithCtx(any(IgniteTriFunction.class),
+            any(IgniteBinaryOperator.class), any());
+    }
+
     /** Tests {@code compute()} method. */
     @Test
     @SuppressWarnings("unchecked")
     public void testCompute() {
         doReturn(42).when(dataset).compute(any(IgniteBiFunction.class), any(), any());
 
-        Integer res = wrapper.compute(mock(IgniteBiFunction.class), mock(IgniteBinaryOperator.class), (Integer) null);
+        Integer res = (Integer) wrapper.compute(mock(IgniteBiFunction.class), mock(IgniteBinaryOperator.class),
+            null);
 
         assertEquals(42, res.intValue());
+
         verify(dataset, times(1)).compute(any(IgniteBiFunction.class), any(), any());
     }
 
+    /** Tests {@code compute()} method. */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCompute2() {
+        doReturn(42).when(dataset).compute(any(IgniteBiFunction.class), any(IgniteBinaryOperator.class), any());
+
+        Integer res = (Integer) wrapper.compute(mock(IgniteFunction.class), mock(IgniteBinaryOperator.class),
+            null);
+
+        assertEquals(42, res.intValue());
+
+        verify(dataset, times(1)).compute(any(IgniteBiFunction.class), any(IgniteBinaryOperator.class), any());
+    }
+
     /** Tests {@code close()} method. */
     @Test
     public void testClose() throws Exception {
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleDatasetTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleDatasetTest.java
new file mode 100644
index 0000000..eaa03d2
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleDatasetTest.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.primitive;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.dataset.DatasetFactory;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+
+/**
+ * Tests for {@link SimpleDataset}.
+ */
+public class SimpleDatasetTest {
+    /** Basic test for SimpleDataset features. IMPL NOTE derived from LocalDatasetExample. */
+    @Test
+    public void basicTest() throws Exception {
+        Map<Integer, DataPoint> dataPoints = new HashMap<Integer, DataPoint>() {{
+            put(1, new DataPoint(42, 10000));
+            put(2, new DataPoint(32, 64000));
+            put(3, new DataPoint(53, 120000));
+            put(4, new DataPoint(24, 70000));
+        }};
+
+        // Creates a local simple dataset containing features and providing standard dataset API.
+        try (SimpleDataset<?> dataset = DatasetFactory.createSimpleDataset(
+            dataPoints,
+            2,
+            (k, v) -> VectorUtils.of(v.getAge(), v.getSalary())
+        )) {
+            assertArrayEquals("Mean values.", new double[] {37.75, 66000.0}, dataset.mean(), 0);
+
+            assertArrayEquals("Standard deviation values.",
+                new double[] {10.871407452579449, 38961.519477556314}, dataset.std(), 0);
+
+            double[][] covExp = new double[][] {
+                new double[] {118.1875, 135500.0},
+                new double[] {135500.0, 1.518E9}
+            };
+            double[][] cov = dataset.cov();
+            int rowCov = 0;
+            for (double[] row : cov)
+                assertArrayEquals("Covariance matrix row " + rowCov,
+                    covExp[rowCov++], row, 0);
+
+
+            double[][] corrExp = new double[][] {
+                new double[] {1.0000000000000002, 0.31990250167874007},
+                new double[] {0.31990250167874007, 1.0}
+            };
+            double[][] corr = dataset.corr();
+            int rowCorr = 0;
+            for (double[] row : corr)
+                assertArrayEquals("Correlation matrix row " + rowCorr,
+                    corrExp[rowCorr++], row, 0);
+        }
+    }
+
+    /** */
+    private static class DataPoint {
+        /** Age. */
+        private final double age;
+
+        /** Salary. */
+        private final double salary;
+
+        /**
+         * Constructs a new instance of person.
+         *
+         * @param age Age.
+         * @param salary Salary.
+         */
+        DataPoint(double age, double salary) {
+            this.age = age;
+            this.salary = salary;
+        }
+
+        /** */
+        double getAge() {
+            return age;
+        }
+
+        /** */
+        double getSalary() {
+            return salary;
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleLabeledDatasetTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleLabeledDatasetTest.java
new file mode 100644
index 0000000..f7b0f13
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/dataset/primitive/SimpleLabeledDatasetTest.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.dataset.primitive;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.dataset.DatasetFactory;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Tests for {@link SimpleLabeledDataset}.
+ */
+public class SimpleLabeledDatasetTest {
+    /** Basic test for SimpleLabeledDataset features. */
+    @Test
+    public void basicTest() throws Exception {
+        Map<Integer, DataPoint> dataPoints = new HashMap<Integer, DataPoint>() {{
+            put(5, new DataPoint(42, 10000));
+            put(6, new DataPoint(32, 64000));
+            put(7, new DataPoint(53, 120000));
+            put(8, new DataPoint(24, 70000));
+        }};
+
+        double[][] actualFeatures = new double[2][];
+        double[][] actualLabels = new double[2][];
+        int[] actualRows = new int[2];
+
+        // Creates a local simple dataset containing features and providing standard dataset API.
+        try (SimpleLabeledDataset<?> dataset = DatasetFactory.createSimpleLabeledDataset(
+            dataPoints,
+            2,
+            (k, v) -> VectorUtils.of(v.getAge(), v.getSalary()),
+            (k, v) -> new double[] {k, v.getAge(), v.getSalary()}
+        )) {
+            assertNull(dataset.compute((data, partIdx) -> {
+                actualFeatures[partIdx] = data.getFeatures();
+                actualLabels[partIdx] = data.getLabels();
+                actualRows[partIdx] = data.getRows();
+                return null;
+            }, (k, v) -> null));
+        }
+
+        double[][] expFeatures = new double[][] {
+            new double[] {42.0, 32.0, 10000.0, 64000.0},
+            new double[] {53.0, 24.0, 120000.0, 70000.0}
+        };
+        int rowFeat = 0;
+        for (double[] row : actualFeatures)
+            assertArrayEquals("Features partition index " + rowFeat,
+                expFeatures[rowFeat++], row, 0);
+
+        double[][] expLabels = new double[][] {
+            new double[] {5.0, 6.0, 42.0, 32.0, 10000.0, 64000.0},
+            new double[] {7.0, 8.0, 53.0, 24.0, 120000.0, 70000.0}
+        };
+        int rowLbl = 0;
+        for (double[] row : actualLabels)
+            assertArrayEquals("Labels partition index " + rowLbl,
+                expLabels[rowLbl++], row, 0);
+
+        assertArrayEquals("Rows per partitions", new int[] {2, 2}, actualRows);
+    }
+
+    /** */
+    private static class DataPoint {
+        /** Age. */
+        private final double age;
+
+        /** Salary. */
+        private final double salary;
+
+        /**
+         * Constructs a new instance of person.
+         *
+         * @param age Age.
+         * @param salary Salary.
+         */
+        DataPoint(double age, double salary) {
+            this.age = age;
+            this.salary = salary;
+        }
+
+        /** */
+        double getAge() {
+            return age;
+        }
+
+        /** */
+        double getSalary() {
+            return salary;
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/environment/EnvironmentTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/environment/EnvironmentTestSuite.java
new file mode 100644
index 0000000..ae9f490
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/environment/EnvironmentTestSuite.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.environment;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for all tests located in org.apache.ignite.ml.trees package.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    LearningEnvironmentBuilderTest.class,
+    LearningEnvironmentTest.class,
+    PromiseTest.class
+})
+public class EnvironmentTestSuite {
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilderTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilderTest.java
new file mode 100644
index 0000000..56f262b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentBuilderTest.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.environment;
+
+import org.apache.ignite.logger.NullLogger;
+import org.apache.ignite.ml.environment.logging.ConsoleLogger;
+import org.apache.ignite.ml.environment.logging.CustomMLLogger;
+import org.apache.ignite.ml.environment.logging.MLLogger;
+import org.apache.ignite.ml.environment.logging.NoOpLogger;
+import org.apache.ignite.ml.environment.parallelism.DefaultParallelismStrategy;
+import org.apache.ignite.ml.environment.parallelism.NoParallelismStrategy;
+import org.junit.Test;
+
+import static org.apache.ignite.ml.environment.parallelism.ParallelismStrategy.Type.NO_PARALLELISM;
+import static org.apache.ignite.ml.environment.parallelism.ParallelismStrategy.Type.ON_DEFAULT_POOL;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+
+/**
+ * Tests for {@link LearningEnvironmentBuilder}.
+ */
+public class LearningEnvironmentBuilderTest {
+    /** */
+    @Test
+    public void basic() {
+        LearningEnvironment env = LearningEnvironment.DEFAULT;
+
+        assertNotNull("Strategy", env.parallelismStrategy());
+        assertNotNull("Logger", env.logger());
+        assertNotNull("Logger for class", env.logger(this.getClass()));
+    }
+
+    /** */
+    @Test
+    public void withParallelismStrategy() {
+        assertTrue(LearningEnvironment.builder().withParallelismStrategy(NoParallelismStrategy.INSTANCE).build()
+            .parallelismStrategy() instanceof NoParallelismStrategy);
+
+        assertTrue(LearningEnvironment.builder().withParallelismStrategy(new DefaultParallelismStrategy()).build()
+            .parallelismStrategy() instanceof DefaultParallelismStrategy);
+    }
+
+    /** */
+    @Test
+    public void withParallelismStrategyType() {
+        assertTrue(LearningEnvironment.builder().withParallelismStrategy(NO_PARALLELISM).build()
+            .parallelismStrategy() instanceof NoParallelismStrategy);
+
+        assertTrue(LearningEnvironment.builder().withParallelismStrategy(ON_DEFAULT_POOL).build()
+            .parallelismStrategy() instanceof DefaultParallelismStrategy);
+    }
+
+    /** */
+    @Test
+    public void withLoggingFactory() {
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(ConsoleLogger.factory(MLLogger.VerboseLevel.HIGH))
+            .build().logger() instanceof ConsoleLogger);
+
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(ConsoleLogger.factory(MLLogger.VerboseLevel.HIGH))
+            .build().logger(this.getClass()) instanceof ConsoleLogger);
+
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(NoOpLogger.factory())
+            .build().logger() instanceof NoOpLogger);
+
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(NoOpLogger.factory())
+            .build().logger(this.getClass()) instanceof NoOpLogger);
+
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(CustomMLLogger.factory(new NullLogger()))
+            .build().logger() instanceof CustomMLLogger);
+
+        assertTrue(LearningEnvironment.builder().withLoggingFactory(CustomMLLogger.factory(new NullLogger()))
+            .build().logger(this.getClass()) instanceof CustomMLLogger);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentTest.java
new file mode 100644
index 0000000..7e5a079
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/environment/LearningEnvironmentTest.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.environment;
+
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.environment.logging.ConsoleLogger;
+import org.apache.ignite.ml.environment.logging.MLLogger;
+import org.apache.ignite.ml.environment.parallelism.DefaultParallelismStrategy;
+import org.apache.ignite.ml.environment.parallelism.ParallelismStrategy;
+import org.apache.ignite.ml.tree.randomforest.RandomForestRegressionTrainer;
+import org.apache.ignite.ml.tree.randomforest.data.FeaturesCountSelectionStrategies;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * Tests for {@link LearningEnvironment} that require to start the whole Ignite infrastructure. IMPL NOTE based on
+ * RandomForestRegressionExample example.
+ */
+public class LearningEnvironmentTest {
+    /** */
+    @Test
+    public void testBasic() throws InterruptedException {
+        RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(
+            IntStream.range(0, 0).mapToObj(
+                x -> new FeatureMeta("", 0, false)).collect(Collectors.toList())
+        ).withCountOfTrees(101)
+            .withFeaturesCountSelectionStrgy(FeaturesCountSelectionStrategies.ONE_THIRD)
+            .withMaxDepth(4)
+            .withMinImpurityDelta(0.)
+            .withSubSampleSize(0.3)
+            .withSeed(0);
+
+        LearningEnvironment environment = LearningEnvironment.builder()
+            .withParallelismStrategy(ParallelismStrategy.Type.ON_DEFAULT_POOL)
+            .withLoggingFactory(ConsoleLogger.factory(MLLogger.VerboseLevel.LOW))
+            .build();
+        trainer.setEnvironment(environment);
+        assertEquals(DefaultParallelismStrategy.class, environment.parallelismStrategy().getClass());
+        assertEquals(ConsoleLogger.class, environment.logger().getClass());
+    }
+}
+
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/environment/PromiseTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/environment/PromiseTest.java
new file mode 100644
index 0000000..50b0f1e
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/environment/PromiseTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.environment;
+
+import java.util.Optional;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.ml.environment.parallelism.Promise;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+
+/**
+ * Tests for {@link Promise} functionality.
+ */
+public class PromiseTest {
+    /** */
+    @Test
+    public void testUnsafeGet() {
+        assertNull("Strategy", new TestPromise().unsafeGet());
+    }
+
+    /** */
+    @Test
+    public void testGetOpt() {
+        assertEquals(Optional.empty(), (new TestPromise() {
+            /** {@inheritDoc} */
+            @Override public Object get() throws ExecutionException {
+                throw new ExecutionException("test", new RuntimeException("test cause"));
+            }
+        }).getOpt());
+    }
+
+    /** */
+    private static class TestPromise implements Promise<Object> {
+        /** {@inheritDoc} */
+        @Override public boolean cancel(boolean mayInterruptIfRunning) {
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isCancelled() {
+            return false;
+        }
+
+        /** {@inheritDoc} */
+        @Override public boolean isDone() {
+            return true;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object get() throws ExecutionException {
+            return null;
+        }
+
+        /** {@inheritDoc} */
+        @Override public Object get(long timeout, @NotNull TimeUnit unit) {
+            return null;
+        }
+    }
+}
+
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/ANNClassificationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/ANNClassificationTest.java
new file mode 100644
index 0000000..199644b
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/ANNClassificationTest.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.knn.ann.ANNClassificationModel;
+import org.apache.ignite.ml.knn.ann.ANNClassificationTrainer;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
+import org.apache.ignite.ml.math.distances.EuclideanDistance;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Assert;
+import org.junit.Test;
+
+/** Tests behaviour of ANNClassificationTest. */
+public class ANNClassificationTest extends TrainerTest {
+    /** */
+    @Test
+    public void testBinaryClassification() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoClusters.length; i++)
+            cacheMock.put(i, twoClusters[i]);
+
+        ANNClassificationTrainer trainer = new ANNClassificationTrainer()
+            .withK(10)
+            .withMaxIterations(10)
+            .withEpsilon(1e-4)
+            .withDistance(new EuclideanDistance())
+            .withSeed(1234L);
+
+        Assert.assertEquals(10, trainer.getK());
+        Assert.assertEquals(10, trainer.getMaxIterations());
+        TestUtils.assertEquals(1e-4, trainer.getEpsilon(), PRECISION);
+        Assert.assertEquals(new EuclideanDistance(), trainer.getDistance());
+        Assert.assertEquals(1234L, trainer.getSeed());
+
+        NNClassificationModel mdl = trainer.fit(
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        ).withK(3)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.SIMPLE);
+
+        Assert.assertNotNull(((ANNClassificationModel) mdl).getCandidates());
+
+        Assert.assertTrue(mdl.toString().contains(NNStrategy.SIMPLE.name()));
+        Assert.assertTrue(mdl.toString(true).contains(NNStrategy.SIMPLE.name()));
+        Assert.assertTrue(mdl.toString(false).contains(NNStrategy.SIMPLE.name()));
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoClusters.length; i++)
+            cacheMock.put(i, twoClusters[i]);
+
+        ANNClassificationTrainer trainer = new ANNClassificationTrainer()
+            .withK(10)
+            .withMaxIterations(10)
+            .withEpsilon(1e-4)
+            .withDistance(new EuclideanDistance());
+
+        ANNClassificationModel originalMdl = (ANNClassificationModel) trainer.withSeed(1234L).fit(
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        ).withK(3)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.SIMPLE);
+
+        ANNClassificationModel updatedOnSameDataset = (ANNClassificationModel) trainer.withSeed(1234L).update(originalMdl,
+            cacheMock, parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        ).withK(3)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.SIMPLE);
+
+        ANNClassificationModel updatedOnEmptyDataset = (ANNClassificationModel) trainer.withSeed(1234L).update(originalMdl,
+            new HashMap<Integer, double[]>(), parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        ).withK(3)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.SIMPLE);
+
+        Vector v1 = VectorUtils.of(550, 550);
+        Vector v2 = VectorUtils.of(-550, -550);
+        TestUtils.assertEquals(originalMdl.apply(v1), updatedOnSameDataset.apply(v1), PRECISION);
+        TestUtils.assertEquals(originalMdl.apply(v2), updatedOnSameDataset.apply(v2), PRECISION);
+        TestUtils.assertEquals(originalMdl.apply(v1), updatedOnEmptyDataset.apply(v1), PRECISION);
+        TestUtils.assertEquals(originalMdl.apply(v2), updatedOnEmptyDataset.apply(v2), PRECISION);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNClassificationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNClassificationTest.java
index ab1ecee..748123a 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNClassificationTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNClassificationTest.java
@@ -24,7 +24,7 @@
 import java.util.Map;
 import org.apache.ignite.ml.knn.classification.KNNClassificationModel;
 import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
 import org.apache.ignite.ml.math.distances.EuclideanDistance;
 import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
@@ -34,8 +34,9 @@
 import org.junit.runners.Parameterized;
 
 import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertTrue;
 
-/** Tests behaviour of KNNClassificationTest. */
+/** Tests behaviour of KNNClassification. */
 @RunWith(Parameterized.class)
 public class KNNClassificationTest {
     /** Number of parts to be tested. */
@@ -57,8 +58,14 @@
     }
 
     /** */
+    @Test(expected = IllegalStateException.class)
+    public void testNullDataset() {
+        new KNNClassificationModel(null).apply(null);
+    }
+
+    /** */
     @Test
-    public void testBinaryClassificationTest() {
+    public void testBinaryClassification() {
         Map<Integer, double[]> data = new HashMap<>();
         data.put(0, new double[] {1.0, 1.0, 1.0});
         data.put(1, new double[] {1.0, 2.0, 1.0});
@@ -69,14 +76,18 @@
 
         KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
-        KNNClassificationModel knnMdl = trainer.fit(
+        NNClassificationModel knnMdl = trainer.fit(
             data,
             parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
             (k, v) -> v[2]
         ).withK(3)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
+            .withStrategy(NNStrategy.SIMPLE);
+
+        assertTrue(knnMdl.toString().length() > 0);
+        assertTrue(knnMdl.toString(true).length() > 0);
+        assertTrue(knnMdl.toString(false).length() > 0);
 
         Vector firstVector = new DenseVector(new double[] {2.0, 2.0});
         assertEquals(knnMdl.apply(firstVector), 1.0);
@@ -86,7 +97,7 @@
 
     /** */
     @Test
-    public void testBinaryClassificationWithSmallestKTest() {
+    public void testBinaryClassificationWithSmallestK() {
         Map<Integer, double[]> data = new HashMap<>();
         data.put(0, new double[] {1.0, 1.0, 1.0});
         data.put(1, new double[] {1.0, 2.0, 1.0});
@@ -97,14 +108,14 @@
 
         KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
-        KNNClassificationModel knnMdl = trainer.fit(
+        NNClassificationModel knnMdl = trainer.fit(
             data,
             parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
             (k, v) -> v[2]
         ).withK(1)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
+            .withStrategy(NNStrategy.SIMPLE);
 
         Vector firstVector = new DenseVector(new double[] {2.0, 2.0});
         assertEquals(knnMdl.apply(firstVector), 1.0);
@@ -125,14 +136,14 @@
 
         KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
-        KNNClassificationModel knnMdl = trainer.fit(
+        NNClassificationModel knnMdl = trainer.fit(
             data,
             parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
             (k, v) -> v[2]
         ).withK(3)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
+            .withStrategy(NNStrategy.SIMPLE);
 
         Vector vector = new DenseVector(new double[] {-1.01, -1.01});
         assertEquals(knnMdl.apply(vector), 2.0);
@@ -151,16 +162,55 @@
 
         KNNClassificationTrainer trainer = new KNNClassificationTrainer();
 
-        KNNClassificationModel knnMdl = trainer.fit(
+        NNClassificationModel knnMdl = trainer.fit(
             data,
             parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
             (k, v) -> v[2]
         ).withK(3)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.WEIGHTED);
+            .withStrategy(NNStrategy.WEIGHTED);
 
         Vector vector = new DenseVector(new double[] {-1.01, -1.01});
         assertEquals(knnMdl.apply(vector), 1.0);
     }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> data = new HashMap<>();
+        data.put(0, new double[] {10.0, 10.0, 1.0});
+        data.put(1, new double[] {10.0, 20.0, 1.0});
+        data.put(2, new double[] {-1, -1, 1.0});
+        data.put(3, new double[] {-2, -2, 2.0});
+        data.put(4, new double[] {-1.0, -2.0, 2.0});
+        data.put(5, new double[] {-2.0, -1.0, 2.0});
+
+        KNNClassificationTrainer trainer = new KNNClassificationTrainer();
+
+        KNNClassificationModel originalMdl = (KNNClassificationModel)trainer.fit(
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        ).withK(3)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.WEIGHTED);
+
+        KNNClassificationModel updatedOnSameDataset = trainer.update(originalMdl,
+            data, parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+
+        KNNClassificationModel updatedOnEmptyDataset = trainer.update(originalMdl,
+            new HashMap<Integer, double[]>(), parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+
+        Vector vector = new DenseVector(new double[] {-1.01, -1.01});
+        assertEquals(originalMdl.apply(vector), updatedOnSameDataset.apply(vector));
+        assertEquals(originalMdl.apply(vector), updatedOnEmptyDataset.apply(vector));
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNRegressionTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNRegressionTest.java
index 586e6c8..52ff1ec 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNRegressionTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNRegressionTest.java
@@ -23,7 +23,7 @@
 import java.util.List;
 import java.util.Map;
 import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
-import org.apache.ignite.ml.knn.classification.KNNStrategy;
+import org.apache.ignite.ml.knn.classification.NNStrategy;
 import org.apache.ignite.ml.knn.regression.KNNRegressionModel;
 import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer;
 import org.apache.ignite.ml.math.distances.EuclideanDistance;
@@ -35,6 +35,8 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
+import static junit.framework.TestCase.assertEquals;
+
 /**
  * Tests for {@link KNNRegressionTrainer}.
  */
@@ -77,7 +79,7 @@
             (k, v) -> v[0]
         ).withK(1)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
+            .withStrategy(NNStrategy.SIMPLE);
 
         Vector vector = new DenseVector(new double[] {0, 0, 0, 5.0, 0.0});
         System.out.println(knnMdl.apply(vector));
@@ -87,41 +89,17 @@
     /** */
     @Test
     public void testLongly() {
-        Map<Integer, double[]> data = new HashMap<>();
-        data.put(0, new double[] {60323, 83.0, 234289, 2356, 1590, 107608, 1947});
-        data.put(1, new double[] {61122, 88.5, 259426, 2325, 1456, 108632, 1948});
-        data.put(2, new double[] {60171, 88.2, 258054, 3682, 1616, 109773, 1949});
-        data.put(3, new double[] {61187, 89.5, 284599, 3351, 1650, 110929, 1950});
-        data.put(4, new double[] {63221, 96.2, 328975, 2099, 3099, 112075, 1951});
-        data.put(5, new double[] {63639, 98.1, 346999, 1932, 3594, 113270, 1952});
-        data.put(6, new double[] {64989, 99.0, 365385, 1870, 3547, 115094, 1953});
-        data.put(7, new double[] {63761, 100.0, 363112, 3578, 3350, 116219, 1954});
-        data.put(8, new double[] {66019, 101.2, 397469, 2904, 3048, 117388, 1955});
-        data.put(9, new double[] {68169, 108.4, 442769, 2936, 2798, 120445, 1957});
-        data.put(10, new double[] {66513, 110.8, 444546, 4681, 2637, 121950, 1958});
-        data.put(11, new double[] {68655, 112.6, 482704, 3813, 2552, 123366, 1959});
-        data.put(12, new double[] {69564, 114.2, 502601, 3931, 2514, 125368, 1960});
-        data.put(13, new double[] {69331, 115.7, 518173, 4806, 2572, 127852, 1961});
-        data.put(14, new double[] {70551, 116.9, 554894, 4007, 2827, 130081, 1962});
-
-        KNNRegressionTrainer trainer = new KNNRegressionTrainer();
-
-        KNNRegressionModel knnMdl = (KNNRegressionModel) trainer.fit(
-            new LocalDatasetBuilder<>(data, parts),
-            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
-            (k, v) -> v[0]
-        ).withK(3)
-            .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
-
-        Vector vector = new DenseVector(new double[] {104.6, 419180, 2822, 2857, 118734, 1956});
-        System.out.println(knnMdl.apply(vector));
-        Assert.assertEquals(67857, knnMdl.apply(vector), 2000);
+        testLongly(NNStrategy.SIMPLE);
     }
 
     /** */
     @Test
     public void testLonglyWithWeightedStrategy() {
+        testLongly(NNStrategy.WEIGHTED);
+    }
+
+    /** */
+    private void testLongly(NNStrategy stgy) {
         Map<Integer, double[]> data = new HashMap<>();
         data.put(0, new double[] {60323, 83.0, 234289, 2356, 1590, 107608, 1947});
         data.put(1, new double[] {61122, 88.5, 259426, 2325, 1456, 108632, 1948});
@@ -147,10 +125,54 @@
             (k, v) -> v[0]
         ).withK(3)
             .withDistanceMeasure(new EuclideanDistance())
-            .withStrategy(KNNStrategy.SIMPLE);
+            .withStrategy(stgy);
 
         Vector vector = new DenseVector(new double[] {104.6, 419180, 2822, 2857, 118734, 1956});
-        System.out.println(knnMdl.apply(vector));
+
+        Assert.assertNotNull(knnMdl.apply(vector));
+
         Assert.assertEquals(67857, knnMdl.apply(vector), 2000);
+
+        Assert.assertTrue(knnMdl.toString().contains(stgy.name()));
+        Assert.assertTrue(knnMdl.toString(true).contains(stgy.name()));
+        Assert.assertTrue(knnMdl.toString(false).contains(stgy.name()));
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> data = new HashMap<>();
+        data.put(0, new double[] {11.0, 0, 0, 0, 0, 0});
+        data.put(1, new double[] {12.0, 2.0, 0, 0, 0, 0});
+        data.put(2, new double[] {13.0, 0, 3.0, 0, 0, 0});
+        data.put(3, new double[] {14.0, 0, 0, 4.0, 0, 0});
+        data.put(4, new double[] {15.0, 0, 0, 0, 5.0, 0});
+        data.put(5, new double[] {16.0, 0, 0, 0, 0, 6.0});
+
+        KNNRegressionTrainer trainer = new KNNRegressionTrainer();
+
+        KNNRegressionModel originalMdl = (KNNRegressionModel) trainer.fit(
+            new LocalDatasetBuilder<>(data, parts),
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        ).withK(1)
+            .withDistanceMeasure(new EuclideanDistance())
+            .withStrategy(NNStrategy.SIMPLE);
+
+        KNNRegressionModel updatedOnSameDataset = trainer.update(originalMdl,
+            data, parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+
+        KNNRegressionModel updatedOnEmptyDataset = trainer.update(originalMdl,
+            new HashMap<Integer, double[]>(), parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[2]
+        );
+
+        Vector vector = new DenseVector(new double[] {0, 0, 0, 5.0, 0.0});
+        assertEquals(originalMdl.apply(vector), updatedOnSameDataset.apply(vector));
+        assertEquals(originalMdl.apply(vector), updatedOnEmptyDataset.apply(vector));
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNTestSuite.java
index 55ef24e..0303d26 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/KNNTestSuite.java
@@ -25,9 +25,10 @@
  */
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
+    ANNClassificationTest.class,
     KNNClassificationTest.class,
     KNNRegressionTest.class,
-    LabeledDatasetTest.class
+    LabeledVectorSetTest.class
 })
 public class KNNTestSuite {
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetHelper.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetHelper.java
index dbcdb99..f3b8b3a 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetHelper.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetHelper.java
@@ -21,7 +21,7 @@
 import java.net.URISyntaxException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
-import org.apache.ignite.ml.structures.LabeledDataset;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
 import org.apache.ignite.ml.structures.preprocessing.LabeledDatasetLoader;
 
 /**
@@ -37,7 +37,7 @@
      * @param rsrcPath path to dataset.
      * @return null if path is incorrect.
      */
-    public static LabeledDataset loadDatasetFromTxt(String rsrcPath, boolean isFallOnBadData) {
+    public static LabeledVectorSet loadDatasetFromTxt(String rsrcPath, boolean isFallOnBadData) {
         try {
             Path path = Paths.get(LabeledDatasetHelper.class.getClassLoader().getResource(rsrcPath).toURI());
             try {
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetTest.java
deleted file mode 100644
index a029e49..0000000
--- a/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledDatasetTest.java
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.knn;
-
-import java.io.IOException;
-import java.net.URISyntaxException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import org.apache.ignite.ml.math.ExternalizableTest;
-import org.apache.ignite.ml.math.exceptions.CardinalityException;
-import org.apache.ignite.ml.math.exceptions.NoDataException;
-import org.apache.ignite.ml.math.exceptions.knn.EmptyFileException;
-import org.apache.ignite.ml.math.exceptions.knn.FileParsingException;
-import org.apache.ignite.ml.math.primitives.vector.Vector;
-import org.apache.ignite.ml.structures.LabeledDataset;
-import org.apache.ignite.ml.structures.LabeledDatasetTestTrainPair;
-import org.apache.ignite.ml.structures.LabeledVector;
-import org.apache.ignite.ml.structures.preprocessing.LabeledDatasetLoader;
-import org.junit.Test;
-
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.fail;
-
-/** Tests behaviour of KNNClassificationTest. */
-public class LabeledDatasetTest implements ExternalizableTest<LabeledDataset> {
-    /** */
-    private static final String KNN_IRIS_TXT = "datasets/knn/iris.txt";
-
-    /** */
-    private static final String NO_DATA_TXT = "datasets/knn/no_data.txt";
-
-    /** */
-    private static final String EMPTY_TXT = "datasets/knn/empty.txt";
-
-    /** */
-    private static final String IRIS_INCORRECT_TXT = "datasets/knn/iris_incorrect.txt";
-
-    /** */
-    private static final String IRIS_MISSED_DATA = "datasets/knn/missed_data.txt";
-
-    /** */
-    @Test
-    public void testFeatureNames() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        String[] featureNames = new String[] {"x", "y"};
-        final LabeledDataset dataset = new LabeledDataset(mtx, lbs, featureNames, false);
-
-        assertEquals(dataset.getFeatureName(0), "x");
-    }
-
-    /** */
-    @Test
-    public void testAccessMethods() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        final LabeledDataset dataset = new LabeledDataset(mtx, lbs, null, false);
-
-        assertEquals(dataset.colSize(), 2);
-        assertEquals(dataset.rowSize(), 6);
-
-        final LabeledVector<Vector, Double> row = (LabeledVector<Vector, Double>)dataset.getRow(0);
-
-        assertEquals(row.features().get(0), 1.0);
-        assertEquals(row.label(), 1.0);
-        dataset.setLabel(0, 2.0);
-        assertEquals(row.label(), 2.0);
-    }
-
-    /** */
-    @Test
-    public void testFailOnYNull() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {};
-
-        try {
-            new LabeledDataset(mtx, lbs);
-            fail("CardinalityException");
-        }
-        catch (CardinalityException e) {
-            return;
-        }
-        fail("CardinalityException");
-    }
-
-    /** */
-    @Test
-    public void testFailOnXNull() {
-        double[][] mtx =
-            new double[][] {};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        try {
-            new LabeledDataset(mtx, lbs);
-            fail("CardinalityException");
-        }
-        catch (CardinalityException e) {
-            return;
-        }
-        fail("CardinalityException");
-    }
-
-    /** */
-    @Test
-    public void testLoadingCorrectTxtFile() {
-        LabeledDataset training = LabeledDatasetHelper.loadDatasetFromTxt(KNN_IRIS_TXT, false);
-        assertEquals(training.rowSize(), 150);
-    }
-
-    /** */
-    @Test
-    public void testLoadingEmptyFile() {
-        try {
-            LabeledDatasetHelper.loadDatasetFromTxt(EMPTY_TXT, false);
-            fail("EmptyFileException");
-        }
-        catch (EmptyFileException e) {
-            return;
-        }
-        fail("EmptyFileException");
-    }
-
-    /** */
-    @Test
-    public void testLoadingFileWithFirstEmptyRow() {
-        try {
-            LabeledDatasetHelper.loadDatasetFromTxt(NO_DATA_TXT, false);
-            fail("NoDataException");
-        }
-        catch (NoDataException e) {
-            return;
-        }
-        fail("NoDataException");
-    }
-
-    /** */
-    @Test
-    public void testLoadingFileWithIncorrectData() {
-        LabeledDataset training = LabeledDatasetHelper.loadDatasetFromTxt(IRIS_INCORRECT_TXT, false);
-        assertEquals(149, training.rowSize());
-    }
-
-    /** */
-    @Test
-    public void testFailOnLoadingFileWithIncorrectData() {
-        try {
-            LabeledDatasetHelper.loadDatasetFromTxt(IRIS_INCORRECT_TXT, true);
-            fail("FileParsingException");
-        }
-        catch (FileParsingException e) {
-            return;
-        }
-        fail("FileParsingException");
-
-    }
-
-    /** */
-    @Test
-    public void testLoadingFileWithMissedData() throws URISyntaxException, IOException {
-        Path path = Paths.get(this.getClass().getClassLoader().getResource(IRIS_MISSED_DATA).toURI());
-
-        LabeledDataset training = LabeledDatasetLoader.loadFromTxtFile(path, ",", false, false);
-
-        assertEquals(training.features(2).get(1), 0.0);
-    }
-
-    /** */
-    @Test
-    public void testSplitting() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        LabeledDataset training = new LabeledDataset(mtx, lbs);
-
-        LabeledDatasetTestTrainPair split1 = new LabeledDatasetTestTrainPair(training, 0.67);
-
-        assertEquals(4, split1.test().rowSize());
-        assertEquals(2, split1.train().rowSize());
-
-        LabeledDatasetTestTrainPair split2 = new LabeledDatasetTestTrainPair(training, 0.65);
-
-        assertEquals(3, split2.test().rowSize());
-        assertEquals(3, split2.train().rowSize());
-
-        LabeledDatasetTestTrainPair split3 = new LabeledDatasetTestTrainPair(training, 0.4);
-
-        assertEquals(2, split3.test().rowSize());
-        assertEquals(4, split3.train().rowSize());
-
-        LabeledDatasetTestTrainPair split4 = new LabeledDatasetTestTrainPair(training, 0.3);
-
-        assertEquals(1, split4.test().rowSize());
-        assertEquals(5, split4.train().rowSize());
-    }
-
-    /** */
-    @Test
-    public void testLabels() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        LabeledDataset dataset = new LabeledDataset(mtx, lbs);
-        final double[] labels = dataset.labels();
-        for (int i = 0; i < lbs.length; i++)
-            assertEquals(lbs[i], labels[i]);
-    }
-
-    /** */
-    @Override public void testExternalization() {
-        double[][] mtx =
-            new double[][] {
-                {1.0, 1.0},
-                {1.0, 2.0},
-                {2.0, 1.0},
-                {-1.0, -1.0},
-                {-1.0, -2.0},
-                {-2.0, -1.0}};
-        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
-
-        LabeledDataset dataset = new LabeledDataset(mtx, lbs);
-        this.externalizeTest(dataset);
-    }
-}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledVectorSetTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledVectorSetTest.java
new file mode 100644
index 0000000..2303e96
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/knn/LabeledVectorSetTest.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.knn;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.Objects;
+import org.apache.ignite.ml.math.ExternalizableTest;
+import org.apache.ignite.ml.math.exceptions.CardinalityException;
+import org.apache.ignite.ml.math.exceptions.NoDataException;
+import org.apache.ignite.ml.math.exceptions.knn.EmptyFileException;
+import org.apache.ignite.ml.math.exceptions.knn.FileParsingException;
+import org.apache.ignite.ml.math.exceptions.knn.NoLabelVectorException;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.structures.LabeledVector;
+import org.apache.ignite.ml.structures.LabeledVectorSet;
+import org.apache.ignite.ml.structures.LabeledVectorSetTestTrainPair;
+import org.apache.ignite.ml.structures.preprocessing.LabeledDatasetLoader;
+import org.junit.Test;
+
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.fail;
+
+/** Tests behaviour of KNNClassificationTest. */
+public class LabeledVectorSetTest implements ExternalizableTest<LabeledVectorSet> {
+    /** */
+    private static final String KNN_IRIS_TXT = "datasets/knn/iris.txt";
+
+    /** */
+    private static final String NO_DATA_TXT = "datasets/knn/no_data.txt";
+
+    /** */
+    private static final String EMPTY_TXT = "datasets/knn/empty.txt";
+
+    /** */
+    private static final String IRIS_INCORRECT_TXT = "datasets/knn/iris_incorrect.txt";
+
+    /** */
+    private static final String IRIS_MISSED_DATA = "datasets/knn/missed_data.txt";
+
+    /** */
+    @Test
+    public void testFeatureNames() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        String[] featureNames = new String[] {"x", "y"};
+        final LabeledVectorSet dataset = new LabeledVectorSet(mtx, lbs, featureNames, false);
+
+        assertEquals(dataset.getFeatureName(0), "x");
+    }
+
+    /** */
+    @Test
+    public void testAccessMethods() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        final LabeledVectorSet dataset = new LabeledVectorSet(mtx, lbs, null, false);
+
+        assertEquals(dataset.colSize(), 2);
+        assertEquals(dataset.rowSize(), 6);
+
+        assertEquals(dataset.label(0), lbs[0], 0);
+
+        assertEquals(dataset.copy().colSize(), 2);
+
+        @SuppressWarnings("unchecked")
+        final LabeledVector<Vector, Double> row = (LabeledVector<Vector, Double>)dataset.getRow(0);
+
+        assertEquals(row.features().get(0), 1.0);
+        assertEquals(row.label(), 1.0);
+        dataset.setLabel(0, 2.0);
+        assertEquals(row.label(), 2.0);
+
+        assertEquals(0, new LabeledVectorSet().rowSize());
+        assertEquals(1, new LabeledVectorSet(1, 2).rowSize());
+        assertEquals(1, new LabeledVectorSet(1, 2, true).rowSize());
+        assertEquals(1, new LabeledVectorSet(1, 2, null, true).rowSize());
+    }
+
+    /** */
+    @Test
+    public void testFailOnYNull() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {};
+
+        try {
+            new LabeledVectorSet(mtx, lbs);
+            fail("CardinalityException");
+        }
+        catch (CardinalityException e) {
+            return;
+        }
+        fail("CardinalityException");
+    }
+
+    /** */
+    @Test
+    public void testFailOnXNull() {
+        double[][] mtx =
+            new double[][] {};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        try {
+            new LabeledVectorSet(mtx, lbs);
+            fail("CardinalityException");
+        }
+        catch (CardinalityException e) {
+            return;
+        }
+        fail("CardinalityException");
+    }
+
+    /** */
+    @Test
+    public void testLoadingCorrectTxtFile() {
+        LabeledVectorSet training = LabeledDatasetHelper.loadDatasetFromTxt(KNN_IRIS_TXT, false);
+        assertEquals(training.rowSize(), 150);
+    }
+
+    /** */
+    @Test
+    public void testLoadingEmptyFile() {
+        try {
+            LabeledDatasetHelper.loadDatasetFromTxt(EMPTY_TXT, false);
+            fail("EmptyFileException");
+        }
+        catch (EmptyFileException e) {
+            return;
+        }
+        fail("EmptyFileException");
+    }
+
+    /** */
+    @Test
+    public void testLoadingFileWithFirstEmptyRow() {
+        try {
+            LabeledDatasetHelper.loadDatasetFromTxt(NO_DATA_TXT, false);
+            fail("NoDataException");
+        }
+        catch (NoDataException e) {
+            return;
+        }
+        fail("NoDataException");
+    }
+
+    /** */
+    @Test
+    public void testLoadingFileWithIncorrectData() {
+        LabeledVectorSet training = LabeledDatasetHelper.loadDatasetFromTxt(IRIS_INCORRECT_TXT, false);
+        assertEquals(149, training.rowSize());
+    }
+
+    /** */
+    @Test
+    public void testFailOnLoadingFileWithIncorrectData() {
+        try {
+            LabeledDatasetHelper.loadDatasetFromTxt(IRIS_INCORRECT_TXT, true);
+            fail("FileParsingException");
+        }
+        catch (FileParsingException e) {
+            return;
+        }
+        fail("FileParsingException");
+
+    }
+
+    /** */
+    @Test
+    public void testLoadingFileWithMissedData() throws URISyntaxException, IOException {
+        Path path = Paths.get(Objects.requireNonNull(this.getClass().getClassLoader().getResource(IRIS_MISSED_DATA)).toURI());
+
+        LabeledVectorSet training = LabeledDatasetLoader.loadFromTxtFile(path, ",", false, false);
+
+        assertEquals(training.features(2).get(1), 0.0);
+    }
+
+    /** */
+    @Test
+    public void testSplitting() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        LabeledVectorSet training = new LabeledVectorSet(mtx, lbs);
+
+        LabeledVectorSetTestTrainPair split1 = new LabeledVectorSetTestTrainPair(training, 0.67);
+
+        assertEquals(4, split1.test().rowSize());
+        assertEquals(2, split1.train().rowSize());
+
+        LabeledVectorSetTestTrainPair split2 = new LabeledVectorSetTestTrainPair(training, 0.65);
+
+        assertEquals(3, split2.test().rowSize());
+        assertEquals(3, split2.train().rowSize());
+
+        LabeledVectorSetTestTrainPair split3 = new LabeledVectorSetTestTrainPair(training, 0.4);
+
+        assertEquals(2, split3.test().rowSize());
+        assertEquals(4, split3.train().rowSize());
+
+        LabeledVectorSetTestTrainPair split4 = new LabeledVectorSetTestTrainPair(training, 0.3);
+
+        assertEquals(1, split4.test().rowSize());
+        assertEquals(5, split4.train().rowSize());
+    }
+
+    /** */
+    @Test
+    public void testLabels() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        LabeledVectorSet dataset = new LabeledVectorSet(mtx, lbs);
+        final double[] labels = dataset.labels();
+        for (int i = 0; i < lbs.length; i++)
+            assertEquals(lbs[i], labels[i]);
+    }
+
+    /** */
+    @Test(expected = NoLabelVectorException.class)
+    @SuppressWarnings("unchecked")
+    public void testSetLabelInvalid() {
+        new LabeledVectorSet(new LabeledVector[1]).setLabel(0, 2.0);
+    }
+
+    /** */
+    @Override public void testExternalization() {
+        double[][] mtx =
+            new double[][] {
+                {1.0, 1.0},
+                {1.0, 2.0},
+                {2.0, 1.0},
+                {-1.0, -1.0},
+                {-1.0, -2.0},
+                {-2.0, -1.0}};
+        double[] lbs = new double[] {1.0, 1.0, 1.0, 2.0, 2.0, 2.0};
+
+        LabeledVectorSet dataset = new LabeledVectorSet(mtx, lbs);
+        this.externalizeTest(dataset);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/BlasTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/BlasTest.java
index 61bde69..3bd7240 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/math/BlasTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/BlasTest.java
@@ -19,6 +19,7 @@
 
 import java.util.Arrays;
 import java.util.function.BiPredicate;
+import org.apache.ignite.ml.math.exceptions.NonSquareMatrixException;
 import org.apache.ignite.ml.math.primitives.matrix.Matrix;
 import org.apache.ignite.ml.math.primitives.matrix.impl.DenseMatrix;
 import org.apache.ignite.ml.math.primitives.matrix.impl.SparseMatrix;
@@ -282,6 +283,15 @@
         Assert.assertEquals(exp, y);
     }
 
+    /** Tests 'syr' operation for non-square dense matrix A. */
+    @Test(expected = NonSquareMatrixException.class)
+    public void testSyrNonSquareMatrix() {
+        double alpha = 3.0;
+        DenseMatrix a = new DenseMatrix(new double[][] {{10.0, 11.0, 12.0}, {0.0, 1.0, 2.0}}, 2);
+        Vector x = new DenseVector(new double[] {1.0, 2.0});
+        new Blas().syr(alpha, x, a);
+    }
+
     /**
      * Create a sparse vector from array.
      *
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java
index ed2ca11..4cfb092 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/distances/DistanceTest.java
@@ -35,26 +35,31 @@
     private Vector v2;
 
     /** */
+    private double[] data2;
+
+    /** */
     @Before
     public void setup() {
+        data2 = new double[] {2.0, 1.0, 0.0};
         v1 = new DenseVector(new double[] {0.0, 0.0, 0.0});
-        v2 = new DenseVector(new double[] {2.0, 1.0, 0.0});
+        v2 = new DenseVector(data2);
     }
 
     /** */
     @Test
-    public void euclideanDistance() throws Exception {
-
+    public void euclideanDistance() {
         double expRes = Math.pow(5, 0.5);
 
         DistanceMeasure distanceMeasure = new EuclideanDistance();
 
         Assert.assertEquals(expRes, distanceMeasure.compute(v1, v2), PRECISION);
+
+        Assert.assertEquals(expRes, new EuclideanDistance().compute(v1, data2), PRECISION);
     }
 
     /** */
     @Test
-    public void manhattanDistance() throws Exception {
+    public void manhattanDistance() {
         double expRes = 3;
 
         DistanceMeasure distanceMeasure = new ManhattanDistance();
@@ -64,7 +69,7 @@
 
     /** */
     @Test
-    public void hammingDistance() throws Exception {
+    public void hammingDistance() {
         double expRes = 2;
 
         DistanceMeasure distanceMeasure = new HammingDistance();
@@ -72,4 +77,15 @@
         Assert.assertEquals(expRes, distanceMeasure.compute(v1, v2), PRECISION);
     }
 
+    /** */
+    @Test(expected = UnsupportedOperationException.class)
+    public void manhattanDistance2() {
+        new ManhattanDistance().compute(v1, data2);
+    }
+
+    /** */
+    @Test(expected = UnsupportedOperationException.class)
+    public void hammingDistance2() {
+        new HammingDistance().compute(v1, data2);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeapTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeapTest.java
index 5d1dac3..6af03df 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeapTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/isolve/lsqr/LSQROnHeapTest.java
@@ -29,6 +29,8 @@
 import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 /**
  * Tests for {@link LSQROnHeap}.
@@ -73,7 +75,17 @@
 
         LSQRResult res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, null);
 
+        assertEquals(3, res.getIterations());
+        assertEquals(1, res.getIsstop());
+        assertEquals(7.240617907140957E-14, res.getR1norm(), 0.0001);
+        assertEquals(7.240617907140957E-14, res.getR2norm(), 0.0001);
+        assertEquals(6.344288770224759, res.getAnorm(), 0.0001);
+        assertEquals(40.540617492419464, res.getAcond(), 0.0001);
+        assertEquals(3.4072322214704627E-13, res.getArnorm(), 0.0001);
+        assertEquals(3.000000000000001, res.getXnorm(), 0.0001);
+        assertArrayEquals(new double[]{0.0, 0.0, 0.0}, res.getVar(), 1e-6);
         assertArrayEquals(new double[]{1, -2, -2}, res.getX(), 1e-6);
+        assertTrue(res.toString().length() > 0);
     }
 
     /** Tests solving simple linear system with specified x0. */
@@ -97,6 +109,8 @@
         LSQRResult res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false,
             new double[] {999, 999, 999});
 
+        assertEquals(3, res.getIterations());
+
         assertArrayEquals(new double[]{1, -2, -2}, res.getX(), 1e-6);
     }
 
@@ -126,6 +140,8 @@
         )) {
             LSQRResult res = lsqr.solve(0, 1e-12, 1e-12, 1e8, -1, false, null);
 
+            assertEquals(8, res.getIterations());
+
             assertArrayEquals(new double[]{72.26948107,  15.95144674,  24.07403921,  66.73038781}, res.getX(), 1e-6);
         }
     }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/DelegatingVectorConstructorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/DelegatingVectorConstructorTest.java
index 6b44c38..fbe6db8 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/DelegatingVectorConstructorTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/DelegatingVectorConstructorTest.java
@@ -33,12 +33,14 @@
     public void basicTest() {
         final Vector parent = new DenseVector(new double[] {0, 1});
 
-        final Vector delegate = new DelegatingVector(parent);
+        final DelegatingVector delegate = new DelegatingVector(parent);
 
         final int size = parent.size();
 
         assertEquals("Delegate size differs from expected.", size, delegate.size());
 
+        assertEquals("Delegate vector differs from expected.", parent, delegate.getVector());
+
         for (int idx = 0; idx < size; idx++)
             assertDelegate(parent, delegate, idx);
     }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/SparseVectorConstructorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/SparseVectorConstructorTest.java
index b53a952..1a6956f 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/SparseVectorConstructorTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/math/primitives/vector/SparseVectorConstructorTest.java
@@ -17,11 +17,15 @@
 
 package org.apache.ignite.ml.math.primitives.vector;
 
+import java.util.HashMap;
+import java.util.Map;
 import org.apache.ignite.ml.math.StorageConstants;
 import org.apache.ignite.ml.math.primitives.vector.impl.SparseVector;
 import org.junit.Test;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
 /** */
 public class SparseVectorConstructorTest {
@@ -52,4 +56,21 @@
             new SparseVector(1, StorageConstants.SEQUENTIAL_ACCESS_MODE).size());
 
     }
+
+    /** */
+    @Test
+    public void noParamsCtorTest() {
+        assertNotNull(new SparseVector().nonZeroSpliterator());
+    }
+
+    /** */
+    @Test
+    public void mapCtorTest() {
+        Map<Integer, Double> map = new HashMap<Integer, Double>() {{
+            put(1, 1.);
+        }};
+
+        assertTrue("Copy true", new SparseVector(map, true).isRandomAccess());
+        assertTrue("Copy false", new SparseVector(map, false).isRandomAccess());
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/nn/LossFunctionsTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/nn/LossFunctionsTest.java
new file mode 100644
index 0000000..bef05ec
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/nn/LossFunctionsTest.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.nn;
+
+import org.apache.ignite.ml.math.functions.IgniteDifferentiableVectorToDoubleFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.optimization.LossFunctions;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertNotNull;
+
+/**
+ * Tests for {@link LossFunctions}.
+ */
+public class LossFunctionsTest {
+    /** */
+    @Test
+    public void testMSE() {
+        IgniteDifferentiableVectorToDoubleFunction f = LossFunctions.MSE.apply(new DenseVector(new double[] {2.0, 1.0}));
+
+        assertNotNull(f);
+
+        test(new double[] {1.0, 3.0}, f);
+    }
+
+    /** */
+    @Test
+    public void testLOG() {
+        IgniteDifferentiableVectorToDoubleFunction f = LossFunctions.LOG.apply(new DenseVector(new double[] {2.0, 1.0}));
+
+        assertNotNull(f);
+
+        test(new double[] {1.0, 3.0}, f);
+    }
+
+    /** */
+    @Test
+    public void testL2() {
+        IgniteDifferentiableVectorToDoubleFunction f = LossFunctions.L2.apply(new DenseVector(new double[] {2.0, 1.0}));
+
+        assertNotNull(f);
+
+        test(new double[] {1.0, 3.0}, f);
+    }
+
+    /** */
+    @Test
+    public void testL1() {
+        IgniteDifferentiableVectorToDoubleFunction f = LossFunctions.L1.apply(new DenseVector(new double[] {2.0, 1.0}));
+
+        assertNotNull(f);
+
+        test(new double[] {1.0, 3.0}, f);
+    }
+
+    /** */
+    @Test
+    public void testHINGE() {
+        IgniteDifferentiableVectorToDoubleFunction f = LossFunctions.HINGE.apply(new DenseVector(new double[] {2.0, 1.0}));
+
+        assertNotNull(f);
+
+        test(new double[] {1.0, 3.0}, f);
+    }
+
+    /** */
+    private void test(double[] expData, IgniteDifferentiableVectorToDoubleFunction f) {
+        verify(expData, f.differential(new DenseVector(new double[] {3.0, 4.0})));
+    }
+
+    /** */
+    private void verify(double[] expData, Vector actual) {
+        assertArrayEquals(expData, new DenseVector(actual.size()).assign(actual).getStorage().data(), 0);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTest.java
index 51620b7..0f15dda 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTest.java
@@ -58,13 +58,13 @@
             withAddedLayer(2, true, Activators.SIGMOID).
             withAddedLayer(1, true, Activators.SIGMOID);
 
-        MultilayerPerceptron mlp = new MultilayerPerceptron(conf, new MLPConstInitializer(1, 2));
+        MultilayerPerceptron mlp1 = new MultilayerPerceptron(conf, new MLPConstInitializer(1, 2));
 
-        mlp.setWeights(1, new DenseMatrix(new double[][] {{20.0, 20.0}, {-20.0, -20.0}}));
-        mlp.setBiases(1, new DenseVector(new double[] {-10.0, 30.0}));
+        mlp1.setWeights(1, new DenseMatrix(new double[][] {{20.0, 20.0}, {-20.0, -20.0}}));
+        mlp1.setBiases(1, new DenseVector(new double[] {-10.0, 30.0}));
 
-        mlp.setWeights(2, new DenseMatrix(new double[][] {{20.0, 20.0}}));
-        mlp.setBiases(2, new DenseVector(new double[] {-30.0}));
+        MultilayerPerceptron mlp2 = mlp1.setWeights(2, new DenseMatrix(new double[][] {{20.0, 20.0}}));
+        MultilayerPerceptron mlp = mlp2.setBiases(2, new DenseVector(new double[] {-30.0}));
 
         Matrix input = new DenseMatrix(new double[][] {{0.0, 0.0}, {0.0, 1.0}, {1.0, 0.0}, {1.0, 1.0}});
 
@@ -106,6 +106,39 @@
     }
 
     /**
+     * Test three layer MLP.
+     */
+    @Test
+    public void testStackedTwiceMLP() {
+        int firstLayerNeuronsCnt = 3;
+        int secondLayerNeuronsCnt = 2;
+        int thirdLayerNeuronsCnt = 4;
+        MLPConstInitializer initer = new MLPConstInitializer(1, 2);
+
+        MLPArchitecture mlpLayer1Conf = new MLPArchitecture(4).
+            withAddedLayer(firstLayerNeuronsCnt, true, Activators.SIGMOID);
+        MLPArchitecture mlpLayer2Conf = new MLPArchitecture(firstLayerNeuronsCnt).
+            withAddedLayer(secondLayerNeuronsCnt, false, Activators.SIGMOID);
+        MLPArchitecture mlpLayer3Conf = new MLPArchitecture(secondLayerNeuronsCnt).
+            withAddedLayer(thirdLayerNeuronsCnt, false, Activators.SIGMOID);
+
+        MultilayerPerceptron mlp1 = new MultilayerPerceptron(mlpLayer1Conf, initer);
+        MultilayerPerceptron mlp2 = new MultilayerPerceptron(mlpLayer2Conf, initer);
+        MultilayerPerceptron mlp3 = new MultilayerPerceptron(mlpLayer3Conf, initer);
+
+        Assert.assertEquals(1., mlp1.weight(1, 0, 1), 0);
+
+        MultilayerPerceptron stackedMLP = mlp1.add(mlp2).add(mlp3);
+
+        Assert.assertTrue(stackedMLP.toString().length() > 0);
+        Assert.assertTrue(stackedMLP.toString(true).length() > 0);
+        Assert.assertTrue(stackedMLP.toString(false).length() > 0);
+
+        Assert.assertEquals(4, stackedMLP.architecture().outputSize());
+        Assert.assertEquals(8, stackedMLP.architecture().layersCount());
+    }
+
+    /**
      * Test parameters count works well.
      */
     @Test
@@ -169,10 +202,10 @@
         MLPArchitecture conf = new MLPArchitecture(inputSize).
             withAddedLayer(firstLayerNeuronsCnt, false, Activators.SIGMOID);
 
-        MultilayerPerceptron mlp = new MultilayerPerceptron(conf);
+        MultilayerPerceptron mlp1 = new MultilayerPerceptron(conf);
 
-        mlp.setWeight(1, 0, 0, w10);
-        mlp.setWeight(1, 1, 0, w11);
+        mlp1.setWeight(1, 0, 0, w10);
+        MultilayerPerceptron mlp = mlp1.setWeight(1, 1, 0, w11);
         double x0 = 1.0;
         double x1 = 3.0;
 
@@ -197,7 +230,7 @@
         Vector weightsVec = mlp.weights(1).getRow(0);
         Tracer.showAscii(weightsVec);
 
-        Vector trueGrad = partialDer.apply(ytt, weightsVec, inputs.getCol(0));
+        Vector trueGrad = partialDer.andThen(x -> x).apply(ytt, weightsVec, inputs.getCol(0));
 
         Tracer.showAscii(trueGrad);
         Tracer.showAscii(grad);
@@ -205,4 +238,26 @@
         Assert.assertEquals(mlp.architecture().parametersCount(), grad.size());
         Assert.assertEquals(trueGrad, grad);
     }
+
+    /**
+     * Test methods related to per-neuron bias.
+     */
+    @Test
+    public void testNeuronBias() {
+        int inputSize = 3;
+        int firstLayerNeuronsCnt = 2;
+        int secondLayerNeurons = 1;
+
+        MLPArchitecture conf = new MLPArchitecture(inputSize).
+            withAddedLayer(firstLayerNeuronsCnt, false, Activators.SIGMOID).
+            withAddedLayer(secondLayerNeurons, true, Activators.SIGMOID);
+
+        MultilayerPerceptron mlp1 = new MultilayerPerceptron(conf, new MLPConstInitializer(100, 200));
+
+        MultilayerPerceptron mlp = mlp1.setBias(2, 0, 1.);
+        Assert.assertEquals(1., mlp.bias(2, 0), 0);
+
+        mlp.setBias(2, 0, 0.5);
+        Assert.assertEquals(0.5, mlp.bias(2, 0), 0);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTestSuite.java
index 2e41813..3f98ba5 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTestSuite.java
@@ -27,7 +27,8 @@
 @Suite.SuiteClasses({
     MLPTest.class,
     MLPTrainerTest.class,
-    MLPTrainerIntegrationTest.class
+    MLPTrainerIntegrationTest.class,
+    LossFunctionsTest.class
 })
 public class MLPTestSuite {
     // No-op.
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTrainerTest.java
index a1d601c..6a6555e 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/nn/MLPTrainerTest.java
@@ -29,6 +29,7 @@
 import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.nn.architecture.MLPArchitecture;
 import org.apache.ignite.ml.optimization.LossFunctions;
+import org.apache.ignite.ml.optimization.SmoothParametrized;
 import org.apache.ignite.ml.optimization.updatecalculators.NesterovParameterUpdate;
 import org.apache.ignite.ml.optimization.updatecalculators.NesterovUpdateCalculator;
 import org.apache.ignite.ml.optimization.updatecalculators.RPropParameterUpdate;
@@ -154,6 +155,69 @@
 
             TestUtils.checkIsInEpsilonNeighbourhood(new DenseVector(new double[]{0.0}), predict.getRow(0), 1E-1);
         }
+
+        /** */
+        @Test
+        public void testUpdate() {
+            UpdatesStrategy<SmoothParametrized, SimpleGDParameterUpdate> updatesStgy = new UpdatesStrategy<>(
+                new SimpleGDUpdateCalculator(0.2),
+                SimpleGDParameterUpdate::sumLocal,
+                SimpleGDParameterUpdate::avg
+            );
+
+            Map<Integer, double[][]> xorData = new HashMap<>();
+            xorData.put(0, new double[][]{{0.0, 0.0}, {0.0}});
+            xorData.put(1, new double[][]{{0.0, 1.0}, {1.0}});
+            xorData.put(2, new double[][]{{1.0, 0.0}, {1.0}});
+            xorData.put(3, new double[][]{{1.0, 1.0}, {0.0}});
+
+            MLPArchitecture arch = new MLPArchitecture(2).
+                withAddedLayer(10, true, Activators.RELU).
+                withAddedLayer(1, false, Activators.SIGMOID);
+
+            MLPTrainer<SimpleGDParameterUpdate> trainer = new MLPTrainer<>(
+                arch,
+                LossFunctions.MSE,
+                updatesStgy,
+                3000,
+                batchSize,
+                50,
+                123L
+            );
+
+            MultilayerPerceptron originalMdl = trainer.fit(
+                xorData,
+                parts,
+                (k, v) -> VectorUtils.of(v[0]),
+                (k, v) -> v[1]
+            );
+
+            MultilayerPerceptron updatedOnSameDS = trainer.update(
+                originalMdl,
+                xorData,
+                parts,
+                (k, v) -> VectorUtils.of(v[0]),
+                (k, v) -> v[1]
+            );
+
+            MultilayerPerceptron updatedOnEmptyDS = trainer.update(
+                originalMdl,
+                new HashMap<Integer, double[][]>(),
+                parts,
+                (k, v) -> VectorUtils.of(v[0]),
+                (k, v) -> v[1]
+            );
+
+            DenseMatrix matrix = new DenseMatrix(new double[][] {
+                {0.0, 0.0},
+                {0.0, 1.0},
+                {1.0, 0.0},
+                {1.0, 1.0}
+            });
+
+            TestUtils.checkIsInEpsilonNeighbourhood(originalMdl.apply(matrix).getRow(0), updatedOnSameDS.apply(matrix).getRow(0), 1E-1);
+            TestUtils.checkIsInEpsilonNeighbourhood(originalMdl.apply(matrix).getRow(0), updatedOnEmptyDS.apply(matrix).getRow(0), 1E-1);
+        }
     }
 
     /**
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineMdlTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineMdlTest.java
new file mode 100644
index 0000000..d740577
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineMdlTest.java
@@ -0,0 +1,75 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.pipeline;
+
+import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
+import org.junit.Test;
+
+/**
+ * Tests for {@link PipelineMdl}.
+ */
+public class PipelineMdlTest {
+    /** Precision in test checks. */
+    private static final double PRECISION = 1e-6;
+
+    /** */
+    @Test
+    public void testPredict() {
+        Vector weights = new DenseVector(new double[] {2.0, 3.0});
+
+        verifyPredict(getMdl(new LogisticRegressionModel(weights, 1.0).withRawLabels(true)));
+    }
+
+    private PipelineMdl<Integer, double[]> getMdl(LogisticRegressionModel internalMdl) {
+        return new PipelineMdl<Integer, double[]>()
+            .withFeatureExtractor(null)
+            .withLabelExtractor(null)
+            .withInternalMdl(internalMdl);
+    }
+
+    /** */
+    private void verifyPredict(PipelineMdl mdl) {
+        Vector observation = new DenseVector(new double[] {1.0, 1.0});
+        TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 1.0 + 3.0 * 1.0), mdl.apply(observation), PRECISION);
+
+        observation = new DenseVector(new double[] {2.0, 1.0});
+        TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 2.0 + 3.0 * 1.0), mdl.apply(observation), PRECISION);
+
+        observation = new DenseVector(new double[] {1.0, 2.0});
+        TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 1.0 + 3.0 * 2.0), mdl.apply(observation), PRECISION);
+
+        observation = new DenseVector(new double[] {-2.0, 1.0});
+        TestUtils.assertEquals(sigmoid(1.0 - 2.0 * 2.0 + 3.0 * 1.0), mdl.apply(observation), PRECISION);
+
+        observation = new DenseVector(new double[] {1.0, -2.0});
+        TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 1.0 - 3.0 * 2.0), mdl.apply(observation), PRECISION);
+    }
+
+    /**
+     * Sigmoid function.
+     *
+     * @param z The regression value.
+     * @return The result.
+     */
+    private static double sigmoid(double z) {
+        return 1.0 / (1.0 + Math.exp(-z));
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTest.java
new file mode 100644
index 0000000..91bbcd4
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTest.java
@@ -0,0 +1,111 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.pipeline;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.nn.UpdatesStrategy;
+import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate;
+import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator;
+import org.apache.ignite.ml.preprocessing.minmaxscaling.MinMaxScalerTrainer;
+import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer;
+import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionSGDTrainer;
+import org.junit.Test;
+
+/**
+ * Tests for {@link Pipeline}.
+ */
+public class PipelineTest extends TrainerTest {
+    /**
+     * Test trainer on classification model y = x.
+     */
+    @Test
+    public void testTrainWithTheLinearlySeparableCase() {
+        Map<Integer, Double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++) {
+            double[] row = twoLinearlySeparableClasses[i];
+            Double[] convertedRow = new Double[row.length];
+            for (int j = 0; j < row.length; j++)
+                convertedRow[j] = row[j];
+            cacheMock.put(i, convertedRow);
+        }
+
+        LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
+            new SimpleGDUpdateCalculator().withLearningRate(0.2),
+            SimpleGDParameterUpdate::sumLocal,
+            SimpleGDParameterUpdate::avg
+        ), 100000, 10, 100, 123L);
+
+        PipelineMdl<Integer, Double[]> mdl = new Pipeline<Integer, Double[], Vector>()
+            .addFeatureExtractor((k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)))
+            .addLabelExtractor((k, v) -> v[0])
+            .addPreprocessor(new MinMaxScalerTrainer<Integer, Object[]>())
+            .addPreprocessor(new NormalizationTrainer<Integer, Object[]>()
+                .withP(1))
+            .addTrainer(trainer)
+            .fit(
+                cacheMock,
+                parts
+            );
+
+        TestUtils.assertEquals(0, mdl.apply(VectorUtils.of(100, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 100)), PRECISION);
+    }
+
+    /**
+     * Test the missed final state.
+     */
+    @Test(expected = IllegalStateException.class)
+    public void testTrainWithMissedFinalStage() {
+        Map<Integer, Double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++) {
+            double[] row = twoLinearlySeparableClasses[i];
+            Double[] convertedRow = new Double[row.length];
+            for (int j = 0; j < row.length; j++)
+                convertedRow[j] = row[j];
+            cacheMock.put(i, convertedRow);
+        }
+
+        LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
+            new SimpleGDUpdateCalculator().withLearningRate(0.2),
+            SimpleGDParameterUpdate::sumLocal,
+            SimpleGDParameterUpdate::avg
+        ), 100000, 10, 100, 123L);
+
+        PipelineMdl<Integer, Double[]> mdl = new Pipeline<Integer, Double[], Vector>()
+            .addFeatureExtractor((k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)))
+            .addLabelExtractor((k, v) -> v[0])
+            .addPreprocessor(new MinMaxScalerTrainer<Integer, Object[]>())
+            .addPreprocessor(new NormalizationTrainer<Integer, Object[]>()
+                .withP(1))
+            .fit(
+                cacheMock,
+                parts
+            );
+
+        TestUtils.assertEquals(0, mdl.apply(VectorUtils.of(100, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 100)), PRECISION);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTestSuite.java
new file mode 100644
index 0000000..4c28db9
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/pipeline/PipelineTestSuite.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.pipeline;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for the pipeline.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    PipelineTest.class,
+    PipelineMdlTest.class
+})
+public class PipelineTestSuite {
+    // No-op.
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainerTest.java
index 8b10aaa..d465e82 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/binarization/BinarizationTrainerTest.java
@@ -22,12 +22,15 @@
 import java.util.Map;
 import org.apache.ignite.ml.dataset.DatasetBuilder;
 import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
 /**
  * Tests for {@link BinarizationTrainer}.
@@ -66,11 +69,38 @@
         BinarizationTrainer<Integer, double[]> binarizationTrainer = new BinarizationTrainer<Integer, double[]>()
             .withThreshold(10);
 
+        assertEquals(10., binarizationTrainer.getThreshold(), 0);
+
         BinarizationPreprocessor<Integer, double[]> preprocessor = binarizationTrainer.fit(
             datasetBuilder,
             (k, v) -> VectorUtils.of(v)
         );
 
+        assertEquals(binarizationTrainer.getThreshold(), preprocessor.getThreshold(), 0);
+
+        assertArrayEquals(new double[] {0, 0, 1}, preprocessor.apply(5, new double[] {1, 10, 100}).asArray(), 1e-8);
+    }
+
+    /** Tests default implementation of {@code fit()} method. */
+    @Test
+    public void testFitDefault() {
+        Map<Integer, double[]> data = new HashMap<>();
+        data.put(1, new double[] {2, 4, 1});
+        data.put(2, new double[] {1, 8, 22});
+        data.put(3, new double[] {4, 10, 100});
+        data.put(4, new double[] {0, 22, 300});
+
+        BinarizationTrainer<Integer, double[]> binarizationTrainer = new BinarizationTrainer<Integer, double[]>()
+            .withThreshold(10);
+
+        assertEquals(10., binarizationTrainer.getThreshold(), 0);
+
+        IgniteBiFunction<Integer, double[], Vector> preprocessor = binarizationTrainer.fit(
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(v)
+        );
+
         assertArrayEquals(new double[] {0, 0, 1}, preprocessor.apply(5, new double[] {1, 10, 100}).asArray(), 1e-8);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainerTest.java
index c0157e9..6d01901 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/encoding/EncoderTrainerTest.java
@@ -68,8 +68,8 @@
 
         EncoderTrainer<Integer, String[]> strEncoderTrainer = new EncoderTrainer<Integer, String[]>()
             .withEncoderType(EncoderType.STRING_ENCODER)
-            .encodeFeature(0)
-            .encodeFeature(1);
+            .withEncodedFeature(0)
+            .withEncodedFeature(1);
 
         EncoderPreprocessor<Integer, String[]> preprocessor = strEncoderTrainer.fit(
             datasetBuilder,
@@ -94,8 +94,8 @@
 
         EncoderTrainer<Integer, Object[]> strEncoderTrainer = new EncoderTrainer<Integer, Object[]>()
             .withEncoderType(EncoderType.ONE_HOT_ENCODER)
-            .encodeFeature(0)
-            .encodeFeature(1);
+            .withEncodedFeature(0)
+            .withEncodedFeature(1);
 
         EncoderPreprocessor<Integer, Object[]> preprocessor = strEncoderTrainer.fit(
             datasetBuilder,
@@ -120,8 +120,8 @@
 
         EncoderTrainer<Integer, Object[]> strEncoderTrainer = new EncoderTrainer<Integer, Object[]>()
             .withEncoderType(EncoderType.STRING_ENCODER)
-            .encodeFeature(0)
-            .encodeFeature(1);
+            .withEncodedFeature(0)
+            .withEncodedFeature(1);
 
         EncoderPreprocessor<Integer, Object[]> preprocessor = strEncoderTrainer.fit(
             datasetBuilder,
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessorTest.java
new file mode 100644
index 0000000..3c30f3e
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerPreprocessorTest.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
+
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertArrayEquals;
+
+/**
+ * Tests for {@link MaxAbsScalerPreprocessor}.
+ */
+public class MaxAbsScalerPreprocessorTest {
+    /** Tests {@code apply()} method. */
+    @Test
+    public void testApply() {
+        double[][] data = new double[][] {
+            {2., 4., 1.},
+            {1., 8., 22.},
+            {-4., 10., 100.},
+            {0., 22., 300.}
+        };
+        double[] maxAbs = new double[] {4, 22, 300};
+        MaxAbsScalerPreprocessor<Integer, Vector> preprocessor = new MaxAbsScalerPreprocessor<>(
+            maxAbs,
+            (k, v) -> v
+        );
+
+        double[][] expectedData = new double[][] {
+            {.5, 4. / 22, 1. / 300},
+            {.25, 8. / 22, 22. / 300},
+            {-1., 10. / 22, 100. / 300},
+            {0., 22. / 22, 300. / 300}
+        };
+
+        for (int i = 0; i < data.length; i++)
+            assertArrayEquals(expectedData[i], preprocessor.apply(i, VectorUtils.of(data[i])).asArray(), 1e-8);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainerTest.java
new file mode 100644
index 0000000..5711660
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/maxabsscaling/MaxAbsScalerTrainerTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.preprocessing.maxabsscaling;
+
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.ml.dataset.DatasetBuilder;
+import org.apache.ignite.ml.dataset.impl.local.LocalDatasetBuilder;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import static org.junit.Assert.assertArrayEquals;
+
+/**
+ * Tests for {@link MaxAbsScalerTrainer}.
+ */
+@RunWith(Parameterized.class)
+public class MaxAbsScalerTrainerTest {
+    /** Parameters. */
+    @Parameterized.Parameters(name = "Data divided on {0} partitions")
+    public static Iterable<Integer[]> data() {
+        return Arrays.asList(
+            new Integer[] {1},
+            new Integer[] {2},
+            new Integer[] {3},
+            new Integer[] {5},
+            new Integer[] {7},
+            new Integer[] {100},
+            new Integer[] {1000}
+        );
+    }
+
+    /** Number of partitions. */
+    @Parameterized.Parameter
+    public int parts;
+
+    /** Tests {@code fit()} method. */
+    @Test
+    public void testFit() {
+        Map<Integer, Vector> data = new HashMap<>();
+        data.put(1, VectorUtils.of(2, -4, 1));
+        data.put(2, VectorUtils.of(1, -8, 22));
+        data.put(3, VectorUtils.of(-4, 10, 100));
+        data.put(4, VectorUtils.of(0, 22, 300));
+
+        DatasetBuilder<Integer, Vector> datasetBuilder = new LocalDatasetBuilder<>(data, parts);
+
+        MaxAbsScalerTrainer<Integer, Vector> standardizationTrainer = new MaxAbsScalerTrainer<>();
+
+        MaxAbsScalerPreprocessor<Integer, Vector> preprocessor = standardizationTrainer.fit(
+            datasetBuilder,
+            (k, v) -> v
+        );
+
+        assertArrayEquals(new double[] {4, 22, 300}, preprocessor.getMaxAbs(), 1e-8);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/normalization/NormalizationTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/normalization/NormalizationTrainerTest.java
index b962701..7b02f20 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/normalization/NormalizationTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/preprocessing/normalization/NormalizationTrainerTest.java
@@ -29,6 +29,7 @@
 import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
 
 /**
  * Tests for {@link BinarizationTrainer}.
@@ -67,11 +68,15 @@
         NormalizationTrainer<Integer, double[]> normalizationTrainer = new NormalizationTrainer<Integer, double[]>()
             .withP(3);
 
+        assertEquals(3., normalizationTrainer.p(), 0);
+
         NormalizationPreprocessor<Integer, double[]> preprocessor = normalizationTrainer.fit(
             datasetBuilder,
             (k, v) -> VectorUtils.of(v)
         );
 
+        assertEquals(normalizationTrainer.p(), preprocessor.p(), 0);
+
         assertArrayEquals(new double[] {0.125, 0.99, 0.125}, preprocessor.apply(5, new double[]{1., 8., 1.}).asArray(), 1e-2);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java
index f771dae..9c35ac7 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionLSQRTrainerTest.java
@@ -21,10 +21,9 @@
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
+import org.apache.ignite.ml.common.TrainerTest;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -32,42 +31,23 @@
 /**
  * Tests for {@link LinearRegressionLSQRTrainer}.
  */
-@RunWith(Parameterized.class)
-public class LinearRegressionLSQRTrainerTest {
-    /** Parameters. */
-    @Parameterized.Parameters(name = "Data divided on {0} partitions")
-    public static Iterable<Integer[]> data() {
-        return Arrays.asList(
-            new Integer[] {1},
-            new Integer[] {2},
-            new Integer[] {3},
-            new Integer[] {5},
-            new Integer[] {7},
-            new Integer[] {100},
-            new Integer[] {1000}
-        );
-    }
-
-    /** Number of partitions. */
-    @Parameterized.Parameter
-    public int parts;
-
+public class LinearRegressionLSQRTrainerTest extends TrainerTest {
     /**
      * Tests {@code fit()} method on a simple small dataset.
      */
     @Test
     public void testSmallDataFit() {
         Map<Integer, double[]> data = new HashMap<>();
-        data.put(0, new double[] {-1.0915526, 1.81983527, -0.91409478, 0.70890712, -24.55724107});
-        data.put(1, new double[] {-0.61072904, 0.37545517, 0.21705352, 0.09516495, -26.57226867});
-        data.put(2, new double[] {0.05485406, 0.88219898, -0.80584547, 0.94668307, 61.80919728});
-        data.put(3, new double[] {-0.24835094, -0.34000053, -1.69984651, -1.45902635, -161.65525991});
-        data.put(4, new double[] {0.63675392, 0.31675535, 0.38837437, -1.1221971, -14.46432611});
-        data.put(5, new double[] {0.14194017, 2.18158997, -0.28397346, -0.62090588, -3.2122197});
-        data.put(6, new double[] {-0.53487507, 1.4454797, 0.21570443, -0.54161422, -46.5469012});
-        data.put(7, new double[] {-1.58812173, -0.73216803, -2.15670676, -1.03195988, -247.23559889});
-        data.put(8, new double[] {0.20702671, 0.92864654, 0.32721202, -0.09047503, 31.61484949});
-        data.put(9, new double[] {-0.37890345, -0.04846179, -0.84122753, -1.14667474, -124.92598583});
+        data.put(0, new double[]{-1.0915526, 1.81983527, -0.91409478, 0.70890712, -24.55724107});
+        data.put(1, new double[]{-0.61072904, 0.37545517, 0.21705352, 0.09516495, -26.57226867});
+        data.put(2, new double[]{0.05485406, 0.88219898, -0.80584547, 0.94668307, 61.80919728});
+        data.put(3, new double[]{-0.24835094, -0.34000053, -1.69984651, -1.45902635, -161.65525991});
+        data.put(4, new double[]{0.63675392, 0.31675535, 0.38837437, -1.1221971, -14.46432611});
+        data.put(5, new double[]{0.14194017, 2.18158997, -0.28397346, -0.62090588, -3.2122197});
+        data.put(6, new double[]{-0.53487507, 1.4454797, 0.21570443, -0.54161422, -46.5469012});
+        data.put(7, new double[]{-1.58812173, -0.73216803, -2.15670676, -1.03195988, -247.23559889});
+        data.put(8, new double[]{0.20702671, 0.92864654, 0.32721202, -0.09047503, 31.61484949});
+        data.put(9, new double[]{-0.37890345, -0.04846179, -0.84122753, -1.14667474, -124.92598583});
 
         LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer();
 
@@ -79,7 +59,7 @@
         );
 
         assertArrayEquals(
-            new double[]{72.26948107,  15.95144674,  24.07403921,  66.73038781},
+            new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781},
             mdl.getWeights().getStorage().data(),
             1e-6
         );
@@ -121,4 +101,55 @@
 
         assertEquals(intercept, mdl.getIntercept(), 1e-6);
     }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Random rnd = new Random(0);
+        Map<Integer, double[]> data = new HashMap<>();
+        double[] coef = new double[100];
+        double intercept = rnd.nextDouble() * 10;
+
+        for (int i = 0; i < 100000; i++) {
+            double[] x = new double[coef.length + 1];
+
+            for (int j = 0; j < coef.length; j++)
+                x[j] = rnd.nextDouble() * 10;
+
+            x[coef.length] = intercept;
+
+            data.put(i, x);
+        }
+
+        LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer();
+
+        LinearRegressionModel originalModel = trainer.fit(
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[coef.length]
+        );
+
+        LinearRegressionModel updatedOnSameDS = trainer.update(
+            originalModel,
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[coef.length]
+        );
+
+        LinearRegressionModel updatedOnEmpyDS = trainer.update(
+            originalModel,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[coef.length]
+        );
+
+        assertArrayEquals(originalModel.getWeights().getStorage().data(), updatedOnSameDS.getWeights().getStorage().data(), 1e-6);
+        assertEquals(originalModel.getIntercept(), updatedOnSameDS.getIntercept(), 1e-6);
+
+        assertArrayEquals(originalModel.getWeights().getStorage().data(), updatedOnEmpyDS.getWeights().getStorage().data(), 1e-6);
+        assertEquals(originalModel.getIntercept(), updatedOnEmpyDS.getIntercept(), 1e-6);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModelTest.java
index 71d831d..66871b0 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModelTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionModelTest.java
@@ -25,6 +25,8 @@
 import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassModel;
 import org.junit.Test;
 
+import static org.junit.Assert.assertTrue;
+
 /**
  * Tests for {@link LinearRegressionModel}.
  */
@@ -38,6 +40,10 @@
         Vector weights = new DenseVector(new double[]{2.0, 3.0});
         LinearRegressionModel mdl = new LinearRegressionModel(weights, 1.0);
 
+        assertTrue(mdl.toString().length() > 0);
+        assertTrue(mdl.toString(true).length() > 0);
+        assertTrue(mdl.toString(false).length() > 0);
+
         Vector observation = new DenseVector(new double[]{1.0, 1.0});
         TestUtils.assertEquals(1.0 + 2.0 * 1.0 + 3.0 * 1.0, mdl.apply(observation), PRECISION);
 
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java
index ee38938..86b0f27 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/linear/LinearRegressionSGDTrainerTest.java
@@ -20,13 +20,12 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
+import org.apache.ignite.ml.common.TrainerTest;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
 import org.apache.ignite.ml.nn.UpdatesStrategy;
 import org.apache.ignite.ml.optimization.updatecalculators.RPropParameterUpdate;
 import org.apache.ignite.ml.optimization.updatecalculators.RPropUpdateCalculator;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -34,47 +33,29 @@
 /**
  * Tests for {@link LinearRegressionSGDTrainer}.
  */
-@RunWith(Parameterized.class)
-public class LinearRegressionSGDTrainerTest {
-    /** Parameters. */
-    @Parameterized.Parameters(name = "Data divided on {0} partitions")
-    public static Iterable<Integer[]> data() {
-        return Arrays.asList(
-            new Integer[] {1},
-            new Integer[] {2},
-            new Integer[] {3},
-            new Integer[] {5},
-            new Integer[] {7},
-            new Integer[] {100}
-        );
-    }
-
-    /** Number of partitions. */
-    @Parameterized.Parameter
-    public int parts;
-
+public class LinearRegressionSGDTrainerTest extends TrainerTest {
     /**
      * Tests {@code fit()} method on a simple small dataset.
      */
     @Test
     public void testSmallDataFit() {
         Map<Integer, double[]> data = new HashMap<>();
-        data.put(0, new double[] {-1.0915526, 1.81983527, -0.91409478, 0.70890712, -24.55724107});
-        data.put(1, new double[] {-0.61072904, 0.37545517, 0.21705352, 0.09516495, -26.57226867});
-        data.put(2, new double[] {0.05485406, 0.88219898, -0.80584547, 0.94668307, 61.80919728});
-        data.put(3, new double[] {-0.24835094, -0.34000053, -1.69984651, -1.45902635, -161.65525991});
-        data.put(4, new double[] {0.63675392, 0.31675535, 0.38837437, -1.1221971, -14.46432611});
-        data.put(5, new double[] {0.14194017, 2.18158997, -0.28397346, -0.62090588, -3.2122197});
-        data.put(6, new double[] {-0.53487507, 1.4454797, 0.21570443, -0.54161422, -46.5469012});
-        data.put(7, new double[] {-1.58812173, -0.73216803, -2.15670676, -1.03195988, -247.23559889});
-        data.put(8, new double[] {0.20702671, 0.92864654, 0.32721202, -0.09047503, 31.61484949});
-        data.put(9, new double[] {-0.37890345, -0.04846179, -0.84122753, -1.14667474, -124.92598583});
+        data.put(0, new double[]{-1.0915526, 1.81983527, -0.91409478, 0.70890712, -24.55724107});
+        data.put(1, new double[]{-0.61072904, 0.37545517, 0.21705352, 0.09516495, -26.57226867});
+        data.put(2, new double[]{0.05485406, 0.88219898, -0.80584547, 0.94668307, 61.80919728});
+        data.put(3, new double[]{-0.24835094, -0.34000053, -1.69984651, -1.45902635, -161.65525991});
+        data.put(4, new double[]{0.63675392, 0.31675535, 0.38837437, -1.1221971, -14.46432611});
+        data.put(5, new double[]{0.14194017, 2.18158997, -0.28397346, -0.62090588, -3.2122197});
+        data.put(6, new double[]{-0.53487507, 1.4454797, 0.21570443, -0.54161422, -46.5469012});
+        data.put(7, new double[]{-1.58812173, -0.73216803, -2.15670676, -1.03195988, -247.23559889});
+        data.put(8, new double[]{0.20702671, 0.92864654, 0.32721202, -0.09047503, 31.61484949});
+        data.put(9, new double[]{-0.37890345, -0.04846179, -0.84122753, -1.14667474, -124.92598583});
 
         LinearRegressionSGDTrainer<?> trainer = new LinearRegressionSGDTrainer<>(new UpdatesStrategy<>(
             new RPropUpdateCalculator(),
             RPropParameterUpdate::sumLocal,
             RPropParameterUpdate::avg
-        ), 100000,  10, 100, 123L);
+        ), 100000, 10, 100, 123L);
 
         LinearRegressionModel mdl = trainer.fit(
             data,
@@ -84,11 +65,73 @@
         );
 
         assertArrayEquals(
-            new double[] {72.26948107, 15.95144674, 24.07403921, 66.73038781},
+            new double[]{72.26948107, 15.95144674, 24.07403921, 66.73038781},
             mdl.getWeights().getStorage().data(),
             1e-1
         );
 
         assertEquals(2.8421709430404007e-14, mdl.getIntercept(), 1e-1);
     }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> data = new HashMap<>();
+        data.put(0, new double[]{-1.0915526, 1.81983527, -0.91409478, 0.70890712, -24.55724107});
+        data.put(1, new double[]{-0.61072904, 0.37545517, 0.21705352, 0.09516495, -26.57226867});
+        data.put(2, new double[]{0.05485406, 0.88219898, -0.80584547, 0.94668307, 61.80919728});
+        data.put(3, new double[]{-0.24835094, -0.34000053, -1.69984651, -1.45902635, -161.65525991});
+        data.put(4, new double[]{0.63675392, 0.31675535, 0.38837437, -1.1221971, -14.46432611});
+        data.put(5, new double[]{0.14194017, 2.18158997, -0.28397346, -0.62090588, -3.2122197});
+        data.put(6, new double[]{-0.53487507, 1.4454797, 0.21570443, -0.54161422, -46.5469012});
+        data.put(7, new double[]{-1.58812173, -0.73216803, -2.15670676, -1.03195988, -247.23559889});
+        data.put(8, new double[]{0.20702671, 0.92864654, 0.32721202, -0.09047503, 31.61484949});
+        data.put(9, new double[]{-0.37890345, -0.04846179, -0.84122753, -1.14667474, -124.92598583});
+
+        LinearRegressionSGDTrainer<?> trainer = new LinearRegressionSGDTrainer<>(new UpdatesStrategy<>(
+            new RPropUpdateCalculator(),
+            RPropParameterUpdate::sumLocal,
+            RPropParameterUpdate::avg
+        ), 100000, 10, 100, 0L);
+
+        LinearRegressionModel originalModel = trainer.withSeed(0).fit(
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[4]
+        );
+
+
+        LinearRegressionModel updatedOnSameDS = trainer.withSeed(0).update(
+            originalModel,
+            data,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[4]
+        );
+
+        LinearRegressionModel updatedOnEmptyDS = trainer.withSeed(0).update(
+            originalModel,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 0, v.length - 1)),
+            (k, v) -> v[4]
+        );
+
+        assertArrayEquals(
+            originalModel.getWeights().getStorage().data(),
+            updatedOnSameDS.getWeights().getStorage().data(),
+            1.0
+        );
+
+        assertEquals(originalModel.getIntercept(), updatedOnSameDS.getIntercept(), 1.0);
+
+        assertArrayEquals(
+            originalModel.getWeights().getStorage().data(),
+            updatedOnEmptyDS.getWeights().getStorage().data(),
+            1e-1
+        );
+
+        assertEquals(originalModel.getIntercept(), updatedOnEmptyDS.getIntercept(), 1e-1);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogRegMultiClassTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogRegMultiClassTrainerTest.java
index 1d25524..73c88428 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogRegMultiClassTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogRegMultiClassTrainerTest.java
@@ -19,53 +19,34 @@
 
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.nn.UpdatesStrategy;
 import org.apache.ignite.ml.optimization.SmoothParametrized;
 import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate;
 import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator;
 import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassModel;
 import org.apache.ignite.ml.regressions.logistic.multiclass.LogRegressionMultiClassTrainer;
-import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationTrainer;
 import org.junit.Assert;
 import org.junit.Test;
 
 /**
- * Tests for {@link SVMLinearBinaryClassificationTrainer}.
+ * Tests for {@link LogRegressionMultiClassTrainer}.
  */
-public class LogRegMultiClassTrainerTest {
-    /** Fixed size of Dataset. */
-    private static final int AMOUNT_OF_OBSERVATIONS = 1000;
-
-    /** Fixed size of columns in Dataset. */
-    private static final int AMOUNT_OF_FEATURES = 2;
-
-    /** Precision in test checks. */
-    private static final double PRECISION = 1e-2;
-
+public class LogRegMultiClassTrainerTest extends TrainerTest {
     /**
-     * Test trainer on classification model y = x.
+     * Test trainer on 4 sets grouped around of square vertices.
      */
     @Test
     public void testTrainWithTheLinearlySeparableCase() {
-        Map<Integer, double[]> data = new HashMap<>();
+        Map<Integer, double[]> cacheMock = new HashMap<>();
 
-        ThreadLocalRandom rndX = ThreadLocalRandom.current();
-        ThreadLocalRandom rndY = ThreadLocalRandom.current();
-
-        for (int i = 0; i < AMOUNT_OF_OBSERVATIONS; i++) {
-            double x = rndX.nextDouble(-1000, 1000);
-            double y = rndY.nextDouble(-1000, 1000);
-            double[] vec = new double[AMOUNT_OF_FEATURES + 1];
-            vec[0] = y - x > 0 ? 1 : -1; // assign label.
-            vec[1] = x;
-            vec[2] = y;
-            data.put(i, vec);
-        }
+        for (int i = 0; i < fourSetsInSquareVertices.length; i++)
+            cacheMock.put(i, fourSetsInSquareVertices[i]);
 
         final UpdatesStrategy<SmoothParametrized, SimpleGDParameterUpdate> stgy = new UpdatesStrategy<>(
             new SimpleGDUpdateCalculator(0.2),
@@ -80,20 +61,82 @@
             .withBatchSize(100)
             .withSeed(123L);
 
-        Assert.assertEquals(trainer.amountOfIterations(), 1000);
-        Assert.assertEquals(trainer.amountOfLocIterations(), 10);
-        Assert.assertEquals(trainer.batchSize(), 100, PRECISION);
+        Assert.assertEquals(trainer.getAmountOfIterations(), 1000);
+        Assert.assertEquals(trainer.getAmountOfLocIterations(), 10);
+        Assert.assertEquals(trainer.getBatchSize(), 100, PRECISION);
         Assert.assertEquals(trainer.seed(), 123L);
-        Assert.assertEquals(trainer.updatesStgy(), stgy);
+        Assert.assertEquals(trainer.getUpdatesStgy(), stgy);
 
         LogRegressionMultiClassModel mdl = trainer.fit(
-            data,
-            10,
+            cacheMock,
+            parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
             (k, v) -> v[0]
         );
 
-        TestUtils.assertEquals(-1, mdl.apply(new DenseVector(new double[]{100, 10})), PRECISION);
-        TestUtils.assertEquals(1, mdl.apply(new DenseVector(new double[]{10, 100})), PRECISION);
+        Assert.assertTrue(mdl.toString().length() > 0);
+        Assert.assertTrue(mdl.toString(true).length() > 0);
+        Assert.assertTrue(mdl.toString(false).length() > 0);
+
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(-10, 10)), PRECISION);
+        TestUtils.assertEquals(2, mdl.apply(VectorUtils.of(-10, -10)), PRECISION);
+        TestUtils.assertEquals(3, mdl.apply(VectorUtils.of(10, -10)), PRECISION);
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < fourSetsInSquareVertices.length; i++)
+            cacheMock.put(i, fourSetsInSquareVertices[i]);
+
+        LogRegressionMultiClassTrainer<?> trainer = new LogRegressionMultiClassTrainer<>()
+            .withUpdatesStgy(new UpdatesStrategy<>(
+                new SimpleGDUpdateCalculator(0.2),
+                SimpleGDParameterUpdate::sumLocal,
+                SimpleGDParameterUpdate::avg
+            ))
+            .withAmountOfIterations(1000)
+            .withAmountOfLocIterations(10)
+            .withBatchSize(100)
+            .withSeed(123L);
+
+        LogRegressionMultiClassModel originalModel = trainer.fit(
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        LogRegressionMultiClassModel updatedOnSameDS = trainer.update(
+            originalModel,
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        LogRegressionMultiClassModel updatedOnEmptyDS = trainer.update(
+            originalModel,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        List<Vector> vectors = Arrays.asList(
+            VectorUtils.of(10, 10),
+            VectorUtils.of(-10, 10),
+            VectorUtils.of(-10, -10),
+            VectorUtils.of(10, -10)
+        );
+
+
+        for (Vector vec : vectors) {
+            TestUtils.assertEquals(originalModel.apply(vec), updatedOnSameDS.apply(vec), PRECISION);
+            TestUtils.assertEquals(originalModel.apply(vec), updatedOnEmptyDS.apply(vec), PRECISION);
+        }
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionModelTest.java
index bb6a77d..89c9cca 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionModelTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionModelTest.java
@@ -24,6 +24,10 @@
 import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
 /**
  * Tests for {@link LogisticRegressionModel}.
  */
@@ -35,8 +39,35 @@
     @Test
     public void testPredict() {
         Vector weights = new DenseVector(new double[]{2.0, 3.0});
-        LogisticRegressionModel mdl = new LogisticRegressionModel(weights, 1.0).withRawLabels(true);
 
+        assertFalse(new LogisticRegressionModel(weights, 1.0).isKeepingRawLabels());
+
+        assertEquals(0.1, new LogisticRegressionModel(weights, 1.0).withThreshold(0.1).threshold(), 0);
+
+        assertTrue(new LogisticRegressionModel(weights, 1.0).toString().length() > 0);
+        assertTrue(new LogisticRegressionModel(weights, 1.0).toString(true).length() > 0);
+        assertTrue(new LogisticRegressionModel(weights, 1.0).toString(false).length() > 0);
+
+        verifyPredict(new LogisticRegressionModel(weights, 1.0).withRawLabels(true));
+        verifyPredict(new LogisticRegressionModel(null, 1.0).withRawLabels(true).withWeights(weights));
+        verifyPredict(new LogisticRegressionModel(weights, 1.0).withRawLabels(true).withThreshold(0.5));
+        verifyPredict(new LogisticRegressionModel(weights, 0.0).withRawLabels(true).withIntercept(1.0));
+    }
+
+    /** */
+    @Test(expected = CardinalityException.class)
+    public void testPredictOnAnObservationWithWrongCardinality() {
+        Vector weights = new DenseVector(new double[]{2.0, 3.0});
+
+        LogisticRegressionModel mdl = new LogisticRegressionModel(weights, 1.0);
+
+        Vector observation = new DenseVector(new double[]{1.0});
+
+        mdl.apply(observation);
+    }
+
+    /** */
+    private void verifyPredict(LogisticRegressionModel mdl) {
         Vector observation = new DenseVector(new double[]{1.0, 1.0});
         TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 1.0 + 3.0 * 1.0), mdl.apply(observation), PRECISION);
 
@@ -53,18 +84,6 @@
         TestUtils.assertEquals(sigmoid(1.0 + 2.0 * 1.0 - 3.0 * 2.0), mdl.apply(observation), PRECISION);
     }
 
-    /** */
-    @Test(expected = CardinalityException.class)
-    public void testPredictOnAnObservationWithWrongCardinality() {
-        Vector weights = new DenseVector(new double[]{2.0, 3.0});
-
-        LogisticRegressionModel mdl = new LogisticRegressionModel(weights, 1.0);
-
-        Vector observation = new DenseVector(new double[]{1.0});
-
-        mdl.apply(observation);
-    }
-
     /**
      * Sigmoid function.
      * @param z The regression value.
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionSGDTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionSGDTrainerTest.java
index ad4aaf1..1da0d1a 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionSGDTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/regressions/logistic/LogisticRegressionSGDTrainerTest.java
@@ -20,84 +20,90 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.apache.ignite.ml.nn.UpdatesStrategy;
 import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate;
 import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator;
 import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionModel;
 import org.apache.ignite.ml.regressions.logistic.binomial.LogisticRegressionSGDTrainer;
 import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
 
 /**
- * Tests for {@LogisticRegressionSGDTrainer}.
+ * Tests for {@link LogisticRegressionSGDTrainer}.
  */
-@RunWith(Parameterized.class)
-public class LogisticRegressionSGDTrainerTest {
-    /** Fixed size of Dataset. */
-    private static final int AMOUNT_OF_OBSERVATIONS = 1000;
-
-    /** Fixed size of columns in Dataset. */
-    private static final int AMOUNT_OF_FEATURES = 2;
-
-    /** Precision in test checks. */
-    private static final double PRECISION = 1e-2;
-
-    /** Parameters. */
-    @Parameterized.Parameters(name = "Data divided on {0} partitions")
-    public static Iterable<Integer[]> data() {
-        return Arrays.asList(
-            new Integer[] {1},
-            new Integer[] {2},
-            new Integer[] {3},
-            new Integer[] {5},
-            new Integer[] {7},
-            new Integer[] {100}
-        );
-    }
-
-    /** Number of partitions. */
-    @Parameterized.Parameter
-    public int parts;
-
+public class LogisticRegressionSGDTrainerTest extends TrainerTest {
     /**
      * Test trainer on classification model y = x.
      */
     @Test
     public void trainWithTheLinearlySeparableCase() {
-        Map<Integer, double[]> data = new HashMap<>();
+        Map<Integer, double[]> cacheMock = new HashMap<>();
 
-        ThreadLocalRandom rndX = ThreadLocalRandom.current();
-        ThreadLocalRandom rndY = ThreadLocalRandom.current();
-
-        for (int i = 0; i < AMOUNT_OF_OBSERVATIONS; i++) {
-            double x = rndX.nextDouble(-1000, 1000);
-            double y = rndY.nextDouble(-1000, 1000);
-            double[] vec = new double[AMOUNT_OF_FEATURES + 1];
-            vec[0] = y - x > 0 ? 1 : 0; // assign label.
-            vec[1] = x;
-            vec[2] = y;
-            data.put(i, vec);
-        }
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
 
         LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
-            new SimpleGDUpdateCalculator(0.2),
+            new SimpleGDUpdateCalculator().withLearningRate(0.2),
             SimpleGDParameterUpdate::sumLocal,
             SimpleGDParameterUpdate::avg
-        ), 100000,  10, 100, 123L);
+        ), 100000, 10, 100, 123L);
 
         LogisticRegressionModel mdl = trainer.fit(
-            data,
-            10,
+            cacheMock,
+            parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
             (k, v) -> v[0]
         );
 
-        TestUtils.assertEquals(0, mdl.apply(new DenseVector(new double[]{100, 10})), PRECISION);
-        TestUtils.assertEquals(1, mdl.apply(new DenseVector(new double[]{10, 100})), PRECISION);
+        TestUtils.assertEquals(0, mdl.apply(VectorUtils.of(100, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 100)), PRECISION);
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
+
+        LogisticRegressionSGDTrainer<?> trainer = new LogisticRegressionSGDTrainer<>(new UpdatesStrategy<>(
+            new SimpleGDUpdateCalculator().withLearningRate(0.2),
+            SimpleGDParameterUpdate::sumLocal,
+            SimpleGDParameterUpdate::avg
+        ), 100000, 10, 100, 123L);
+
+        LogisticRegressionModel originalModel = trainer.fit(
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        LogisticRegressionModel updatedOnSameDS = trainer.update(
+            originalModel,
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        LogisticRegressionModel updatedOnEmptyDS = trainer.update(
+            originalModel,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        Vector v1 = VectorUtils.of(100, 10);
+        Vector v2 = VectorUtils.of(10, 100);
+        TestUtils.assertEquals(originalModel.apply(v1), updatedOnSameDS.apply(v1), PRECISION);
+        TestUtils.assertEquals(originalModel.apply(v2), updatedOnSameDS.apply(v2), PRECISION);
+        TestUtils.assertEquals(originalModel.apply(v2), updatedOnEmptyDS.apply(v2), PRECISION);
+        TestUtils.assertEquals(originalModel.apply(v1), updatedOnEmptyDS.apply(v1), PRECISION);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/SelectionTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/SelectionTestSuite.java
index 3adae79..21c605b 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/SelectionTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/SelectionTestSuite.java
@@ -21,6 +21,7 @@
 import org.apache.ignite.ml.selection.paramgrid.ParameterSetGeneratorTest;
 import org.apache.ignite.ml.selection.scoring.cursor.CacheBasedLabelPairCursorTest;
 import org.apache.ignite.ml.selection.scoring.cursor.LocalLabelPairCursorTest;
+import org.apache.ignite.ml.selection.scoring.evaluator.EvaluatorTest;
 import org.apache.ignite.ml.selection.scoring.metric.AccuracyTest;
 import org.apache.ignite.ml.selection.scoring.metric.FmeasureTest;
 import org.apache.ignite.ml.selection.scoring.metric.PrecisionTest;
@@ -36,6 +37,7 @@
 @RunWith(Suite.class)
 @Suite.SuiteClasses({
     CrossValidationTest.class,
+    EvaluatorTest.class,
     ParameterSetGeneratorTest.class,
     CacheBasedLabelPairCursorTest.class,
     LocalLabelPairCursorTest.class,
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java
index 90918d8..3e8b9dd 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/cv/CrossValidationTest.java
@@ -47,7 +47,7 @@
 
         int folds = 4;
 
-        double[] scores = scoreCalculator.score(
+        verifyScores(folds, scoreCalculator.score(
             trainer,
             new Accuracy<>(),
             data,
@@ -55,12 +55,18 @@
             (k, v) -> VectorUtils.of(k),
             (k, v) -> v,
             folds
-        );
+        ));
 
-        assertEquals(folds, scores.length);
-
-        for (int i = 0; i < folds; i++)
-            assertEquals(1, scores[i], 1e-1);
+        verifyScores(folds, scoreCalculator.score(
+            trainer,
+            new Accuracy<>(),
+            data,
+            (e1, e2) -> true,
+            1,
+            (k, v) -> VectorUtils.of(k),
+            (k, v) -> v,
+            folds
+        ));
     }
 
     /** */
@@ -93,4 +99,12 @@
         for (int i = 0; i < folds; i++)
             assertTrue(scores[i] < 0.6);
     }
+
+    /** */
+    private void verifyScores(int folds, double[] scores) {
+        assertEquals(folds, scores.length);
+
+        for (int i = 0; i < folds; i++)
+            assertEquals(1, scores[i], 1e-1);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/evaluator/EvaluatorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/evaluator/EvaluatorTest.java
new file mode 100644
index 0000000..6f7aa36
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/evaluator/EvaluatorTest.java
@@ -0,0 +1,398 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.selection.scoring.evaluator;
+
+import java.text.NumberFormat;
+import java.text.ParseException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Locale;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicReference;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction;
+import org.apache.ignite.configuration.CacheConfiguration;
+import org.apache.ignite.internal.util.IgniteUtils;
+import org.apache.ignite.ml.math.functions.IgniteBiFunction;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderTrainer;
+import org.apache.ignite.ml.preprocessing.encoding.EncoderType;
+import org.apache.ignite.ml.preprocessing.imputing.ImputerTrainer;
+import org.apache.ignite.ml.preprocessing.minmaxscaling.MinMaxScalerTrainer;
+import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer;
+import org.apache.ignite.ml.selection.cv.CrossValidation;
+import org.apache.ignite.ml.selection.cv.CrossValidationResult;
+import org.apache.ignite.ml.selection.paramgrid.ParamGrid;
+import org.apache.ignite.ml.selection.scoring.metric.Accuracy;
+import org.apache.ignite.ml.selection.split.TrainTestDatasetSplitter;
+import org.apache.ignite.ml.selection.split.TrainTestSplit;
+import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer;
+import org.apache.ignite.ml.tree.DecisionTreeNode;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.apache.ignite.thread.IgniteThread;
+
+import static org.junit.Assert.assertArrayEquals;
+
+/**
+ * Tests for {@link Evaluator} that require to start the whole Ignite infrastructure. IMPL NOTE based on
+ * Step_8_CV_with_Param_Grid example.
+ */
+public class EvaluatorTest extends GridCommonAbstractTest {
+    /** Number of nodes in grid */
+    private static final int NODE_COUNT = 3;
+
+    /** Ignite instance. */
+    private Ignite ignite;
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        for (int i = 1; i <= NODE_COUNT; i++)
+            startGrid(i);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() {
+        stopAllGrids();
+    }
+
+    /**
+     * {@inheritDoc}
+     */
+    @Override protected void beforeTest() {
+        /* Grid instance. */
+        ignite = grid(NODE_COUNT);
+        ignite.configuration().setPeerClassLoadingEnabled(true);
+        IgniteUtils.setCurrentIgniteName(ignite.configuration().getIgniteInstanceName());
+    }
+
+    /** */
+    public void testBasic() throws InterruptedException {
+        AtomicReference<Double> actualAccuracy = new AtomicReference<>(null);
+        AtomicReference<Double> actualAccuracy2 = new AtomicReference<>(null);
+        AtomicReference<CrossValidationResult> res = new AtomicReference<>(null);
+        List<double[]> actualScores = new ArrayList<>();
+
+        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+            EvaluatorTest.class.getSimpleName(), () -> {
+            CVContext ctx = new CVContext(ignite);
+
+            ParamGrid paramGrid = new ParamGrid()
+                .addHyperParam("maxDeep", new Double[] {1.0, 2.0, 3.0, 4.0, 5.0, 10.0, 10.0})
+                .addHyperParam("minImpurityDecrease", new Double[] {0.0, 0.25, 0.5});
+
+            CrossValidationResult crossValidationRes =
+                new CrossValidation<DecisionTreeNode, Double, Integer, Object[]>().score(
+                    new DecisionTreeClassificationTrainer(),
+                    new Accuracy<>(),
+                    ctx.ignite,
+                    ctx.cache,
+                    ctx.split.getTrainFilter(),
+                    ctx.preprocessor,
+                    ctx.lbExtractor,
+                    3,
+                    paramGrid
+                );
+
+            res.set(crossValidationRes);
+
+            DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer()
+                .withMaxDeep(crossValidationRes.getBest("maxDeep"))
+                .withMinImpurityDecrease(crossValidationRes.getBest("minImpurityDecrease"));
+
+            crossValidationRes.getScoringBoard().forEach((hyperParams, score) -> actualScores.add(score));
+
+            ctx.evaluate(trainer, actualAccuracy, actualAccuracy2);
+        });
+
+        igniteThread.start();
+
+        igniteThread.join();
+
+        assertResults(res.get(), actualScores, actualAccuracy.get(), actualAccuracy2.get());
+    }
+
+    /** */
+    public void testBasic2() throws InterruptedException {
+        AtomicReference<Double> actualAccuracy = new AtomicReference<>(null);
+        AtomicReference<Double> actualAccuracy2 = new AtomicReference<>(null);
+        AtomicReference<double[]> res = new AtomicReference<>(null);
+
+        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+            EvaluatorTest.class.getSimpleName(), () -> {
+            CVContext ctx = new CVContext(ignite);
+
+            res.set(new CrossValidation<DecisionTreeNode, Double, Integer, Object[]>().score(
+                new DecisionTreeClassificationTrainer(),
+                new Accuracy<>(),
+                ctx.ignite,
+                ctx.cache,
+                ctx.split.getTrainFilter(),
+                ctx.preprocessor,
+                ctx.lbExtractor,
+                3
+            ));
+
+            DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer();
+
+            ctx.evaluate(trainer, actualAccuracy, actualAccuracy2);
+        });
+
+        igniteThread.start();
+
+        igniteThread.join();
+
+        assertResults2(res.get(), actualAccuracy.get(), actualAccuracy2.get());
+    }
+
+    /** */
+    public void testBasic3() throws InterruptedException {
+        AtomicReference<Double> actualAccuracy = new AtomicReference<>(null);
+        AtomicReference<Double> actualAccuracy2 = new AtomicReference<>(null);
+        AtomicReference<double[]> res = new AtomicReference<>(null);
+
+        IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(),
+            EvaluatorTest.class.getSimpleName(), () -> {
+            CVContext ctx = new CVContext(ignite);
+
+            res.set(new CrossValidation<DecisionTreeNode, Double, Integer, Object[]>().score(
+                new DecisionTreeClassificationTrainer(),
+                new Accuracy<>(),
+                ctx.ignite,
+                ctx.cache,
+                ctx.preprocessor,
+                ctx.lbExtractor,
+                3
+            ));
+
+            DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer();
+
+            ctx.evaluate(trainer, actualAccuracy, actualAccuracy2);
+        });
+
+        igniteThread.start();
+
+        igniteThread.join();
+
+        assertResults2(res.get(), actualAccuracy.get(), actualAccuracy2.get());
+    }
+
+    /** */
+    private void assertResults(CrossValidationResult res, List<double[]> scores, double accuracy, double accuracy2) {
+        assertTrue(res.toString().length() > 0);
+        assertEquals("Best maxDeep", 1.0, res.getBest("maxDeep"));
+        assertEquals("Best minImpurityDecrease", 0.0, res.getBest("minImpurityDecrease"));
+        assertArrayEquals("Best score", new double[] {0.6666666666666666, 0.4, 0}, res.getBestScore(), 0);
+        assertEquals("Best hyper params size", 2, res.getBestHyperParams().size());
+        assertEquals("Best average score", 0.35555555555555557, res.getBestAvgScore());
+
+        assertEquals("Scores amount", 18, scores.size());
+
+        int idx = 0;
+        for (double[] actualScore : scores)
+            assertEquals("Score size at index " + idx++, 3, actualScore.length);
+
+        assertEquals("Accuracy", 1.0, accuracy);
+        assertTrue("Accuracy without filter", accuracy2 > 0.);
+    }
+
+    /** */
+    private void assertResults2(double[] scores, double accuracy, double accuracy2) {
+        assertEquals("Scores array length", 3, scores.length);
+
+        assertEquals("Accuracy", 1.0, accuracy);
+        assertTrue("Accuracy without filter", accuracy2 > 0.);
+    }
+
+    /** */
+    private static class CVContext {
+        /** */
+        private final Ignite ignite;
+        /** */
+        private final IgniteCache<Integer, Object[]> cache;
+        /** */
+        private final TrainTestSplit<Integer, Object[]> split;
+        /** */
+        private final IgniteBiFunction<Integer, Object[], Double> lbExtractor;
+        /** */
+        private final IgniteBiFunction<Integer, Object[], Vector> preprocessor;
+
+        /** */
+        CVContext(Ignite ignite) {
+            this.ignite = ignite;
+
+            cache = dataToCache();
+
+            split = new TrainTestDatasetSplitter<Integer, Object[]>().split(0.75);
+
+            lbExtractor = (k, v) -> (double)v[1];
+
+            // Tune hyperparams with K-fold Cross-Validation on the split training set.
+            preprocessor = setupProcessors();
+        }
+
+        /** */
+        void evaluate(DecisionTreeClassificationTrainer trainer,
+            AtomicReference<Double> actualAccuracy, AtomicReference<Double> actualAccuracy2) {
+            // Train decision tree model.
+            DecisionTreeNode  bestMdl = trainer.fit(
+                ignite,
+                cache,
+                split.getTrainFilter(),
+                preprocessor,
+                lbExtractor
+            );
+
+            actualAccuracy.set(Evaluator.evaluate(
+                cache,
+                split.getTestFilter(),
+                bestMdl,
+                preprocessor,
+                lbExtractor,
+                new Accuracy<>()
+            ));
+
+            actualAccuracy2.set(Evaluator.evaluate(
+                cache,
+                bestMdl,
+                preprocessor,
+                lbExtractor,
+                new Accuracy<>()
+            ));
+        }
+
+        /** */
+        private IgniteBiFunction<Integer, Object[], Vector> setupProcessors() {
+            // Defines first preprocessor that extracts features from an upstream data.
+            // Extracts "pclass", "sibsp", "parch", "sex", "embarked", "age", "fare"
+            IgniteBiFunction<Integer, Object[], Object[]> featureExtractor
+                = (k, v) -> new Object[] {v[0], v[3], v[4], v[5], v[6], v[8], v[10]};
+
+            IgniteBiFunction<Integer, Object[], Vector> strEncoderPreprocessor = new EncoderTrainer<Integer, Object[]>()
+                .withEncoderType(EncoderType.STRING_ENCODER)
+                .withEncodedFeature(1)
+                .withEncodedFeature(6) // <--- Changed index here
+                .fit(ignite,
+                    cache,
+                    featureExtractor
+                );
+
+            IgniteBiFunction<Integer, Object[], Vector> imputingPreprocessor = new ImputerTrainer<Integer, Object[]>()
+                .fit(ignite,
+                    cache,
+                    strEncoderPreprocessor
+                );
+
+            IgniteBiFunction<Integer, Object[], Vector> minMaxScalerPreprocessor = new MinMaxScalerTrainer<Integer, Object[]>()
+                .fit(
+                    ignite,
+                    cache,
+                    imputingPreprocessor
+                );
+
+            return new NormalizationTrainer<Integer, Object[]>()
+                .withP(2)
+                .fit(
+                    ignite,
+                    cache,
+                    minMaxScalerPreprocessor
+                );
+        }
+
+        /** */
+        private IgniteCache<Integer, Object[]> dataToCache() {
+            CacheConfiguration<Integer, Object[]> cacheConfiguration = new CacheConfiguration<>();
+            cacheConfiguration.setName(UUID.randomUUID().toString());
+            cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10));
+
+            IgniteCache<Integer, Object[]> dataCache = ignite.createCache(cacheConfiguration);
+
+            readPassengers(dataCache);
+
+            return dataCache;
+        }
+
+        /**
+         * Read passengers data.
+         *
+         * @param cache The ignite cache.
+         */
+        private void readPassengers(IgniteCache<Integer, Object[]> cache) {
+            // IMPL NOTE: pclass;survived;name;sex;age;sibsp;parch;ticket;fare;cabin;embarked;boat;body;homedest
+            List<String[]> passengers = Arrays.asList(
+                new String[] {
+                    "1", "1", "Allen, Miss. Elisabeth Walton", "",
+                    "29", "", "", "24160", "211,3375", "B5", "", "2", "", "St Louis, MO"},
+                new String[] {
+                    "1", "1", "Allison, Master. Hudson Trevor", "male",
+                    "0,9167", "1", "2", "113781", "151,55", "C22 C26", "S", "11", "", "Montreal, PQ / Chesterville, ON"},
+                new String[] {
+                    "1", "0", "Allison, Miss. Helen Loraine", "female",
+                    "2", "1", "2", "113781", "151,55", "C22 C26", "S", "", "", "Montreal, PQ / Chesterville, ON"},
+                new String[] {
+                    "1", "0", "Allison, Mr. Hudson Joshua Creighton",
+                    "male", "30", "1", "2", "113781", "151,55", "C22 C26", "S", "", "135", "Montreal, PQ / Chesterville, ON"},
+                new String[] {
+                    "1", "0", "Allison, Mrs. Hudson J C (Bessie Waldo Daniels)", "female",
+                    "25", "1", "2", "113781", "151,55", "C22 C26", "S", "", "", "Montreal, PQ / Chesterville, ON"},
+                new String[] {
+                    "1", "1", "Anderson, Mr. Harry", "male",
+                    "48", "0", "0", "19952", "26,55", "E12", "S", "3", "", "New York, NY"},
+                new String[] {
+                    "1", "1", "Andrews, Miss. Kornelia Theodosia", "female",
+                    "63", "1", "0", "13502", "77,9583", "D7", "S", "10", "", "Hudson, NY"},
+                new String[] {
+                    "1", "0", "Andrews, Mr. Thomas Jr", "male",
+                    "39", "0", "0", "112050", "0", "A36", "S", "", "", "Belfast, NI"},
+                new String[] {
+                    "1", "1", "Appleton, Mrs. Edward Dale (Charlotte Lamson)", "female",
+                    "53", "2", "0", "11769", "51,4792", "C101", "S", "D", "", "Bayside, Queens, NY"},
+                new String[] {
+                    "1", "0", "Artagaveytia, Mr. Ramon", "male",
+                    "71", "0", "0", "PC 17609", "49,5042", "", "C", "", "22", "Montevideo, Uruguay"});
+
+            int cnt = 1;
+            for (String[] details : passengers) {
+                Object[] data = new Object[details.length];
+
+                for (int i = 0; i < details.length; i++)
+                    data[i] = doubleOrString(details[i]);
+
+                cache.put(cnt++, data);
+            }
+        }
+
+        /** */
+        private Object doubleOrString(String data) {
+            NumberFormat format = NumberFormat.getInstance(Locale.FRANCE);
+            try {
+                return data.equals("") ? Double.NaN : Double.valueOf(data);
+            }
+            catch (java.lang.NumberFormatException e) {
+
+                try {
+                    return format.parse(data).doubleValue();
+                }
+                catch (ParseException e1) {
+                    return data;
+                }
+            }
+        }
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/FmeasureTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/FmeasureTest.java
index 4f13816..835d08d 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/FmeasureTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/FmeasureTest.java
@@ -31,7 +31,7 @@
     /** */
     @Test
     public void testScore() {
-        Metric<Integer> scoreCalculator = new Fmeasure<>(1);
+        Fmeasure<Integer> scoreCalculator = new Fmeasure<>(1);
 
         LabelPairCursor<Integer> cursor = new TestLabelPairCursor<>(
             Arrays.asList(1, 0, 1, 0, 1, 0),
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/PrecisionTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/PrecisionTest.java
index 72f4cd7..d7821d5 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/PrecisionTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/PrecisionTest.java
@@ -31,7 +31,7 @@
     /** */
     @Test
     public void testScore() {
-        Metric<Integer> scoreCalculator = new Precision<>(0);
+        Precision<Integer> scoreCalculator = new Precision<>(0);
 
         LabelPairCursor<Integer> cursor = new TestLabelPairCursor<>(
             Arrays.asList(1, 0, 1, 0, 1, 0),
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/RecallTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/RecallTest.java
index 5df465b..8c92acd 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/RecallTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/selection/scoring/metric/RecallTest.java
@@ -31,7 +31,7 @@
     /** */
     @Test
     public void testScore() {
-        Metric<Integer> scoreCalculator = new Recall<>(1);
+        Recall<Integer> scoreCalculator = new Recall<>(1);
 
         LabelPairCursor<Integer> cursor = new TestLabelPairCursor<>(
             Arrays.asList(1, 0, 1, 0, 1, 0),
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/structures/DatasetStructureTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/structures/DatasetStructureTest.java
new file mode 100644
index 0000000..79e7a16
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/structures/DatasetStructureTest.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.structures;
+
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Tests for {@link Dataset} basic features.
+ */
+public class DatasetStructureTest {
+    /**
+     * Basic test
+     */
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testBasic() {
+        Assert.assertNull("Feature names constructor", new Dataset<DatasetRow<Vector>>(1, 1,
+            new String[] {"tests"}, false).data());
+
+        Dataset<DatasetRow<Vector>> dataset = new Dataset<DatasetRow<Vector>>(new DatasetRow[] {},
+            new FeatureMetadata[] {});
+
+        Assert.assertEquals("Expect empty data", 0, dataset.data().length);
+        Assert.assertEquals("Expect empty meta", 0, dataset.data().length);
+        Assert.assertFalse("Not distributed by default", dataset.isDistributed());
+
+        dataset.setData(new DatasetRow[] {new DatasetRow()});
+        dataset.setMeta(new FeatureMetadata[] {new FeatureMetadata()});
+        dataset.setDistributed(true);
+
+        Assert.assertEquals("Expect non empty data", 1, dataset.data().length);
+        Assert.assertEquals("Expect non empty meta", 1, dataset.data().length);
+        Assert.assertTrue("Expect distributed", dataset.isDistributed());
+        Assert.assertEquals(1, dataset.meta().length);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/structures/StructuresTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/structures/StructuresTestSuite.java
new file mode 100644
index 0000000..01064a7
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/structures/StructuresTestSuite.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.structures;
+
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for all tests located in org.apache.ignite.ml.trees package.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    DatasetStructureTest.class
+})
+public class StructuresTestSuite {
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerIntegrationTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerIntegrationTest.java
deleted file mode 100644
index d227de7..0000000
--- a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerIntegrationTest.java
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.ml.svm;
-
-import java.util.Arrays;
-import java.util.UUID;
-import java.util.concurrent.ThreadLocalRandom;
-import org.apache.ignite.Ignite;
-import org.apache.ignite.IgniteCache;
-import org.apache.ignite.internal.util.IgniteUtils;
-import org.apache.ignite.ml.TestUtils;
-import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
-import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
-
-/**
- * Tests for {@link SVMLinearBinaryClassificationTrainer} that require to start the whole Ignite infrastructure.
- */
-public class SVMBinaryTrainerIntegrationTest extends GridCommonAbstractTest {
-    /** Fixed size of Dataset. */
-    private static final int AMOUNT_OF_OBSERVATIONS = 1000;
-
-    /** Fixed size of columns in Dataset. */
-    private static final int AMOUNT_OF_FEATURES = 2;
-
-    /** Precision in test checks. */
-    private static final double PRECISION = 1e-2;
-
-    /** Number of nodes in grid */
-    private static final int NODE_COUNT = 3;
-
-    /** Ignite instance. */
-    private Ignite ignite;
-
-    /** {@inheritDoc} */
-    @Override protected void beforeTestsStarted() throws Exception {
-        for (int i = 1; i <= NODE_COUNT; i++)
-            startGrid(i);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected void afterTestsStopped() {
-        stopAllGrids();
-    }
-
-    /**
-     * {@inheritDoc}
-     */
-    @Override protected void beforeTest() throws Exception {
-        /* Grid instance. */
-        ignite = grid(NODE_COUNT);
-        ignite.configuration().setPeerClassLoadingEnabled(true);
-        IgniteUtils.setCurrentIgniteName(ignite.configuration().getIgniteInstanceName());
-    }
-
-    /**
-     * Test trainer on classification model y = x.
-     */
-    public void testTrainWithTheLinearlySeparableCase() {
-        IgniteCache<Integer, double[]> data = ignite.getOrCreateCache(UUID.randomUUID().toString());
-
-        ThreadLocalRandom rndX = ThreadLocalRandom.current();
-        ThreadLocalRandom rndY = ThreadLocalRandom.current();
-
-        for (int i = 0; i < AMOUNT_OF_OBSERVATIONS; i++) {
-            double x = rndX.nextDouble(-1000, 1000);
-            double y = rndY.nextDouble(-1000, 1000);
-            double[] vec = new double[AMOUNT_OF_FEATURES + 1];
-            vec[0] = y - x > 0 ? 1 : -1; // assign label.
-            vec[1] = x;
-            vec[2] = y;
-            data.put(i, vec);
-        }
-
-        SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer();
-
-        SVMLinearBinaryClassificationModel mdl = trainer.fit(
-            ignite,
-            data,
-            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
-            (k, v) -> v[0]
-        );
-
-        TestUtils.assertEquals(-1, mdl.apply(new DenseVector(new double[]{100, 10})), PRECISION);
-        TestUtils.assertEquals(1, mdl.apply(new DenseVector(new double[]{10, 100})), PRECISION);
-    }
-}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerTest.java
index ae94dd2..d6f77c0 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMBinaryTrainerTest.java
@@ -20,55 +20,77 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.junit.Test;
 
 /**
  * Tests for {@link SVMLinearBinaryClassificationTrainer}.
  */
-public class SVMBinaryTrainerTest {
-    /** Fixed size of Dataset. */
-    private static final int AMOUNT_OF_OBSERVATIONS = 1000;
-
-    /** Fixed size of columns in Dataset. */
-    private static final int AMOUNT_OF_FEATURES = 2;
-
-    /** Precision in test checks. */
-    private static final double PRECISION = 1e-2;
-
+public class SVMBinaryTrainerTest extends TrainerTest {
     /**
      * Test trainer on classification model y = x.
      */
     @Test
     public void testTrainWithTheLinearlySeparableCase() {
-        Map<Integer, double[]> data = new HashMap<>();
+        Map<Integer, double[]> cacheMock = new HashMap<>();
 
-        ThreadLocalRandom rndX = ThreadLocalRandom.current();
-        ThreadLocalRandom rndY = ThreadLocalRandom.current();
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
 
-        for (int i = 0; i < AMOUNT_OF_OBSERVATIONS; i++) {
-            double x = rndX.nextDouble(-1000, 1000);
-            double y = rndY.nextDouble(-1000, 1000);
-            double[] vec = new double[AMOUNT_OF_FEATURES + 1];
-            vec[0] = y - x > 0 ? 1 : -1; // assign label.
-            vec[1] = x;
-            vec[2] = y;
-            data.put(i, vec);
-        }
-
-        SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer();
+        SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer()
+            .withSeed(1234L);
 
         SVMLinearBinaryClassificationModel mdl = trainer.fit(
-            data,
-            10,
+            cacheMock,
+            parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
             (k, v) -> v[0]
         );
 
-        TestUtils.assertEquals(-1, mdl.apply(new DenseVector(new double[]{100, 10})), PRECISION);
-        TestUtils.assertEquals(1, mdl.apply(new DenseVector(new double[]{10, 100})), PRECISION);
+        TestUtils.assertEquals(0, mdl.apply(VectorUtils.of(100, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 100)), PRECISION);
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
+
+        SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer()
+            .withAmountOfIterations(1000)
+            .withSeed(1234L);
+
+        SVMLinearBinaryClassificationModel originalMdl = trainer.fit(
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        SVMLinearBinaryClassificationModel updatedOnSameDS = trainer.update(
+            originalMdl,
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        SVMLinearBinaryClassificationModel updatedOnEmptyDS = trainer.update(
+            originalMdl,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        Vector v = VectorUtils.of(100, 10);
+        TestUtils.assertEquals(originalMdl.apply(v), updatedOnSameDS.apply(v), PRECISION);
+        TestUtils.assertEquals(originalMdl.apply(v), updatedOnEmptyDS.apply(v), PRECISION);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMModelTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMModelTest.java
index 9244c35..9c452f9 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMModelTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMModelTest.java
@@ -53,7 +53,11 @@
         observation = new DenseVector(new double[]{1.0, -2.0});
         TestUtils.assertEquals(1.0 + 2.0 * 1.0 - 3.0 * 2.0, mdl.apply(observation), PRECISION);
 
-        Assert.assertEquals(true, mdl.isKeepingRawLabels());
+        Assert.assertTrue(mdl.isKeepingRawLabels());
+
+        Assert.assertTrue(mdl.toString().length() > 0);
+        Assert.assertTrue(mdl.toString(true).length() > 0);
+        Assert.assertTrue(mdl.toString(false).length() > 0);
     }
 
 
@@ -68,6 +72,10 @@
         mdl.add(2, new SVMLinearBinaryClassificationModel(weights2, 0.0).withRawLabels(true));
         mdl.add(2, new SVMLinearBinaryClassificationModel(weights3, 0.0).withRawLabels(true));
 
+        Assert.assertTrue(mdl.toString().length() > 0);
+        Assert.assertTrue(mdl.toString(true).length() > 0);
+        Assert.assertTrue(mdl.toString(false).length() > 0);
+
         Vector observation = new DenseVector(new double[]{1.0, 1.0});
         TestUtils.assertEquals( 1.0, mdl.apply(observation), PRECISION);
     }
@@ -85,13 +93,13 @@
         TestUtils.assertEquals(1.0, mdl.apply(observation), PRECISION);
 
         observation = new DenseVector(new double[]{-1.0, -1.0});
-        TestUtils.assertEquals(-1.0, mdl.apply(observation), PRECISION);
+        TestUtils.assertEquals(0.0, mdl.apply(observation), PRECISION);
 
         observation = new DenseVector(new double[]{-2.0, 1.0});
-        TestUtils.assertEquals(-1.0, mdl.apply(observation), PRECISION);
+        TestUtils.assertEquals(0.0, mdl.apply(observation), PRECISION);
 
         observation = new DenseVector(new double[]{-1.0, -2.0});
-        TestUtils.assertEquals(-1.0, mdl.apply(observation), PRECISION);
+        TestUtils.assertEquals(0.0, mdl.apply(observation), PRECISION);
 
         final SVMLinearBinaryClassificationModel mdlWithNewData = mdl.withIntercept(-2.0).withWeights(new DenseVector(new double[] {-2.0, -2.0}));
         System.out.println("The SVM model is " + mdlWithNewData);
@@ -108,7 +116,7 @@
         SVMLinearBinaryClassificationModel mdl = new SVMLinearBinaryClassificationModel(weights, 1.0).withThreshold(5);
 
         Vector observation = new DenseVector(new double[]{1.0, 1.0});
-        TestUtils.assertEquals(-1.0, mdl.apply(observation), PRECISION);
+        TestUtils.assertEquals(0.0, mdl.apply(observation), PRECISION);
 
         observation = new DenseVector(new double[]{3.0, 4.0});
         TestUtils.assertEquals(1.0, mdl.apply(observation), PRECISION);
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMMultiClassTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMMultiClassTrainerTest.java
index b12b266..7c4809f 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMMultiClassTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMMultiClassTrainerTest.java
@@ -20,58 +20,81 @@
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.ThreadLocalRandom;
 import org.apache.ignite.ml.TestUtils;
+import org.apache.ignite.ml.common.TrainerTest;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.math.primitives.vector.impl.DenseVector;
 import org.junit.Test;
 
 /**
  * Tests for {@link SVMLinearBinaryClassificationTrainer}.
  */
-public class SVMMultiClassTrainerTest {
-    /** Fixed size of Dataset. */
-    private static final int AMOUNT_OF_OBSERVATIONS = 1000;
-
-    /** Fixed size of columns in Dataset. */
-    private static final int AMOUNT_OF_FEATURES = 2;
-
-    /** Precision in test checks. */
-    private static final double PRECISION = 1e-2;
-
+public class SVMMultiClassTrainerTest extends TrainerTest {
     /**
-     * Test trainer on classification model y = x.
+     * Test trainer on 4 sets grouped around of square vertices.
      */
     @Test
     public void testTrainWithTheLinearlySeparableCase() {
-        Map<Integer, double[]> data = new HashMap<>();
+        Map<Integer, double[]> cacheMock = new HashMap<>();
 
-        ThreadLocalRandom rndX = ThreadLocalRandom.current();
-        ThreadLocalRandom rndY = ThreadLocalRandom.current();
-
-        for (int i = 0; i < AMOUNT_OF_OBSERVATIONS; i++) {
-            double x = rndX.nextDouble(-1000, 1000);
-            double y = rndY.nextDouble(-1000, 1000);
-            double[] vec = new double[AMOUNT_OF_FEATURES + 1];
-            vec[0] = y - x > 0 ? 1 : -1; // assign label.
-            vec[1] = x;
-            vec[2] = y;
-            data.put(i, vec);
-        }
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
 
         SVMLinearMultiClassClassificationTrainer trainer = new SVMLinearMultiClassClassificationTrainer()
             .withLambda(0.3)
-            .withAmountOfLocIterations(100)
-            .withAmountOfIterations(20);
+            .withAmountOfLocIterations(10)
+            .withAmountOfIterations(20)
+            .withSeed(1234L);
 
         SVMLinearMultiClassClassificationModel mdl = trainer.fit(
-            data,
-            10,
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+        TestUtils.assertEquals(0, mdl.apply(VectorUtils.of(100, 10)), PRECISION);
+        TestUtils.assertEquals(1, mdl.apply(VectorUtils.of(10, 100)), PRECISION);
+    }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        Map<Integer, double[]> cacheMock = new HashMap<>();
+
+        for (int i = 0; i < twoLinearlySeparableClasses.length; i++)
+            cacheMock.put(i, twoLinearlySeparableClasses[i]);
+
+        SVMLinearMultiClassClassificationTrainer trainer = new SVMLinearMultiClassClassificationTrainer()
+            .withLambda(0.3)
+            .withAmountOfLocIterations(10)
+            .withAmountOfIterations(100)
+            .withSeed(1234L);
+
+        SVMLinearMultiClassClassificationModel originalMdl = trainer.fit(
+            cacheMock,
+            parts,
             (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
             (k, v) -> v[0]
         );
 
-        TestUtils.assertEquals(-1, mdl.apply(new DenseVector(new double[]{100, 10})), PRECISION);
-        TestUtils.assertEquals(1, mdl.apply(new DenseVector(new double[]{10, 100})), PRECISION);
+        SVMLinearMultiClassClassificationModel updatedOnSameDS = trainer.update(
+            originalMdl,
+            cacheMock,
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        SVMLinearMultiClassClassificationModel updatedOnEmptyDS = trainer.update(
+            originalMdl,
+            new HashMap<Integer, double[]>(),
+            parts,
+            (k, v) -> VectorUtils.of(Arrays.copyOfRange(v, 1, v.length)),
+            (k, v) -> v[0]
+        );
+
+        Vector v = VectorUtils.of(100, 10);
+        TestUtils.assertEquals(originalMdl.apply(v), updatedOnSameDS.apply(v), PRECISION);
+        TestUtils.assertEquals(originalMdl.apply(v), updatedOnEmptyDS.apply(v), PRECISION);
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMTestSuite.java
index 822ad18..df7263f 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/svm/SVMTestSuite.java
@@ -28,7 +28,6 @@
     SVMModelTest.class,
     SVMBinaryTrainerTest.class,
     SVMMultiClassTrainerTest.class,
-    SVMBinaryTrainerIntegrationTest.class
 })
 public class SVMTestSuite {
     // No-op.
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java
index c84da12..b82885e 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeClassificationTrainerTest.java
@@ -29,6 +29,7 @@
 import org.junit.runners.Parameterized;
 
 import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertNotNull;
 import static junit.framework.TestCase.assertTrue;
 
 /**
@@ -40,12 +41,12 @@
     private static final int[] partsToBeTested = new int[] {1, 2, 3, 4, 5, 7};
 
     /** Number of partitions. */
-    @Parameterized.Parameter(0)
+    @Parameterized.Parameter()
     public int parts;
 
     /** Use index [= 1 if true]. */
     @Parameterized.Parameter(1)
-    public int useIndex;
+    public int useIdx;
 
     /** Test parameters. */
     @Parameterized.Parameters(name = "Data divided on {0} partitions. Use index = {1}.")
@@ -73,7 +74,7 @@
         }
 
         DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(1, 0)
-            .withUseIndex(useIndex == 1);
+            .withUseIndex(useIdx == 1);
 
         DecisionTreeNode tree = trainer.fit(
             data,
@@ -87,6 +88,10 @@
         DecisionTreeConditionalNode node = (DecisionTreeConditionalNode)tree;
 
         assertEquals(0, node.getThreshold(), 1e-3);
+        assertEquals(0, node.getCol());
+        assertNotNull(node.toString());
+        assertNotNull(node.toString(true));
+        assertNotNull(node.toString(false));
 
         assertTrue(node.getThenNode() instanceof DecisionTreeLeafNode);
         assertTrue(node.getElseNode() instanceof DecisionTreeLeafNode);
@@ -96,5 +101,9 @@
 
         assertEquals(1, thenNode.getVal(), 1e-10);
         assertEquals(0, elseNode.getVal(), 1e-10);
+
+        assertNotNull(thenNode.toString());
+        assertNotNull(thenNode.toString(true));
+        assertNotNull(thenNode.toString(false));
     }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java
index 4e64925..84975a8 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeRegressionTrainerTest.java
@@ -73,7 +73,7 @@
         }
 
         DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(1, 0)
-            .withUseIndex(useIndex == 1);
+            .withUsingIdx(useIndex == 1);
 
         DecisionTreeNode tree = trainer.fit(
             data,
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeTestSuite.java
index 867103e..2cbb486 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeTestSuite.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/DecisionTreeTestSuite.java
@@ -24,8 +24,6 @@
 import org.apache.ignite.ml.tree.impurity.mse.MSEImpurityMeasureTest;
 import org.apache.ignite.ml.tree.impurity.util.SimpleStepFunctionCompressorTest;
 import org.apache.ignite.ml.tree.impurity.util.StepFunctionTest;
-import org.apache.ignite.ml.tree.randomforest.RandomForestClassifierTrainerTest;
-import org.apache.ignite.ml.tree.randomforest.RandomForestRegressionTrainerTest;
 import org.junit.runner.RunWith;
 import org.junit.runners.Suite;
 
@@ -44,9 +42,7 @@
     MSEImpurityMeasureCalculatorTest.class,
     MSEImpurityMeasureTest.class,
     StepFunctionTest.class,
-    SimpleStepFunctionCompressorTest.class,
-    RandomForestClassifierTrainerTest.class,
-    RandomForestRegressionTrainerTest.class
+    SimpleStepFunctionCompressorTest.class
 })
 public class DecisionTreeTestSuite {
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/impurity/util/SimpleStepFunctionCompressorTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/impurity/util/SimpleStepFunctionCompressorTest.java
index 001404f..579d592 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/impurity/util/SimpleStepFunctionCompressorTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/impurity/util/SimpleStepFunctionCompressorTest.java
@@ -27,6 +27,39 @@
 public class SimpleStepFunctionCompressorTest {
     /** */
     @Test
+    @SuppressWarnings("unchecked")
+    public void testDefaultCompress() {
+        StepFunction<TestImpurityMeasure> function = new StepFunction<>(
+            new double[]{1, 2, 3, 4},
+            TestImpurityMeasure.asTestImpurityMeasures(1, 2, 3, 4)
+        );
+
+        SimpleStepFunctionCompressor<TestImpurityMeasure> compressor = new SimpleStepFunctionCompressor<>();
+
+        StepFunction<TestImpurityMeasure> resFunction = compressor.compress(new StepFunction [] {function})[0];
+
+        assertArrayEquals(new double[]{1, 2, 3, 4}, resFunction.getX(), 1e-10);
+        assertArrayEquals(TestImpurityMeasure.asTestImpurityMeasures(1, 2, 3, 4), resFunction.getY());
+    }
+
+    /** */
+    @Test
+    public void testDefaults() {
+        StepFunction<TestImpurityMeasure> function = new StepFunction<>(
+            new double[]{1, 2, 3, 4},
+            TestImpurityMeasure.asTestImpurityMeasures(1, 2, 3, 4)
+        );
+
+        SimpleStepFunctionCompressor<TestImpurityMeasure> compressor = new SimpleStepFunctionCompressor<>();
+
+        StepFunction<TestImpurityMeasure> resFunction = compressor.compress(function);
+
+        assertArrayEquals(new double[]{1, 2, 3, 4}, resFunction.getX(), 1e-10);
+        assertArrayEquals(TestImpurityMeasure.asTestImpurityMeasures(1, 2, 3, 4), resFunction.getY());
+    }
+
+    /** */
+    @Test
     public void testCompressSmallFunction() {
         StepFunction<TestImpurityMeasure> function = new StepFunction<>(
             new double[]{1, 2, 3, 4},
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java
index 055223b..087f4e8 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestClassifierTrainerTest.java
@@ -21,11 +21,11 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import org.apache.ignite.ml.composition.ModelOnFeaturesSubspace;
 import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.predictionsaggregator.OnMajorityPredictionsAggregator;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.tree.DecisionTreeConditionalNode;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -33,6 +33,9 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Tests for {@link RandomForestClassifierTrainer}.
+ */
 @RunWith(Parameterized.class)
 public class RandomForestClassifierTrainerTest {
     /**
@@ -46,6 +49,9 @@
     @Parameterized.Parameter
     public int parts;
 
+    /**
+     * Data iterator.
+     */
     @Parameterized.Parameters(name = "Data divided on {0} partitions")
     public static Iterable<Integer[]> data() {
         List<Integer[]> res = new ArrayList<>();
@@ -69,16 +75,46 @@
             sample.put(new double[] {x1, x2, x3, x4}, (double)(i % 2));
         }
 
-        RandomForestClassifierTrainer trainer = new RandomForestClassifierTrainer(4, 3, 5, 0.3, 4, 0.1);
+        ArrayList<FeatureMeta> meta = new ArrayList<>();
+        for (int i = 0; i < 4; i++)
+            meta.add(new FeatureMeta("", i, false));
+        RandomForestClassifierTrainer trainer = new RandomForestClassifierTrainer(meta)
+            .withCountOfTrees(5)
+            .withFeaturesCountSelectionStrgy(x -> 2);
 
         ModelsComposition mdl = trainer.fit(sample, parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
 
-        mdl.getModels().forEach(m -> {
-            assertTrue(m instanceof ModelOnFeaturesSubspace);
-            assertTrue(((ModelOnFeaturesSubspace) m).getMdl() instanceof DecisionTreeConditionalNode);
-        });
-
         assertTrue(mdl.getPredictionsAggregator() instanceof OnMajorityPredictionsAggregator);
         assertEquals(5, mdl.getModels().size());
     }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        int sampleSize = 1000;
+        Map<double[], Double> sample = new HashMap<>();
+        for (int i = 0; i < sampleSize; i++) {
+            double x1 = i;
+            double x2 = x1 / 10.0;
+            double x3 = x2 / 10.0;
+            double x4 = x3 / 10.0;
+
+            sample.put(new double[] {x1, x2, x3, x4}, (double)(i % 2));
+        }
+
+        ArrayList<FeatureMeta> meta = new ArrayList<>();
+        for (int i = 0; i < 4; i++)
+            meta.add(new FeatureMeta("", i, false));
+        RandomForestClassifierTrainer trainer = new RandomForestClassifierTrainer(meta)
+            .withCountOfTrees(100)
+            .withFeaturesCountSelectionStrgy(x -> 2);
+
+        ModelsComposition originalModel = trainer.fit(sample, parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+        ModelsComposition updatedOnSameDS = trainer.update(originalModel, sample, parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+        ModelsComposition updatedOnEmptyDS = trainer.update(originalModel, new HashMap<double[], Double>(), parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+
+        Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005);
+        assertEquals(originalModel.apply(v), updatedOnSameDS.apply(v), 0.01);
+        assertEquals(originalModel.apply(v), updatedOnEmptyDS.apply(v), 0.01);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java
index 1421e0a..fcc20bd 100644
--- a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestRegressionTrainerTest.java
@@ -21,11 +21,11 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import org.apache.ignite.ml.composition.ModelOnFeaturesSubspace;
 import org.apache.ignite.ml.composition.ModelsComposition;
 import org.apache.ignite.ml.composition.predictionsaggregator.MeanValuePredictionsAggregator;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
 import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
-import org.apache.ignite.ml.tree.DecisionTreeConditionalNode;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -33,6 +33,9 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+/**
+ * Tests for {@link RandomForestRegressionTrainer}.
+ */
 @RunWith(Parameterized.class)
 public class RandomForestRegressionTrainerTest {
     /**
@@ -69,16 +72,45 @@
             sample.put(x1 * x2 + x3 * x4, new double[] {x1, x2, x3, x4});
         }
 
-        RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(4, 3, 5, 0.3, 4, 0.1);
+        ArrayList<FeatureMeta> meta = new ArrayList<>();
+        for(int i = 0; i < 4; i++)
+            meta.add(new FeatureMeta("", i, false));
+        RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(meta)
+            .withCountOfTrees(5)
+            .withFeaturesCountSelectionStrgy(x -> 2);
 
         ModelsComposition mdl = trainer.fit(sample, parts, (k, v) -> VectorUtils.of(v), (k, v) -> k);
-
-        mdl.getModels().forEach(m -> {
-            assertTrue(m instanceof ModelOnFeaturesSubspace);
-            assertTrue(((ModelOnFeaturesSubspace) m).getMdl() instanceof DecisionTreeConditionalNode);
-        });
-
         assertTrue(mdl.getPredictionsAggregator() instanceof MeanValuePredictionsAggregator);
         assertEquals(5, mdl.getModels().size());
     }
+
+    /** */
+    @Test
+    public void testUpdate() {
+        int sampleSize = 1000;
+        Map<double[], Double> sample = new HashMap<>();
+        for (int i = 0; i < sampleSize; i++) {
+            double x1 = i;
+            double x2 = x1 / 10.0;
+            double x3 = x2 / 10.0;
+            double x4 = x3 / 10.0;
+
+            sample.put(new double[] {x1, x2, x3, x4}, (double)(i % 2));
+        }
+
+        ArrayList<FeatureMeta> meta = new ArrayList<>();
+        for (int i = 0; i < 4; i++)
+            meta.add(new FeatureMeta("", i, false));
+        RandomForestRegressionTrainer trainer = new RandomForestRegressionTrainer(meta)
+            .withCountOfTrees(100)
+            .withFeaturesCountSelectionStrgy(x -> 2);
+
+        ModelsComposition originalModel = trainer.fit(sample, parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+        ModelsComposition updatedOnSameDS = trainer.update(originalModel, sample, parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+        ModelsComposition updatedOnEmptyDS = trainer.update(originalModel, new HashMap<double[], Double>(), parts, (k, v) -> VectorUtils.of(k), (k, v) -> v);
+
+        Vector v = VectorUtils.of(5, 0.5, 0.05, 0.005);
+        assertEquals(originalModel.apply(v), updatedOnSameDS.apply(v), 0.1);
+        assertEquals(originalModel.apply(v), updatedOnEmptyDS.apply(v), 0.1);
+    }
 }
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTest.java
new file mode 100644
index 0000000..9fa7f0e
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTest.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+import org.apache.ignite.ml.tree.randomforest.data.TreeNode;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/** */
+public class RandomForestTest {
+    /** Seed. */
+    private final long seed = 0;
+
+    /** Count of trees. */
+    private final int countOfTrees = 10;
+
+    /** Min imp delta. */
+    private final double minImpDelta = 1.0;
+
+    /** Max depth. */
+    private final int maxDepth = 1;
+
+    /** Meta. */
+    private final List<FeatureMeta> meta = Arrays.asList(
+        new FeatureMeta("", 0, false),
+        new FeatureMeta("", 1, true),
+        new FeatureMeta("", 2, false),
+        new FeatureMeta("", 3, true),
+        new FeatureMeta("", 4, false),
+        new FeatureMeta("", 5, true),
+        new FeatureMeta("", 6, false)
+    );
+
+    /** Rf. */
+    private RandomForestClassifierTrainer rf = new RandomForestClassifierTrainer(meta)
+        .withCountOfTrees(countOfTrees)
+        .withSeed(seed)
+        .withFeaturesCountSelectionStrgy(x -> 4)
+        .withMaxDepth(maxDepth)
+        .withMinImpurityDelta(minImpDelta)
+        .withSubSampleSize(0.1);
+
+    /** */
+    @Test
+    public void testNeedSplit() {
+        TreeNode node = new TreeNode(1, 1);
+        node.setImpurity(1000);
+        assertTrue(rf.needSplit(node, Optional.of(new NodeSplit(0, 0, node.getImpurity() - minImpDelta * 1.01))));
+        assertFalse(rf.needSplit(node, Optional.of(new NodeSplit(0, 0, node.getImpurity() - minImpDelta * 0.5))));
+        assertFalse(rf.needSplit(node, Optional.of(new NodeSplit(0, 0, node.getImpurity()))));
+
+        TreeNode child = node.toConditional(0, 0).get(0);
+        child.setImpurity(1000);
+        assertFalse(rf.needSplit(child, Optional.of(new NodeSplit(0, 0, child.getImpurity() - minImpDelta * 1.01))));
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTreeTestSuite.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTreeTestSuite.java
new file mode 100644
index 0000000..cc51352
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/RandomForestTreeTestSuite.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest;
+
+import org.apache.ignite.ml.tree.randomforest.data.impurity.GiniFeatureHistogramTest;
+import org.apache.ignite.ml.tree.randomforest.data.impurity.MSEHistogramTest;
+import org.apache.ignite.ml.tree.randomforest.data.statistics.NormalDistributionStatisticsComputerTest;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * Test suite for all tests located in {@link org.apache.ignite.ml.tree} package.
+ */
+@RunWith(Suite.class)
+@Suite.SuiteClasses({
+    RandomForestClassifierTrainerTest.class,
+    RandomForestRegressionTrainerTest.class,
+    GiniFeatureHistogramTest.class,
+    MSEHistogramTest.class,
+    NormalDistributionStatisticsComputerTest.class,
+    RandomForestTest.class
+})
+public class RandomForestTreeTestSuite {
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java
new file mode 100644
index 0000000..943b5d8
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/TreeNodeTest.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data;
+
+import java.util.List;
+import org.apache.ignite.ml.math.primitives.vector.Vector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/** */
+public class TreeNodeTest {
+    /** Features 1. */
+    private final Vector features1 = VectorUtils.of(0., 1.);
+    /** Features 2. */
+    private final Vector features2 = VectorUtils.of(1., 0.);
+
+    /** */
+    @Test
+    public void testPredictNextIdCondNodeAtTreeCorner() {
+        TreeNode node = new TreeNode(5, 1);
+
+        assertEquals(TreeNode.Type.UNKNOWN, node.getType());
+        assertEquals(5, node.predictNextNodeKey(features1).nodeId());
+        assertEquals(5, node.predictNextNodeKey(features2).nodeId());
+    }
+
+    /** */
+    @Test
+    public void testPredictNextIdForLeaf() {
+        TreeNode node = new TreeNode(5, 1);
+        node.toLeaf(0.5);
+
+        assertEquals(TreeNode.Type.LEAF, node.getType());
+        assertEquals(5, node.predictNextNodeKey(features1).nodeId());
+        assertEquals(5, node.predictNextNodeKey(features2).nodeId());
+    }
+
+    /** */
+    @Test
+    public void testPredictNextIdForTree() {
+        TreeNode root = new TreeNode(1, 1);
+        root.toConditional(0, 0.1);
+
+        assertEquals(TreeNode.Type.CONDITIONAL, root.getType());
+        assertEquals(2, root.predictNextNodeKey(features1).nodeId());
+        assertEquals(3, root.predictNextNodeKey(features2).nodeId());
+    }
+
+    /** */
+    @Test
+    public void testPredictProba() {
+        TreeNode root = new TreeNode(1, 1);
+        List<TreeNode> leaves = root.toConditional(0, 0.1);
+        leaves.forEach(leaf -> {
+            leaf.toLeaf(leaf.getId().nodeId() % 2);
+        });
+
+        assertEquals(TreeNode.Type.CONDITIONAL, root.getType());
+        assertEquals(0.0, root.apply(features1), 0.001);
+        assertEquals(1.0, root.apply(features2), 0.001);
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniFeatureHistogramTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniFeatureHistogramTest.java
new file mode 100644
index 0000000..7ca6411
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/GiniFeatureHistogramTest.java
@@ -0,0 +1,254 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.apache.ignite.ml.tree.randomforest.data.NodeSplit;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/** */
+public class GiniFeatureHistogramTest extends ImpurityHistogramTest {
+    /** Feature 1 meta. */
+    private BucketMeta feature1Meta = new BucketMeta(new FeatureMeta("", 0, true));
+    /** Feature 2 meta. */
+    private BucketMeta feature2Meta = new BucketMeta(new FeatureMeta("", 1, false));
+    /** Feature 3 meta. */
+    private BucketMeta feature3Meta = new BucketMeta(new FeatureMeta("", 2, true));
+
+    /** */
+    @Before
+    public void setUp() throws Exception {
+        feature2Meta.setMinVal(-5);
+        feature2Meta.setBucketSize(1);
+    }
+
+    /** */
+    @Test
+    public void testAddVector() {
+        Map<Double, Integer> lblMapping = new HashMap<>();
+        lblMapping.put(1.0, 0);
+        lblMapping.put(2.0, 1);
+        lblMapping.put(3.0, 2);
+
+        GiniHistogram catFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature1Meta);
+        GiniHistogram catFeatureSmpl2 = new GiniHistogram(1, lblMapping, feature1Meta);
+
+        GiniHistogram contFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature2Meta);
+        GiniHistogram contFeatureSmpl2 = new GiniHistogram(1, lblMapping, feature2Meta);
+
+        for (BootstrappedVector vec : dataset) {
+            catFeatureSmpl1.addElement(vec);
+            catFeatureSmpl2.addElement(vec);
+            contFeatureSmpl1.addElement(vec);
+            contFeatureSmpl2.addElement(vec);
+        }
+
+        checkBucketIds(catFeatureSmpl1.buckets(), new Integer[] {0, 1});
+        checkBucketIds(catFeatureSmpl2.buckets(), new Integer[] {0, 1});
+        checkBucketIds(contFeatureSmpl1.buckets(), new Integer[] {1, 4, 6, 7, 8});
+        checkBucketIds(contFeatureSmpl2.buckets(), new Integer[] {1, 4, 6, 7, 8});
+
+        //categorical feature
+        checkCounters(catFeatureSmpl1.getHistForLabel(1.0), new double[] {2, 1}); //for feature values 0 and 1
+        checkBucketIds(catFeatureSmpl1.getHistForLabel(1.0).buckets(), new Integer[] {0, 1});
+        checkCounters(catFeatureSmpl1.getHistForLabel(2.0), new double[] {3});    //for feature value 1
+        checkBucketIds(catFeatureSmpl1.getHistForLabel(2.0).buckets(), new Integer[] {1});
+        checkCounters(catFeatureSmpl1.getHistForLabel(3.0), new double[] {2});    //for feature value 0
+        checkBucketIds(catFeatureSmpl1.getHistForLabel(3.0).buckets(), new Integer[] {0});
+
+        checkCounters(catFeatureSmpl2.getHistForLabel(1.0), new double[] {1, 2}); //for feature values 0 and 1
+        checkBucketIds(catFeatureSmpl2.getHistForLabel(1.0).buckets(), new Integer[] {0, 1});
+        checkCounters(catFeatureSmpl2.getHistForLabel(2.0), new double[] {3});    //for feature value 1
+        checkBucketIds(catFeatureSmpl2.getHistForLabel(2.0).buckets(), new Integer[] {1});
+        checkCounters(catFeatureSmpl2.getHistForLabel(3.0), new double[] {0});    //for feature value 0
+        checkBucketIds(catFeatureSmpl2.getHistForLabel(3.0).buckets(), new Integer[] {0});
+
+        //continuous feature
+        checkCounters(contFeatureSmpl1.getHistForLabel(1.0), new double[] {1, 2}); //for feature values 0 and 1
+        checkBucketIds(contFeatureSmpl1.getHistForLabel(1.0).buckets(), new Integer[] {4, 6});
+        checkCounters(contFeatureSmpl1.getHistForLabel(2.0), new double[] {1, 2});    //for feature value 1
+        checkBucketIds(contFeatureSmpl1.getHistForLabel(2.0).buckets(), new Integer[] {1, 7});
+        checkCounters(contFeatureSmpl1.getHistForLabel(3.0), new double[] {2});    //for feature value 0
+        checkBucketIds(contFeatureSmpl1.getHistForLabel(3.0).buckets(), new Integer[] {8});
+
+        checkCounters(contFeatureSmpl2.getHistForLabel(1.0), new double[] {2, 1}); //for feature values 0 and 1
+        checkBucketIds(contFeatureSmpl2.getHistForLabel(1.0).buckets(), new Integer[] {4, 6});
+        checkCounters(contFeatureSmpl2.getHistForLabel(2.0), new double[] {2, 1});    //for feature value 1
+        checkBucketIds(contFeatureSmpl2.getHistForLabel(2.0).buckets(), new Integer[] {1, 7});
+        checkCounters(contFeatureSmpl2.getHistForLabel(3.0), new double[] {0});    //for feature value 0
+        checkBucketIds(contFeatureSmpl2.getHistForLabel(3.0).buckets(), new Integer[] {8});
+    }
+
+    /** */
+    @Test
+    public void testSplit() {
+        Map<Double, Integer> lblMapping = new HashMap<>();
+        lblMapping.put(1.0, 0);
+        lblMapping.put(2.0, 1);
+
+        GiniHistogram catFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature1Meta);
+        GiniHistogram contFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature2Meta);
+        GiniHistogram emptyHist = new GiniHistogram(0, lblMapping, feature3Meta);
+        GiniHistogram catFeatureSmpl2 = new GiniHistogram(0, lblMapping, feature3Meta);
+
+        feature2Meta.setMinVal(-5);
+        feature2Meta.setBucketSize(1);
+
+        for (BootstrappedVector vec : toSplitDataset) {
+            catFeatureSmpl1.addElement(vec);
+            contFeatureSmpl1.addElement(vec);
+            catFeatureSmpl2.addElement(vec);
+        }
+
+        NodeSplit catSplit = catFeatureSmpl1.findBestSplit().get();
+        NodeSplit contSplit = contFeatureSmpl1.findBestSplit().get();
+        assertEquals(1.0, catSplit.getValue(), 0.01);
+        assertEquals(-0.5, contSplit.getValue(), 0.01);
+        assertFalse(emptyHist.findBestSplit().isPresent());
+        assertFalse(catFeatureSmpl2.findBestSplit().isPresent());
+    }
+
+    @Test
+    public void testOfSums() {
+        int sampleId = 0;
+        BucketMeta bucketMeta1 = new BucketMeta(new FeatureMeta("", 0, false));
+        bucketMeta1.setMinVal(0.);
+        bucketMeta1.setBucketSize(0.1);
+        BucketMeta bucketMeta2 = new BucketMeta(new FeatureMeta("", 1, true));
+
+        GiniHistogram forAllHist1 = new GiniHistogram(sampleId, lblMapping, bucketMeta1);
+        GiniHistogram forAllHist2 = new GiniHistogram(sampleId, lblMapping, bucketMeta2);
+
+        List<GiniHistogram> partitions1 = new ArrayList<>();
+        List<GiniHistogram> partitions2 = new ArrayList<>();
+        int countOfPartitions = rnd.nextInt(1000);
+        for(int i = 0; i < countOfPartitions; i++) {
+            partitions1.add(new GiniHistogram(sampleId,lblMapping, bucketMeta1));
+            partitions2.add(new GiniHistogram(sampleId,lblMapping, bucketMeta2));
+        }
+
+        int datasetSize = rnd.nextInt(10000);
+        for(int i = 0; i < datasetSize; i++) {
+            BootstrappedVector vec = randomVector(2, 1, true);
+            vec.features().set(1, (vec.features().get(1) * 100) % 100);
+
+            forAllHist1.addElement(vec);
+            forAllHist2.addElement(vec);
+            int partitionId = rnd.nextInt(countOfPartitions);
+            partitions1.get(partitionId).addElement(vec);
+            partitions2.get(partitionId).addElement(vec);
+        }
+
+        checkSums(forAllHist1, partitions1);
+        checkSums(forAllHist2, partitions2);
+
+        GiniHistogram emptyHist1 = new GiniHistogram(sampleId, lblMapping, bucketMeta1);
+        GiniHistogram emptyHist2 = new GiniHistogram(sampleId, lblMapping, bucketMeta2);
+        assertTrue(forAllHist1.isEqualTo(forAllHist1.plus(emptyHist1)));
+        assertTrue(forAllHist2.isEqualTo(forAllHist2.plus(emptyHist2)));
+        assertTrue(forAllHist1.isEqualTo(emptyHist1.plus(forAllHist1)));
+        assertTrue(forAllHist2.isEqualTo(emptyHist2.plus(forAllHist2)));
+    }
+
+    /** */
+    @Test
+    public void testJoin() {
+        Map<Double, Integer> lblMapping = new HashMap<>();
+        lblMapping.put(1.0, 0);
+        lblMapping.put(2.0, 1);
+        lblMapping.put(3.0, 2);
+
+        GiniHistogram catFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature1Meta);
+        GiniHistogram catFeatureSmpl2 = new GiniHistogram(0, lblMapping, feature1Meta);
+
+        GiniHistogram contFeatureSmpl1 = new GiniHistogram(0, lblMapping, feature2Meta);
+        GiniHistogram contFeatureSmpl2 = new GiniHistogram(0, lblMapping, feature2Meta);
+
+        for (BootstrappedVector vec : dataset) {
+            catFeatureSmpl1.addElement(vec);
+            contFeatureSmpl1.addElement(vec);
+        }
+
+        for (BootstrappedVector vec : toSplitDataset) {
+            catFeatureSmpl2.addElement(vec);
+            contFeatureSmpl2.addElement(vec);
+        }
+
+        GiniHistogram res1 = catFeatureSmpl1.plus(catFeatureSmpl2);
+        GiniHistogram res2 = contFeatureSmpl1.plus(contFeatureSmpl2);
+
+        checkBucketIds(res1.buckets(), new Integer[] {0, 1, 2});
+        checkBucketIds(res2.buckets(), new Integer[] {1, 4, 6, 7, 8});
+
+        //categorical feature
+        checkCounters(res1.getHistForLabel(1.0), new double[] {3, 2, 6}); //for feature values 0 and 1
+        checkBucketIds(res1.getHistForLabel(1.0).buckets(), new Integer[] {0, 1, 2});
+        checkCounters(res1.getHistForLabel(2.0), new double[] {4, 6});    //for feature value 1
+        checkBucketIds(res1.getHistForLabel(2.0).buckets(), new Integer[] {0, 1});
+        checkCounters(res1.getHistForLabel(3.0), new double[] {2});    //for feature value 0
+        checkBucketIds(res1.getHistForLabel(3.0).buckets(), new Integer[] {0});
+
+        //continuous feature
+        checkCounters(res2.getHistForLabel(1.0), new double[] {1, 1, 8, 1}); //for feature values 0 and 1
+        checkBucketIds(res2.getHistForLabel(1.0).buckets(), new Integer[] {1, 4, 6, 8});
+        checkCounters(res2.getHistForLabel(2.0), new double[] {1, 4, 0, 5});    //for feature value 1
+        checkBucketIds(res2.getHistForLabel(2.0).buckets(), new Integer[] {1, 4, 6, 7});
+        checkCounters(res2.getHistForLabel(3.0), new double[] {2});    //for feature value 0
+        checkBucketIds(res2.getHistForLabel(3.0).buckets(), new Integer[] {8});
+    }
+
+    /** Dataset. */
+    private BootstrappedVector[] dataset = new BootstrappedVector[] {
+        new BootstrappedVector(VectorUtils.of(1, -1), 1, new int[] {1, 2}),
+        new BootstrappedVector(VectorUtils.of(1, 2), 2, new int[] {2, 1}),
+        new BootstrappedVector(VectorUtils.of(0, 3), 3, new int[] {2, 0}),
+        new BootstrappedVector(VectorUtils.of(0, 1), 1, new int[] {2, 1}),
+        new BootstrappedVector(VectorUtils.of(1, -4), 2, new int[] {1, 2}),
+    };
+
+    /** To split dataset. */
+    private BootstrappedVector[] toSplitDataset = new BootstrappedVector[] {
+        new BootstrappedVector(VectorUtils.of(0, -1, 0, 0), 2, new int[] {2}),
+        new BootstrappedVector(VectorUtils.of(0, -1, 0, 0), 2, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(0, -1, 0, 0), 2, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(0, 3, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(0, 1, 0, 0), 2, new int[] {0}),
+        new BootstrappedVector(VectorUtils.of(1, 2, 0, 0), 2, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(1, 2, 0, 0), 2, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(1, 2, 0, 0), 2, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(1, -4, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 0), 1, new int[] {1}),
+        new BootstrappedVector(VectorUtils.of(2, 1, 0, 1), 1, new int[] {1}),
+    };
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramTest.java
new file mode 100644
index 0000000..df4c154
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/ImpurityHistogramTest.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.Set;
+import java.util.stream.DoubleStream;
+import java.util.stream.IntStream;
+import org.apache.ignite.ml.dataset.feature.Histogram;
+import org.apache.ignite.ml.dataset.feature.ObjectHistogram;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ImpurityHistogramTest {
+    protected static final int COUNT_OF_CLASSES = 3;
+    protected static final Map<Double, Integer> lblMapping = new HashMap<>();
+    protected Random rnd = new Random();
+
+    static {
+        for(int i = 0; i < COUNT_OF_CLASSES; i++)
+            lblMapping.put((double)i, i);
+    }
+
+    protected void checkBucketIds(Set<Integer> bucketIdsSet, Integer[] expected) {
+        Integer[] bucketIds = new Integer[bucketIdsSet.size()];
+        bucketIdsSet.toArray(bucketIds);
+        assertArrayEquals(expected, bucketIds);
+    }
+
+    protected void checkCounters(ObjectHistogram<BootstrappedVector> hist, double[] expected) {
+        double[] counters = hist.buckets().stream().mapToDouble(x -> hist.getValue(x).get()).toArray();
+        assertArrayEquals(expected, counters, 0.01);
+    }
+
+    protected BootstrappedVector randomVector(int countOfFeatures, int countOfSampes, boolean isClassification) {
+        double[] features = DoubleStream.generate(() -> rnd.nextDouble()).limit(countOfFeatures).toArray();
+        int[] counters = IntStream.generate(() -> rnd.nextInt(10)).limit(countOfSampes).toArray();
+        double lbl = isClassification ? Math.abs(rnd.nextInt() % COUNT_OF_CLASSES) : rnd.nextDouble();
+        return new BootstrappedVector(VectorUtils.of(features), lbl, counters);
+    }
+
+    protected <T extends Histogram<BootstrappedVector, T>> void checkSums(T expected, List<T> partitions) {
+        T leftSum = partitions.stream().reduce((x,y) -> x.plus(y)).get();
+        T rightSum = partitions.stream().reduce((x,y) -> y.plus(x)).get();
+        assertTrue(expected.isEqualTo(leftSum));
+        assertTrue(expected.isEqualTo(rightSum));
+    }
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramTest.java
new file mode 100644
index 0000000..41bd5ff
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/impurity/MSEHistogramTest.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.impurity;
+
+import java.util.ArrayList;
+import java.util.List;
+import org.apache.ignite.ml.dataset.feature.BucketMeta;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
+
+/** */
+public class MSEHistogramTest extends ImpurityHistogramTest {
+    /** Feature 1 meta. */
+    private BucketMeta feature1Meta = new BucketMeta(new FeatureMeta("", 0, true));
+    /** Feature 2 meta. */
+    private BucketMeta feature2Meta = new BucketMeta(new FeatureMeta("", 1, false));
+
+    /** */
+    @Before
+    public void setUp() throws Exception {
+        feature2Meta.setMinVal(-5);
+        feature2Meta.setBucketSize(1);
+    }
+
+    /** */
+    @Test
+    public void testAdd() {
+        MSEHistogram catHist1 = new MSEHistogram(0, feature1Meta);
+        MSEHistogram contHist1 = new MSEHistogram(0, feature2Meta);
+
+        MSEHistogram catHist2 = new MSEHistogram(1, feature1Meta);
+        MSEHistogram contHist2 = new MSEHistogram(1, feature2Meta);
+
+        for (BootstrappedVector vec : dataset) {
+            catHist1.addElement(vec);
+            catHist2.addElement(vec);
+            contHist1.addElement(vec);
+            contHist2.addElement(vec);
+        }
+
+        checkBucketIds(catHist1.buckets(), new Integer[] {0, 1});
+        checkBucketIds(catHist2.buckets(), new Integer[] {0, 1});
+        checkBucketIds(contHist1.buckets(), new Integer[] {1, 4, 6, 7, 8});
+        checkBucketIds(contHist2.buckets(), new Integer[] {1, 4, 6, 7, 8});
+
+        //counters
+        checkCounters(catHist1.getCounters(), new double[] {4, 4});
+        checkCounters(catHist2.getCounters(), new double[] {1, 5});
+        checkCounters(contHist1.getCounters(), new double[] {1, 1, 2, 2, 2});
+        checkCounters(contHist2.getCounters(), new double[] {2, 2, 1, 1, 0});
+
+        //ys
+        checkCounters(catHist1.getSumOfLabels(), new double[] {2 * 4 + 2 * 3, 5 + 1 + 2 * 2});
+        checkCounters(catHist2.getSumOfLabels(), new double[] {4, 2 * 5 + 2 * 1 + 2});
+        checkCounters(contHist1.getSumOfLabels(), new double[] {5 * 1, 1 * 1, 4 * 2, 2 * 2, 3 * 2});
+        checkCounters(contHist2.getSumOfLabels(), new double[]{ 2 * 5, 2 * 1, 1 * 4, 2 * 1, 0 * 3 });
+
+        //y2s
+        checkCounters(catHist1.getSumOfSquaredLabels(), new double[] {2 * 4 * 4 + 2 * 3 * 3, 5 * 5 + 1 + 2 * 2 * 2});
+        checkCounters(catHist2.getSumOfSquaredLabels(), new double[] {4 * 4, 2 * 5 * 5 + 2 * 1 * 1 + 2 * 2});
+        checkCounters(contHist1.getSumOfSquaredLabels(), new double[] {1 * 5 * 5, 1 * 1 * 1, 2 * 4 * 4, 2 * 2 * 2, 2 * 3 * 3});
+        checkCounters(contHist2.getSumOfSquaredLabels(), new double[]{ 2 * 5 * 5, 2 * 1 * 1, 1 * 4 * 4, 1 * 2 * 2, 0 * 3 * 3 });
+    }
+
+    @Test
+    public void testOfSums() {
+        int sampleId = 0;
+        BucketMeta bucketMeta1 = new BucketMeta(new FeatureMeta("", 0, false));
+        bucketMeta1.setMinVal(0.);
+        bucketMeta1.setBucketSize(0.1);
+        BucketMeta bucketMeta2 = new BucketMeta(new FeatureMeta("", 1, true));
+
+        MSEHistogram forAllHist1 = new MSEHistogram(sampleId, bucketMeta1);
+        MSEHistogram forAllHist2 = new MSEHistogram(sampleId, bucketMeta2);
+
+        List<MSEHistogram> partitions1 = new ArrayList<>();
+        List<MSEHistogram> partitions2 = new ArrayList<>();
+        int countOfPartitions = rnd.nextInt(100);
+        for(int i = 0; i < countOfPartitions; i++) {
+            partitions1.add(new MSEHistogram(sampleId, bucketMeta1));
+            partitions2.add(new MSEHistogram(sampleId, bucketMeta2));
+        }
+
+        int datasetSize = rnd.nextInt(1000);
+        for(int i = 0; i < datasetSize; i++) {
+            BootstrappedVector vec = randomVector(2, 1, false);
+            vec.features().set(1, (vec.features().get(1) * 100) % 100);
+
+            forAllHist1.addElement(vec);
+            forAllHist2.addElement(vec);
+            int partitionId = rnd.nextInt(countOfPartitions);
+            partitions1.get(partitionId).addElement(vec);
+            partitions2.get(partitionId).addElement(vec);
+        }
+
+        checkSums(forAllHist1, partitions1);
+        checkSums(forAllHist2, partitions2);
+
+        MSEHistogram emptyHist1 = new MSEHistogram(sampleId, bucketMeta1);
+        MSEHistogram emptyHist2 = new MSEHistogram(sampleId, bucketMeta2);
+        assertTrue(forAllHist1.isEqualTo(forAllHist1.plus(emptyHist1)));
+        assertTrue(forAllHist2.isEqualTo(forAllHist2.plus(emptyHist2)));
+        assertTrue(forAllHist1.isEqualTo(emptyHist1.plus(forAllHist1)));
+        assertTrue(forAllHist2.isEqualTo(emptyHist2.plus(forAllHist2)));
+    }
+
+    /** Dataset. */
+    private BootstrappedVector[] dataset = new BootstrappedVector[] {
+        new BootstrappedVector(VectorUtils.of(1, -4), 5, new int[] {1, 2}),
+        new BootstrappedVector(VectorUtils.of(1, -1), 1, new int[] {1, 2}),
+        new BootstrappedVector(VectorUtils.of(0, 1), 4, new int[] {2, 1}),
+        new BootstrappedVector(VectorUtils.of(1, 2), 2, new int[] {2, 1}),
+        new BootstrappedVector(VectorUtils.of(0, 3), 3, new int[] {2, 0}),
+    };
+}
diff --git a/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputerTest.java b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputerTest.java
new file mode 100644
index 0000000..79ee3b6
--- /dev/null
+++ b/modules/ml/src/test/java/org/apache/ignite/ml/tree/randomforest/data/statistics/NormalDistributionStatisticsComputerTest.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.ml.tree.randomforest.data.statistics;
+
+import java.util.Arrays;
+import java.util.List;
+import org.apache.ignite.ml.dataset.feature.FeatureMeta;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedDatasetPartition;
+import org.apache.ignite.ml.dataset.impl.bootstrapping.BootstrappedVector;
+import org.apache.ignite.ml.math.primitives.vector.VectorUtils;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+
+/** */
+public class NormalDistributionStatisticsComputerTest {
+    /** Features Meta. */
+    private final List<FeatureMeta> meta = Arrays.asList(
+        new FeatureMeta("", 0, false),
+        new FeatureMeta("", 1, true),
+        new FeatureMeta("", 2, false),
+        new FeatureMeta("", 3, true),
+        new FeatureMeta("", 4, false),
+        new FeatureMeta("", 5, true),
+        new FeatureMeta("", 6, false)
+    );
+
+    /** Partition. */
+    private BootstrappedDatasetPartition partition = new BootstrappedDatasetPartition(new BootstrappedVector[] {
+        new BootstrappedVector(VectorUtils.of(0, 1, 2, 1, 4, 2, 6), 0., null),
+        new BootstrappedVector(VectorUtils.of(1, 0, 3, 2, 5, 3, 7), 0., null),
+        new BootstrappedVector(VectorUtils.of(2, 1, 4, 1, 6, 2, 8), 0., null),
+        new BootstrappedVector(VectorUtils.of(3, 0, 5, 2, 7, 3, 9), 0., null),
+        new BootstrappedVector(VectorUtils.of(4, 1, 6, 1, 8, 2, 10), 0., null),
+        new BootstrappedVector(VectorUtils.of(5, 0, 7, 2, 9, 3, 11), 0., null),
+        new BootstrappedVector(VectorUtils.of(6, 1, 8, 1, 10, 2, 12), 0., null),
+        new BootstrappedVector(VectorUtils.of(7, 0, 9, 2, 11, 3, 13), 0., null),
+        new BootstrappedVector(VectorUtils.of(8, 1, 10, 1, 12, 2, 14), 0., null),
+        new BootstrappedVector(VectorUtils.of(9, 0, 11, 2, 13, 3, 15), 0., null),
+    });
+
+    private NormalDistributionStatisticsComputer computer = new NormalDistributionStatisticsComputer();
+
+    /** */
+    @Test
+    public void computeStatsOnPartitionTest() {
+        List<NormalDistributionStatistics> result = computer.computeStatsOnPartition(partition, meta);
+        NormalDistributionStatistics[] expected = new NormalDistributionStatistics[] {
+            new NormalDistributionStatistics(0, 9, 285, 45, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(2, 11, 505, 65, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(4, 13, 805, 85, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(6, 15, 1185, 105, 10),
+        };
+
+        assertEquals(expected.length, result.size());
+        for (int i = 0; i < expected.length; i++) {
+            NormalDistributionStatistics expectedStat = expected[i];
+            NormalDistributionStatistics resultStat = result.get(i);
+            assertEquals(expectedStat.mean(), resultStat.mean(), 0.01);
+            assertEquals(expectedStat.variance(), resultStat.variance(), 0.01);
+            assertEquals(expectedStat.std(), resultStat.std(), 0.01);
+            assertEquals(expectedStat.min(), resultStat.min(), 0.01);
+            assertEquals(expectedStat.max(), resultStat.max(), 0.01);
+        }
+    }
+
+    /** */
+    @Test
+    public void reduceStatsTest() {
+        List<NormalDistributionStatistics> left = Arrays.asList(
+            new NormalDistributionStatistics(0, 9, 285, 45, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(2, 11, 505, 65, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(4, 13, 805, 85, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(6, 15, 1185, 105, 10)
+        );
+
+        List<NormalDistributionStatistics> right = Arrays.asList(
+            new NormalDistributionStatistics(6, 15, 1185, 105, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(4, 13, 805, 85, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(2, 11, 505, 65, 10),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(0, 9, 285, 45, 10)
+        );
+
+        List<NormalDistributionStatistics> result = computer.reduceStats(left, right, meta);
+        NormalDistributionStatistics[] expected = new NormalDistributionStatistics[] {
+            new NormalDistributionStatistics(0, 15, 1470, 150, 20),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(2, 13, 1310, 150, 20),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(2, 13, 1310, 150, 20),
+            new NormalDistributionStatistics(Double.POSITIVE_INFINITY, Double.NEGATIVE_INFINITY, 0, 0, 10),
+            new NormalDistributionStatistics(0, 15, 1470, 150, 20)
+        };
+
+        assertEquals(expected.length, result.size());
+        for (int i = 0; i < expected.length; i++) {
+            NormalDistributionStatistics expectedStat = expected[i];
+            NormalDistributionStatistics resultStat = result.get(i);
+            assertEquals(expectedStat.mean(), resultStat.mean(), 0.01);
+            assertEquals(expectedStat.variance(), resultStat.variance(), 0.01);
+            assertEquals(expectedStat.std(), resultStat.std(), 0.01);
+            assertEquals(expectedStat.min(), resultStat.min(), 0.01);
+            assertEquals(expectedStat.max(), resultStat.max(), 0.01);
+        }
+    }
+}
diff --git a/modules/osgi-karaf/src/main/resources/features.xml b/modules/osgi-karaf/src/main/resources/features.xml
index 3e3989e..b9af29f 100644
--- a/modules/osgi-karaf/src/main/resources/features.xml
+++ b/modules/osgi-karaf/src/main/resources/features.xml
@@ -164,7 +164,7 @@
             <![CDATA[The Apache Ignite Kafka module + dependencies. This module installs the Scala 2.11 library bundle.]]>
         </details>
         <feature prerequisite="true">wrap</feature>
-        <bundle start="true" dependency="true">mvn:org.scala-lang/scala-library/${scala211.library.version}</bundle>
+        <bundle start="true" dependency="true">mvn:org.scala-lang/scala-library/${scala.library.version}</bundle>
         <bundle start="true" dependency="true">wrap:mvn:com.yammer.metrics/metrics-core/${yammer.metrics.core.version}$Bundle-SymbolicName=yammer-metrics-core&amp;Bundle-Version=2.2.0&amp;Export-Package=*;-noimport:=true;version=${yammer.metrics.core.version}</bundle>
         <bundle start="true" dependency="true">wrap:mvn:com.yammer.metrics/metrics-annotation/${yammer.metrics.annotation.version}$Bundle-SymbolicName=yammer-metrics-annotation&amp;Bundle-Version=2.2.0&amp;Export-Package=*;-noimport:=true;version=${yammer.metrics.annotation.version}</bundle>
         <bundle start="true" dependency="true">wrap:mvn:org.apache.kafka/connect-api/${kafka.version}$Bundle-SymbolicName=connect-api&amp;Bundle-Version=${kafka.version}</bundle>
@@ -228,7 +228,7 @@
         <details>
             <![CDATA[The Apache Ignite Scala 2.11 integration module + dependencies. This module installs the Scala 2.11 library bundle.]]>
         </details>
-        <bundle start="true" dependency="true">mvn:org.scala-lang/scala-library/${scala211.library.version}</bundle>
+        <bundle start="true" dependency="true">mvn:org.scala-lang/scala-library/${scala.library.version}</bundle>
         <bundle start="true">mvn:org.apache.ignite/ignite-scalar/${project.version}</bundle>
     </feature>
 
diff --git a/modules/platforms/cpp/binary/include/ignite/binary/binary_raw_reader.h b/modules/platforms/cpp/binary/include/ignite/binary/binary_raw_reader.h
index c06cb91..73d2525 100644
--- a/modules/platforms/cpp/binary/include/ignite/binary/binary_raw_reader.h
+++ b/modules/platforms/cpp/binary/include/ignite/binary/binary_raw_reader.h
@@ -313,18 +313,30 @@
              */
             std::string ReadString()
             {
+                std::string res;
+
+                ReadString(res);
+
+                return res;
+            }
+
+            /**
+             * Read string from the stream.
+             *
+             * @param dst String.
+             */
+            void ReadString(std::string& dst)
+            {
                 int32_t len = ReadString(NULL, 0);
 
                 if (len != -1)
                 {
-                    ignite::common::FixedSizeArray<char> arr(len + 1);
+                    dst.resize(static_cast<size_t>(len));
 
-                    ReadString(arr.GetData(), static_cast<int32_t>(arr.GetSize()));
-
-                    return std::string(arr.GetData());
+                    ReadString(&dst[0], len);
                 }
                 else
-                    return std::string();
+                    dst.clear();
             }
 
             /**
diff --git a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_reader_impl.h b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_reader_impl.h
index 998c7c3..a3b880c 100644
--- a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_reader_impl.h
+++ b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_reader_impl.h
@@ -37,6 +37,11 @@
 
 namespace ignite
 {
+    namespace binary
+    {
+        class BinaryReader;
+    }
+
     namespace impl
     {
         namespace binary
@@ -897,9 +902,20 @@
                 /**
                  * Read object.
                  *
-                 * @return Read object.
+                 * @param res Read object.
                  */
                 template<typename T>
+                void ReadTopObject(T& res)
+                {
+                    return ignite::binary::ReadHelper<T>::Read(*this, res);
+                }
+
+                /**
+                 * Read object.
+                 *
+                 * @return Read object.
+                 */
+                template<typename R, typename T>
                 void ReadTopObject0(T& res)
                 {
                     int32_t pos = stream->Position();
@@ -928,7 +944,7 @@
 
                             stream->Position(curPos + portOff); // Position stream right on the object.
 
-                            ReadTopObject0<T>(res);
+                            ReadTopObject0<R, T>(res);
 
                             stream->Position(curPos + portLen + 4); // Position stream after binary.
 
@@ -1003,7 +1019,7 @@
                             BinaryReaderImpl readerImpl(stream, &idRslvr, pos, usrType,
                                                         typeId, hashCode, len, rawOff,
                                                         footerBegin, footerEnd, schemaType);
-                            ignite::binary::BinaryReader reader(&readerImpl);
+                            R reader(&readerImpl);
 
                             BType::Read(reader, res);
 
@@ -1426,43 +1442,56 @@
             };
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<int8_t>(int8_t& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int8_t>(int8_t& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<bool>(bool& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, bool>(bool& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<int16_t>(int16_t& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int16_t>(int16_t& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<uint16_t>(uint16_t& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, uint16_t>(uint16_t& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<int32_t>(int32_t& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int32_t>(int32_t& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<int64_t>(int64_t& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int64_t>(int64_t& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<float>(float& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, float>(float& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<double>(double& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, double>(double& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<Guid>(Guid& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Guid>(Guid& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<Date>(Date& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Date>(Date& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<Timestamp>(Timestamp& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Timestamp>(Timestamp& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<Time>(Time& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Time>(Time& res);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryReaderImpl::ReadTopObject0<std::string>(std::string& res);
+            void IGNITE_IMPORT_EXPORT
+            BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, std::string>(std::string& res);
 
             template<>
             inline int8_t BinaryReaderImpl::GetNull() const
diff --git a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_type_impl.h b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_type_impl.h
index 3183d4b..e9bbace 100644
--- a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_type_impl.h
+++ b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_type_impl.h
@@ -66,7 +66,7 @@
             template<typename W>
             static void Write(W& writer, const T& val)
             {
-                writer.WriteTopObject0(val);
+                writer.template WriteTopObject0<BinaryWriter>(val);
             }
         };
 
@@ -82,7 +82,7 @@
                 if (!val)
                     writer.WriteNull0();
                 else
-                    writer.WriteTopObject0(*val);
+                    writer.template WriteTopObject0<BinaryWriter>(*val);
             }
         };
 
@@ -97,10 +97,16 @@
             {
                 T res;
 
-                reader.template ReadTopObject0<T>(res);
+                Read<R>(reader, res);
 
                 return res;
             }
+
+            template<typename R>
+            static void Read(R& reader, T& val)
+            {
+                reader.template ReadTopObject0<BinaryReader, T>(val);
+            }
         };
 
         /**
@@ -117,10 +123,16 @@
 
                 std::auto_ptr<T> res(new T());
 
-                reader.template ReadTopObject0<T>(*res);
+                reader.template ReadTopObject0<BinaryReader, T>(*res);
 
                 return res.release();
             }
+
+            template<typename R>
+            static void Read(R& reader, T*& ptr)
+            {
+                ptr = Read<R>(reader);
+            }
         };
     }
 }
diff --git a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_writer_impl.h b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_writer_impl.h
index 4364bf5..e6cd487 100644
--- a/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_writer_impl.h
+++ b/modules/platforms/cpp/binary/include/ignite/impl/binary/binary_writer_impl.h
@@ -41,6 +41,11 @@
 
 namespace ignite
 {
+    namespace binary
+    {
+        class BinaryWriter;
+    }
+
     namespace impl
     {
         namespace binary
@@ -715,7 +720,7 @@
                  *
                  * @param obj Object to write.
                  */
-                template<typename T>
+                template<typename W, typename T>
                 void WriteTopObject0(const T& obj)
                 {
                     typedef ignite::binary::BinaryType<T> BType;
@@ -736,7 +741,7 @@
                         int32_t pos = stream->Position();
 
                         BinaryWriterImpl writerImpl(stream, &idRslvr, metaMgr, metaHnd.Get(), pos);
-                        ignite::binary::BinaryWriter writer(&writerImpl);
+                        W writer(&writerImpl);
 
                         stream->WriteInt8(IGNITE_HDR_FULL);
                         stream->WriteInt8(IGNITE_PROTO_VER);
@@ -1019,43 +1024,56 @@
             };
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const int8_t& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int8_t>(const int8_t& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const bool& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, bool>(const bool& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const int16_t& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int16_t>(const int16_t& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const uint16_t& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, uint16_t>(const uint16_t& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const int32_t& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int32_t>(const int32_t& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const int64_t& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int64_t>(const int64_t& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const float& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, float>(const float& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const double& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, double>(const double& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const Guid& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Guid>(const Guid& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const Date& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Date>(const Date& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const Timestamp& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Timestamp>(const Timestamp& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const Time& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Time>(const Time& obj);
 
             template<>
-            void IGNITE_IMPORT_EXPORT BinaryWriterImpl::WriteTopObject0(const std::string& obj);
+            void IGNITE_IMPORT_EXPORT
+            BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, std::string>(const std::string& obj);
         }
     }
 }
diff --git a/modules/platforms/cpp/binary/src/impl/binary/binary_reader_impl.cpp b/modules/platforms/cpp/binary/src/impl/binary/binary_reader_impl.cpp
index 5e37887..3941b60 100644
--- a/modules/platforms/cpp/binary/src/impl/binary/binary_reader_impl.cpp
+++ b/modules/platforms/cpp/binary/src/impl/binary/binary_reader_impl.cpp
@@ -762,62 +762,62 @@
                 rawMode = true;
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<int8_t>(int8_t& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int8_t>(int8_t& res)
             {
                 res = ReadTopObject0<int8_t>(IGNITE_TYPE_BYTE, BinaryUtils::ReadInt8);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<bool>(bool& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, bool>(bool& res)
             {
                 res = ReadTopObject0<bool>(IGNITE_TYPE_BOOL, BinaryUtils::ReadBool);
             }
 
             template <>
-            void BinaryReaderImpl::ReadTopObject0<int16_t>(int16_t& res)
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int16_t>(int16_t& res)
             {
                 res = ReadTopObject0<int16_t>(IGNITE_TYPE_SHORT, BinaryUtils::ReadInt16);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<uint16_t>(uint16_t& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, uint16_t>(uint16_t& res)
             {
                 res = ReadTopObject0<uint16_t>(IGNITE_TYPE_CHAR, BinaryUtils::ReadUInt16);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<int32_t>(int32_t& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int32_t>(int32_t& res)
             {
                 res = ReadTopObject0<int32_t>(IGNITE_TYPE_INT, BinaryUtils::ReadInt32);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<int64_t>(int64_t& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, int64_t>(int64_t& res)
             {
                 res = ReadTopObject0<int64_t>(IGNITE_TYPE_LONG, BinaryUtils::ReadInt64);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<float>(float& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, float>(float& res)
             {
                 res = ReadTopObject0<float>(IGNITE_TYPE_FLOAT, BinaryUtils::ReadFloat);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<double>(double& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, double>(double& res)
             {
                 res = ReadTopObject0<double>(IGNITE_TYPE_DOUBLE, BinaryUtils::ReadDouble);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<Guid>(Guid& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Guid>(Guid& res)
             {
                 res = ReadTopObject0<Guid>(IGNITE_TYPE_UUID, BinaryUtils::ReadGuid);
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<Date>(Date& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Date>(Date& res)
             {
                 int8_t typeId = stream->ReadInt8();
 
@@ -835,20 +835,20 @@
                 }
             }
 
-            template <>
-            void BinaryReaderImpl::ReadTopObject0<Timestamp>(Timestamp& res)
+            template<>
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Timestamp>(Timestamp& res)
             {
                 res = ReadTopObject0<Timestamp>(IGNITE_TYPE_TIMESTAMP, BinaryUtils::ReadTimestamp);
             }
 
             template<>
-            void BinaryReaderImpl::ReadTopObject0<Time>(Time& res)
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, Time>(Time& res)
             {
                 res = ReadTopObject0<Time>(IGNITE_TYPE_TIME, BinaryUtils::ReadTime);
             }
 
             template<>
-            void BinaryReaderImpl::ReadTopObject0<std::string>(std::string& res)
+            void BinaryReaderImpl::ReadTopObject0<ignite::binary::BinaryReader, std::string>(std::string& res)
             {
                 int8_t typeId = stream->ReadInt8();
 
diff --git a/modules/platforms/cpp/binary/src/impl/binary/binary_writer_impl.cpp b/modules/platforms/cpp/binary/src/impl/binary/binary_writer_impl.cpp
index 2ac783a..51be3a0 100644
--- a/modules/platforms/cpp/binary/src/impl/binary/binary_writer_impl.cpp
+++ b/modules/platforms/cpp/binary/src/impl/binary/binary_writer_impl.cpp
@@ -260,7 +260,7 @@
                     stream->WriteInt32(len);
 
                     for (int i = 0; i < len; i++)
-                        WriteTopObject0(val[i]);
+                        WriteTopObject0<ignite::binary::BinaryWriter>(val[i]);
                 }
                 else
                 {
@@ -323,7 +323,7 @@
                     stream->WriteInt32(len);
 
                     for (int i = 0; i < len; i++)
-                        WriteTopObject0(val[i]);
+                        WriteTopObject0<ignite::binary::BinaryWriter>(val[i]);
                 }
                 else
                     stream->WriteInt8(IGNITE_HDR_NULL);
@@ -384,7 +384,7 @@
                     stream->WriteInt32(len);
 
                     for (int i = 0; i < len; i++)
-                        WriteTopObject0(val[i]);
+                        WriteTopObject0<ignite::binary::BinaryWriter>(val[i]);
                 }
                 else
                     stream->WriteInt8(IGNITE_HDR_NULL);
@@ -445,7 +445,7 @@
                     stream->WriteInt32(len);
 
                     for (int i = 0; i < len; i++)
-                        WriteTopObject0(val[i]);
+                        WriteTopObject0<ignite::binary::BinaryWriter>(val[i]);
                 }
                 else
                     stream->WriteInt8(IGNITE_HDR_NULL);
@@ -687,80 +687,80 @@
                     metaHnd->OnFieldWritten(fieldId, fieldName, fieldTypeId);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<int8_t>(const int8_t& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int8_t>(const int8_t& obj)
             {
                 WriteTopObject0<int8_t>(obj, BinaryUtils::WriteInt8, IGNITE_TYPE_BYTE);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<bool>(const bool& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, bool>(const bool& obj)
             {
                 WriteTopObject0<bool>(obj, BinaryUtils::WriteBool, IGNITE_TYPE_BOOL);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<int16_t>(const int16_t& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int16_t>(const int16_t& obj)
             {
                 WriteTopObject0<int16_t>(obj, BinaryUtils::WriteInt16, IGNITE_TYPE_SHORT);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<uint16_t>(const uint16_t& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, uint16_t>(const uint16_t& obj)
             {
                 WriteTopObject0<uint16_t>(obj, BinaryUtils::WriteUInt16, IGNITE_TYPE_CHAR);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<int32_t>(const int32_t& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int32_t>(const int32_t& obj)
             {
                 WriteTopObject0<int32_t>(obj, BinaryUtils::WriteInt32, IGNITE_TYPE_INT);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<int64_t>(const int64_t& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, int64_t>(const int64_t& obj)
             {
                 WriteTopObject0<int64_t>(obj, BinaryUtils::WriteInt64, IGNITE_TYPE_LONG);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<float>(const float& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, float>(const float& obj)
             {
                 WriteTopObject0<float>(obj, BinaryUtils::WriteFloat, IGNITE_TYPE_FLOAT);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<double>(const double& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, double>(const double& obj)
             {
                 WriteTopObject0<double>(obj, BinaryUtils::WriteDouble, IGNITE_TYPE_DOUBLE);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<Guid>(const Guid& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Guid>(const Guid& obj)
             {
                 WriteTopObject0<Guid>(obj, BinaryUtils::WriteGuid, IGNITE_TYPE_UUID);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<Date>(const Date& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Date>(const Date& obj)
             {
                 WriteTopObject0<Date>(obj, BinaryUtils::WriteDate, IGNITE_TYPE_DATE);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<Timestamp>(const Timestamp& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Timestamp>(const Timestamp& obj)
             {
                 WriteTopObject0<Timestamp>(obj, BinaryUtils::WriteTimestamp, IGNITE_TYPE_TIMESTAMP);
             }
 
-            template <>
-            void BinaryWriterImpl::WriteTopObject0<Time>(const Time& obj)
+            template<>
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, Time>(const Time& obj)
             {
                 WriteTopObject0<Time>(obj, BinaryUtils::WriteTime, IGNITE_TYPE_TIME);
             }
 
             template<>
-            void BinaryWriterImpl::WriteTopObject0(const std::string& obj)
+            void BinaryWriterImpl::WriteTopObject0<ignite::binary::BinaryWriter, std::string>(const std::string& obj)
             {
                 const char* obj0 = obj.c_str();
 
diff --git a/modules/platforms/cpp/common/include/ignite/common/utils.h b/modules/platforms/cpp/common/include/ignite/common/utils.h
index 81b5432..4edbf4b 100644
--- a/modules/platforms/cpp/common/include/ignite/common/utils.h
+++ b/modules/platforms/cpp/common/include/ignite/common/utils.h
@@ -548,4 +548,4 @@
     }
 }
 
-#endif //_IGNITE_COMMON_UTILS
\ No newline at end of file
+#endif //_IGNITE_COMMON_UTILS
diff --git a/modules/platforms/cpp/core/include/ignite/cache/cache.h b/modules/platforms/cpp/core/include/ignite/cache/cache.h
index b16d7f5..c230361 100644
--- a/modules/platforms/cpp/core/include/ignite/cache/cache.h
+++ b/modules/platforms/cpp/core/include/ignite/cache/cache.h
@@ -259,12 +259,14 @@
              */
             V LocalPeek(const K& key, int32_t peekModes, IgniteError& err)
             {
+                V val;
+
                 impl::InCacheLocalPeekOperation<K> inOp(key, peekModes);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(val);
 
                 impl.Get()->LocalPeek(inOp, outOp, peekModes, err);
 
-                return outOp.GetResult();
+                return val;
             }
 
             /**
@@ -305,12 +307,13 @@
              */
             V Get(const K& key, IgniteError& err)
             {
+                V val;
                 impl::In1Operation<K> inOp(key);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(val);
 
                 impl.Get()->Get(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return val;
             }
 
             /**
@@ -351,12 +354,14 @@
              */
             std::map<K, V> GetAll(const std::set<K>& keys, IgniteError& err)
             {
+                std::map<K, V> res;
+
                 impl::InSetOperation<K> inOp(keys);
-                impl::OutMapOperation<K, V> outOp;
+                impl::OutMapOperation<K, V> outOp(res);
 
                 impl.Get()->GetAll(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return res;
             }
 
             /**
@@ -515,12 +520,14 @@
              */
             V GetAndPut(const K& key, const V& val, IgniteError& err)
             {
+                V oldVal;
+
                 impl::In2Operation<K, V> inOp(key, val);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(oldVal);
 
                 impl.Get()->GetAndPut(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return oldVal;
             }
 
             /**
@@ -559,12 +566,14 @@
              */
             V GetAndReplace(const K& key, const V& val, IgniteError& err)
             {
+                V oldVal;
+
                 impl::In2Operation<K, V> inOp(key, val);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(oldVal);
 
                 impl.Get()->GetAndReplace(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return oldVal;
             }
 
             /**
@@ -597,12 +606,14 @@
              */
             V GetAndRemove(const K& key, IgniteError& err)
             {
+                V oldVal;
+
                 impl::In1Operation<K> inOp(key);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(oldVal);
 
                 impl.Get()->GetAndRemove(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return oldVal;
             }
 
             /**
@@ -694,12 +705,14 @@
              */
             V GetAndPutIfAbsent(const K& key, const V& val, IgniteError& err)
             {
+                V oldVal;
+
                 impl::In2Operation<K, V> inOp(key, val);
-                impl::Out1Operation<V> outOp;
+                impl::Out1Operation<V> outOp(oldVal);
 
                 impl.Get()->GetAndPutIfAbsent(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return oldVal;
             }
 
             /**
@@ -1607,14 +1620,15 @@
             {
                 typedef impl::cache::CacheEntryProcessorHolder<P, A> ProcessorHolder;
 
+                R res;
                 ProcessorHolder procHolder(processor, arg);
 
                 impl::In2Operation<K, ProcessorHolder> inOp(key, procHolder);
-                impl::Out1Operation<R> outOp;
+                impl::Out1Operation<R> outOp(res);
 
                 impl.Get()->Invoke(inOp, outOp, err);
 
-                return outOp.GetResult();
+                return res;
             }
 
             /**
diff --git a/modules/platforms/cpp/core/include/ignite/cache/query/query_cursor.h b/modules/platforms/cpp/core/include/ignite/cache/query/query_cursor.h
index 3f7ccce..e77706b 100644
--- a/modules/platforms/cpp/core/include/ignite/cache/query/query_cursor.h
+++ b/modules/platforms/cpp/core/include/ignite/cache/query/query_cursor.h
@@ -158,19 +158,14 @@
                     impl::cache::query::QueryCursorImpl* impl0 = impl.Get();
 
                     if (impl0) {
-                        impl::Out2Operation<K, V> outOp;
+                        K key;
+                        V val;
+
+                        impl::Out2Operation<K, V> outOp(key, val);
 
                         impl0->GetNext(outOp, err);
 
-                        if (err.GetCode() == IgniteError::IGNITE_SUCCESS) 
-                        {
-                            K& key = outOp.Get1();
-                            V& val = outOp.Get2();
-
-                            return CacheEntry<K, V>(key, val);
-                        }
-                        else 
-                            return CacheEntry<K, V>();
+                        return CacheEntry<K, V>(key, val);
                     }
                     else
                     {
diff --git a/modules/platforms/cpp/core/include/ignite/impl/operations.h b/modules/platforms/cpp/core/include/ignite/impl/operations.h
index 1645fb56..9f816bf 100644
--- a/modules/platforms/cpp/core/include/ignite/impl/operations.h
+++ b/modules/platforms/cpp/core/include/ignite/impl/operations.h
@@ -66,7 +66,7 @@
         public:
             /**
              * Constructor.
-             * 
+             *
              * @param val Value.
              */
             In1Operation(const T& val) : val(val)
@@ -248,7 +248,7 @@
             const T& key;
 
             /** Peek modes. */
-            int32_t peekModes; 
+            int32_t peekModes;
 
             IGNITE_NO_COPY_ASSIGNMENT(InCacheLocalPeekOperation)
         };
@@ -333,15 +333,18 @@
         public:
             /**
              * Constructor.
+             *
+             * @param val Value.
              */
-            Out1Operation()
+            Out1Operation(T& val) :
+                val(val)
             {
                 // No-op.
             }
 
             virtual void ProcessOutput(binary::BinaryReaderImpl& reader)
             {
-                val = reader.ReadTopObject<T>();
+                reader.ReadTopObject<T>(val);
             }
 
             virtual void SetNull()
@@ -349,18 +352,9 @@
                 val = binary::BinaryUtils::GetDefaultValue<T>();
             }
 
-            /**
-             * Get value.
-             *
-             * @param Value.
-             */
-            T GetResult()
-            {
-                return val;
-            }
         private:
             /** Value. */
-            T val; 
+            T& val;
 
             IGNITE_NO_COPY_ASSIGNMENT(Out1Operation)
         };
@@ -374,16 +368,21 @@
         public:
             /**
              * Constructor.
+             *
+             * @param val1 Value 1.
+             * @param val2 Value 2.
              */
-            Out2Operation()
+            Out2Operation(T1& val1, T2& val2) :
+                val1(val1),
+                val2(val2)
             {
                 // No-op.
             }
 
             virtual void ProcessOutput(binary::BinaryReaderImpl& reader)
             {
-                val1 = reader.ReadTopObject<T1>();
-                val2 = reader.ReadTopObject<T2>();
+                reader.ReadTopObject<T1>(val1);
+                reader.ReadTopObject<T2>(val2);
             }
 
             virtual void SetNull()
@@ -392,32 +391,12 @@
                 val2 = binary::BinaryUtils::GetDefaultValue<T2>();
             }
 
-            /**
-             * Get value 1.
-             *
-             * @param Value 1.
-             */
-            T1& Get1()
-            {
-                return val1;
-            }
-
-            /**
-             * Get value 2.
-             *
-             * @param Value 2.
-             */
-            T2& Get2()
-            {
-                return val2;
-            }
-
         private:
             /** Value 1. */
-            T1 val1; 
-            
+            T1& val1;
+
             /** Value 2. */
-            T2 val2; 
+            T2& val2;
 
             IGNITE_NO_COPY_ASSIGNMENT(Out2Operation)
         };
@@ -431,18 +410,27 @@
         public:
             /**
              * Constructor.
+             *
+             * @param val1 Value 1.
+             * @param val2 Value 2.
+             * @param val3 Value 3.
+             * @param val4 Value 4.
              */
-            Out4Operation()
+            Out4Operation(T1& val1, T2& val2, T3& val3, T4& val4) :
+                val1(val1),
+                val2(val2),
+                val3(val3),
+                val4(val4)
             {
                 // No-op.
             }
 
             virtual void ProcessOutput(binary::BinaryReaderImpl& reader)
             {
-                val1 = reader.ReadTopObject<T1>();
-                val2 = reader.ReadTopObject<T2>();
-                val3 = reader.ReadTopObject<T3>();
-                val4 = reader.ReadTopObject<T4>();
+                reader.ReadTopObject<T1>(val1);
+                reader.ReadTopObject<T2>(val2);
+                reader.ReadTopObject<T3>(val3);
+                reader.ReadTopObject<T4>(val4);
             }
 
             virtual void SetNull()
@@ -453,58 +441,18 @@
                 val4 = binary::BinaryUtils::GetDefaultValue<T4>();
             }
 
-            /**
-             * Get value 1.
-             *
-             * @param Value 1.
-             */
-            T1& Get1()
-            {
-                return val1;
-            }
-
-            /**
-             * Get value 2.
-             *
-             * @param Value 2.
-             */
-            T2& Get2()
-            {
-                return val2;
-            }
-
-            /**
-             * Get value 3.
-             *
-             * @param Value 3.
-             */
-            T3& Get3()
-            {
-                return val3;
-            }
-
-            /**
-             * Get value 4.
-             *
-             * @param Value 4.
-             */
-            T4& Get4()
-            {
-                return val4;
-            }
-
         private:
             /** Value 1. */
-            T1 val1; 
-            
+            T1& val1;
+
             /** Value 2. */
-            T2 val2;
+            T2& val2;
 
             /** Value 3. */
-            T3 val3;
+            T3& val3;
 
             /** Value 4. */
-            T4 val4;
+            T4& val4;
 
             IGNITE_NO_COPY_ASSIGNMENT(Out4Operation)
         };
@@ -518,8 +466,11 @@
         public:
             /**
              * Constructor.
+             *
+             * @param val Value.
              */
-            OutMapOperation()
+            OutMapOperation(std::map<T1, T2>& val) :
+                val(val)
             {
                 // No-op.
             }
@@ -550,18 +501,9 @@
                 // No-op.
             }
 
-            /**
-             * Get value.
-             *
-             * @return Value.
-             */
-            std::map<T1, T2> GetResult()
-            {
-                return val;
-            }
         private:
             /** Value. */
-            std::map<T1, T2> val;
+            std::map<T1, T2>& val;
 
             IGNITE_NO_COPY_ASSIGNMENT(OutMapOperation)
         };
@@ -587,7 +529,7 @@
 
                 res.reserve(res.size() + cnt);
 
-                for (int i = 0; i < cnt; i++) 
+                for (int i = 0; i < cnt; i++)
                 {
                     K key = reader.ReadTopObject<K>();
                     V val = reader.ReadTopObject<V>();
@@ -604,7 +546,7 @@
         private:
             /** Entries. */
             std::vector<ignite::cache::CacheEntry<K, V> >& res;
-            
+
             IGNITE_NO_COPY_ASSIGNMENT(OutQueryGetAllOperation)
         };
 
@@ -645,7 +587,7 @@
         private:
             /** Out iter. */
             Iter iter;
-            
+
             IGNITE_NO_COPY_ASSIGNMENT(OutQueryGetAllOperationIter)
         };
 
diff --git a/modules/platforms/cpp/core/src/impl/cluster/cluster_group_impl.cpp b/modules/platforms/cpp/core/src/impl/cluster/cluster_group_impl.cpp
index 91f9d30..669a971 100644
--- a/modules/platforms/cpp/core/src/impl/cluster/cluster_group_impl.cpp
+++ b/modules/platforms/cpp/core/src/impl/cluster/cluster_group_impl.cpp
@@ -80,7 +80,7 @@
             {
                 IgniteError err;
 
-                int64_t res = OutInOpLong(Command::SET_ACTIVE, active ? 1 : 0, err);
+                OutInOpLong(Command::SET_ACTIVE, active ? 1 : 0, err);
 
                 IgniteError::ThrowIfNeeded(err);
             }
diff --git a/modules/platforms/cpp/odbc-test/Makefile.am b/modules/platforms/cpp/odbc-test/Makefile.am
index 87e3c89..14dae7c 100644
--- a/modules/platforms/cpp/odbc-test/Makefile.am
+++ b/modules/platforms/cpp/odbc-test/Makefile.am
@@ -58,6 +58,7 @@
     src/parser_test.cpp \
     src/cursor_test.cpp \
     src/connection_info_test.cpp \
+    src/connection_test.cpp \
     src/application_data_buffer_test.cpp \
     src/column_test.cpp \
     src/configuration_test.cpp \
@@ -84,6 +85,7 @@
     src/errors_test.cpp \
     src/odbc_test_suite.cpp \
     src/types_test.cpp \
+    src/transaction_test.cpp \
     src/authentication_test.cpp \
     ../odbc/src/log.cpp \
     ../odbc/src/cursor.cpp \
@@ -100,7 +102,8 @@
     ../odbc/src/column.cpp \
     ../odbc/src/common_types.cpp \
     ../odbc/src/utility.cpp \
-    ../odbc/src/result_page.cpp
+    ../odbc/src/result_page.cpp \
+    ../odbc/src/nested_tx_mode.cpp
 
 run-check: check
 	./ignite-odbc-tests -p
diff --git a/modules/platforms/cpp/odbc-test/config/queries-default.xml b/modules/platforms/cpp/odbc-test/config/queries-default.xml
index dbe3a10..996ef6e 100644
--- a/modules/platforms/cpp/odbc-test/config/queries-default.xml
+++ b/modules/platforms/cpp/odbc-test/config/queries-default.xml
@@ -40,97 +40,8 @@
 
         <property name="cacheConfiguration">
             <list>
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="atomicityMode" value="TRANSACTIONAL"/>
-                    <property name="writeSynchronizationMode" value="FULL_SYNC"/>
-
-                    <!-- Configure type metadata to enable queries. -->
-                    <property name="queryEntities">
-                        <list>
-                            <bean class="org.apache.ignite.cache.QueryEntity">
-                                <property name="keyType" value="java.lang.Long"/>
-                                <property name="valueType" value="TestType"/>
-
-                                <property name="fields">
-                                    <map>
-                                        <entry key="i8Field" value="java.lang.Byte"/>
-                                        <entry key="i16Field" value="java.lang.Short"/>
-                                        <entry key="i32Field" value="java.lang.Integer"/>
-                                        <entry key="i64Field" value="java.lang.Long"/>
-                                        <entry key="strField" value="java.lang.String"/>
-                                        <entry key="floatField" value="java.lang.Float"/>
-                                        <entry key="doubleField" value="java.lang.Double"/>
-                                        <entry key="boolField" value="java.lang.Boolean"/>
-                                        <entry key="guidField" value="java.util.UUID"/>
-                                        <entry key="dateField" value="java.util.Date"/>
-                                        <entry key="timeField" value="java.sql.Time"/>
-                                        <entry key="timestampField" value="java.sql.Timestamp"/>
-                                        <entry key="i8ArrayField" value="[B"/>
-                                    </map>
-                                </property>
-
-                                <property name="keyFields">
-                                    <list></list>
-                                </property>
-
-                                <property name="indexes">
-                                    <list>
-                                        <bean class="org.apache.ignite.cache.QueryIndex">
-                                            <constructor-arg value="i32Field"/>
-                                        </bean>
-                                        <bean class="org.apache.ignite.cache.QueryIndex">
-                                            <constructor-arg value="i64Field"/>
-                                        </bean>
-                                    </list>
-                                </property>
-                            </bean>
-                        </list>
-                    </property>
-                </bean>
-
-                <bean class="org.apache.ignite.configuration.CacheConfiguration">
-                    <property name="name" value="cache2"/>
-                    <property name="cacheMode" value="PARTITIONED"/>
-                    <property name="atomicityMode" value="TRANSACTIONAL"/>
-                    <property name="writeSynchronizationMode" value="FULL_SYNC"/>
-
-                    <!-- Configure type metadata to enable queries. -->
-                    <property name="queryEntities">
-                        <list>
-                            <bean class="org.apache.ignite.cache.QueryEntity">
-                                <property name="keyType" value="java.lang.Long"/>
-                                <property name="valueType" value="ComplexType"/>
-
-                                <property name="keyFieldName" value="k"/>
-                                <property name="valueFieldName" value="v"/>
-
-                                <property name="fields">
-                                    <map>
-                                        <entry key="k" value="java.lang.Long"/>
-                                        <entry key="v" value="ComplexType"/>
-                                        <entry key="i32Field" value="java.lang.Integer"/>
-                                        <entry key="objField" value="TestObject"/>
-                                        <entry key="strField" value="java.lang.String"/>
-                                    </map>
-                                </property>
-
-                                <property name="keyFields">
-                                    <list></list>
-                                </property>
-
-                                <property name="indexes">
-                                    <list>
-                                        <bean class="org.apache.ignite.cache.QueryIndex">
-                                            <constructor-arg value="i32Field"/>
-                                        </bean>
-                                    </list>
-                                </property>
-                            </bean>
-                        </list>
-                    </property>
-                </bean>
+                <bean parent="test-type-template"/>
+                <bean parent="complex-type-template"/>
             </list>
         </property>
 
@@ -156,4 +67,103 @@
             </bean>
         </property>
     </bean>
+
+    <bean id="test-type-template" abstract="true" class="org.apache.ignite.configuration.CacheConfiguration">
+        <property name="name" value="cache"/>
+        <property name="writeSynchronizationMode" value="FULL_SYNC"/>
+        <property name="cacheMode" value="PARTITIONED"/>
+        <property name="atomicityMode" value="TRANSACTIONAL"/>
+
+        <!-- Configure type metadata to enable queries. -->
+        <property name="queryEntities">
+            <list>
+                <bean class="org.apache.ignite.cache.QueryEntity">
+                    <property name="keyType" value="java.lang.Long"/>
+                    <property name="valueType" value="TestType"/>
+
+
+                    <property name="fields">
+                        <map>
+                            <entry key="i8Field" value="java.lang.Byte"/>
+                            <entry key="i16Field" value="java.lang.Short"/>
+                            <entry key="i32Field" value="java.lang.Integer"/>
+                            <entry key="i64Field" value="java.lang.Long"/>
+                            <entry key="strField" value="java.lang.String"/>
+                            <entry key="floatField" value="java.lang.Float"/>
+                            <entry key="doubleField" value="java.lang.Double"/>
+                            <entry key="boolField" value="java.lang.Boolean"/>
+                            <entry key="guidField" value="java.util.UUID"/>
+                            <entry key="dateField" value="java.util.Date"/>
+                            <entry key="timeField" value="java.sql.Time"/>
+                            <entry key="timestampField" value="java.sql.Timestamp"/>
+                            <entry key="i8ArrayField" value="[B"/>
+                        </map>
+                    </property>
+
+                    <property name="keyFields">
+                        <list></list>
+                    </property>
+
+                    <property name="indexes">
+                        <list>
+                            <bean class="org.apache.ignite.cache.QueryIndex">
+                                <constructor-arg value="i32Field"/>
+                            </bean>
+                            <bean class="org.apache.ignite.cache.QueryIndex">
+                                <constructor-arg value="i64Field"/>
+                            </bean>
+                        </list>
+                    </property>
+
+                    <property name="fieldsPrecision">
+                        <map>
+                            <entry key="strField" value="60" />
+                        </map>
+                    </property>
+                </bean>
+            </list>
+        </property>
+    </bean>
+
+    <bean id="complex-type-template" abstract="true" class="org.apache.ignite.configuration.CacheConfiguration">
+        <property name="name" value="cache2"/>
+        <property name="writeSynchronizationMode" value="FULL_SYNC"/>
+        <property name="cacheMode" value="PARTITIONED"/>
+        <property name="atomicityMode" value="TRANSACTIONAL"/>
+
+        <!-- Configure type metadata to enable queries. -->
+        <property name="queryEntities">
+            <list>
+                <bean class="org.apache.ignite.cache.QueryEntity">
+                    <property name="keyType" value="java.lang.Long"/>
+                    <property name="valueType" value="ComplexType"/>
+
+                    <property name="keyFieldName" value="k"/>
+                    <property name="valueFieldName" value="v"/>
+
+                    <property name="fields">
+                        <map>
+                            <entry key="k" value="java.lang.Long"/>
+                            <entry key="v" value="ComplexType"/>
+                            <entry key="i32Field" value="java.lang.Integer"/>
+                            <entry key="objField" value="TestObject"/>
+                            <entry key="strField" value="java.lang.String"/>
+                        </map>
+                    </property>
+
+                    <property name="keyFields">
+                        <list></list>
+                    </property>
+
+                    <property name="indexes">
+                        <list>
+                            <bean class="org.apache.ignite.cache.QueryIndex">
+                                <constructor-arg value="i32Field"/>
+                            </bean>
+                        </list>
+                    </property>
+                </bean>
+            </list>
+        </property>
+    </bean>
 </beans>
diff --git a/modules/platforms/cpp/odbc-test/config/queries-transaction-32.xml b/modules/platforms/cpp/odbc-test/config/queries-transaction-32.xml
new file mode 100644
index 0000000..b5479c7
--- /dev/null
+++ b/modules/platforms/cpp/odbc-test/config/queries-transaction-32.xml
@@ -0,0 +1,59 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:util="http://www.springframework.org/schema/util"
+       xsi:schemaLocation="
+        http://www.springframework.org/schema/beans
+        http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <import resource="queries-default.xml"/>
+
+    <bean parent="queries.cfg">
+        <property name="cacheConfiguration">
+            <list>
+                <bean parent="test-type-template">
+                    <property name="atomicityMode" value="TRANSACTIONAL_SNAPSHOT"/>
+                </bean>
+                <bean parent="complex-type-template">
+                    <property name="atomicityMode" value="TRANSACTIONAL_SNAPSHOT"/>
+                </bean>
+            </list>
+        </property>
+
+        <property name="memoryConfiguration">
+            <bean class="org.apache.ignite.configuration.MemoryConfiguration">
+                <property name="systemCacheInitialSize" value="#{10 * 1024 * 1024}"/>
+                <property name="systemCacheMaxSize" value="#{40 * 1024 * 1024}"/>
+                <property name="defaultMemoryPolicyName" value="dfltPlc"/>
+
+                <property name="memoryPolicies">
+                    <list>
+                        <bean class="org.apache.ignite.configuration.MemoryPolicyConfiguration">
+                            <property name="name" value="dfltPlc"/>
+                            <property name="maxSize" value="#{100 * 1024 * 1024}"/>
+                            <property name="initialSize" value="#{10 * 1024 * 1024}"/>
+                        </bean>
+                    </list>
+                </property>
+            </bean>
+        </property>
+    </bean>
+</beans>
diff --git a/modules/platforms/cpp/odbc-test/config/queries-transaction.xml b/modules/platforms/cpp/odbc-test/config/queries-transaction.xml
new file mode 100644
index 0000000..9d8c535
--- /dev/null
+++ b/modules/platforms/cpp/odbc-test/config/queries-transaction.xml
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<beans xmlns="http://www.springframework.org/schema/beans"
+       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+       xmlns:util="http://www.springframework.org/schema/util"
+       xsi:schemaLocation="
+        http://www.springframework.org/schema/beans
+        http://www.springframework.org/schema/beans/spring-beans.xsd">
+
+    <import resource="queries-default.xml"/>
+
+    <bean parent="queries.cfg">
+        <property name="cacheConfiguration">
+            <list>
+                <bean parent="test-type-template">
+                    <property name="atomicityMode" value="TRANSACTIONAL_SNAPSHOT"/>
+                </bean>
+                <bean parent="complex-type-template">
+                    <property name="atomicityMode" value="TRANSACTIONAL_SNAPSHOT"/>
+                </bean>
+            </list>
+        </property>
+    </bean>
+</beans>
diff --git a/modules/platforms/cpp/odbc-test/include/sql_test_suite_fixture.h b/modules/platforms/cpp/odbc-test/include/sql_test_suite_fixture.h
index 8a76a67..b6af53d 100644
--- a/modules/platforms/cpp/odbc-test/include/sql_test_suite_fixture.h
+++ b/modules/platforms/cpp/odbc-test/include/sql_test_suite_fixture.h
@@ -109,7 +109,7 @@
         }
 
         /**
-         * Run query returning single result and check it to be equal to expected.
+         * Run query returning single result.
          *
          * @param request SQL request.
          * @param type Result type.
@@ -143,13 +143,13 @@
     void SqlTestSuiteFixture::CheckSingleResult<std::string>(const char* request, const std::string& expected);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int64_t>(const char* request, const int64_t& expected);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLBIGINT>(const char* request, const SQLBIGINT& expected);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int32_t>(const char* request, const int32_t& expected);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(const char* request, const SQLINTEGER& expected);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int16_t>(const char* request, const int16_t& expected);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLSMALLINT>(const char* request, const SQLSMALLINT& expected);
 
     template<>
     void SqlTestSuiteFixture::CheckSingleResult<int8_t>(const char* request, const int8_t& expected);
@@ -170,13 +170,13 @@
     void SqlTestSuiteFixture::CheckSingleResult<std::string>(const char* request);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int64_t>(const char* request);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLBIGINT>(const char* request);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int32_t>(const char* request);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(const char* request);
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int16_t>(const char* request);
+    void SqlTestSuiteFixture::CheckSingleResult<SQLSMALLINT>(const char* request);
 
     template<>
     void SqlTestSuiteFixture::CheckSingleResult<int8_t>(const char* request);
diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj
index e4af960..3410ec5 100644
--- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj
+++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj
@@ -164,6 +164,7 @@
     <ClCompile Include="..\..\..\odbc\src\diagnostic\diagnostic_record.cpp" />
     <ClCompile Include="..\..\..\odbc\src\diagnostic\diagnostic_record_storage.cpp" />
     <ClCompile Include="..\..\..\odbc\src\log.cpp" />
+    <ClCompile Include="..\..\..\odbc\src\nested_tx_mode.cpp" />
     <ClCompile Include="..\..\..\odbc\src\protocol_version.cpp" />
     <ClCompile Include="..\..\..\odbc\src\result_page.cpp" />
     <ClCompile Include="..\..\..\odbc\src\row.cpp" />
@@ -176,6 +177,7 @@
     <ClCompile Include="..\..\src\column_test.cpp" />
     <ClCompile Include="..\..\src\configuration_test.cpp" />
     <ClCompile Include="..\..\src\connection_info_test.cpp" />
+    <ClCompile Include="..\..\src\connection_test.cpp" />
     <ClCompile Include="..\..\src\cursor_test.cpp" />
     <ClCompile Include="..\..\src\errors_test.cpp" />
     <ClCompile Include="..\..\src\meta_queries_test.cpp" />
@@ -199,6 +201,7 @@
     <ClCompile Include="..\..\src\teamcity\teamcity_boost.cpp" />
     <ClCompile Include="..\..\src\teamcity\teamcity_messages.cpp" />
     <ClCompile Include="..\..\src\test_utils.cpp" />
+    <ClCompile Include="..\..\src\transaction_test.cpp" />
     <ClCompile Include="..\..\src\types_test.cpp" />
     <ClCompile Include="..\..\src\utility_test.cpp" />
   </ItemGroup>
@@ -232,6 +235,8 @@
     <None Include="..\..\config\queries-ssl.xml" />
     <None Include="..\..\config\queries-test-32.xml" />
     <None Include="..\..\config\queries-test.xml" />
+    <None Include="..\..\config\queries-transaction-32.xml" />
+    <None Include="..\..\config\queries-transaction.xml" />
   </ItemGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
   <ImportGroup Label="ExtensionTargets">
diff --git a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters
index e2749ca..3065df0 100644
--- a/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters
+++ b/modules/platforms/cpp/odbc-test/project/vs/odbc-test.vcxproj.filters
@@ -25,6 +25,9 @@
     <ClCompile Include="..\..\src\connection_info_test.cpp">
       <Filter>Code</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\connection_test.cpp">
+      <Filter>Code</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\src\parser_test.cpp">
       <Filter>Code</Filter>
     </ClCompile>
@@ -142,6 +145,9 @@
     <ClCompile Include="..\..\src\queries_ssl_test.cpp">
       <Filter>Code</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\transaction_test.cpp">
+      <Filter>Code</Filter>
+    </ClCompile>
     <ClCompile Include="..\..\..\odbc\src\config\connection_string_parser.cpp">
       <Filter>Externals</Filter>
     </ClCompile>
@@ -163,6 +169,9 @@
     <ClCompile Include="..\..\src\authentication_test.cpp">
       <Filter>Code</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\..\odbc\src\nested_tx_mode.cpp">
+      <Filter>Externals</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\..\include\test_type.h">
@@ -206,5 +215,11 @@
     <None Include="..\..\config\queries-auth-32.xml">
       <Filter>Configs</Filter>
     </None>
+    <None Include="..\..\config\queries-transaction.xml">
+      <Filter>Configs</Filter>
+    </None>
+    <None Include="..\..\config\queries-transaction-32.xml">
+      <Filter>Configs</Filter>
+    </None>
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/modules/platforms/cpp/odbc-test/src/application_data_buffer_test.cpp b/modules/platforms/cpp/odbc-test/src/application_data_buffer_test.cpp
index 8696c4c..7329bcb 100644
--- a/modules/platforms/cpp/odbc-test/src/application_data_buffer_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/application_data_buffer_test.cpp
@@ -158,7 +158,7 @@
 
 BOOST_AUTO_TEST_CASE(TestPutStringToLong)
 {
-    long numBuf;
+    SQLINTEGER numBuf;
     SqlLen reslen = 0;
 
     ApplicationDataBuffer appBuf(OdbcNativeType::AI_SIGNED_LONG, &numBuf, sizeof(numBuf), &reslen);
@@ -275,7 +275,7 @@
 
 BOOST_AUTO_TEST_CASE(TestPutDecimalToLong)
 {
-    long numBuf;
+    SQLINTEGER numBuf;
     SqlLen reslen = 0;
 
     ApplicationDataBuffer appBuf(OdbcNativeType::AI_SIGNED_LONG, &numBuf, sizeof(numBuf), &reslen);
diff --git a/modules/platforms/cpp/odbc-test/src/connection_test.cpp b/modules/platforms/cpp/odbc-test/src/connection_test.cpp
new file mode 100644
index 0000000..709ef61
--- /dev/null
+++ b/modules/platforms/cpp/odbc-test/src/connection_test.cpp
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef _WIN32
+#   include <windows.h>
+#endif
+
+#include <sql.h>
+#include <sqlext.h>
+
+#include <string>
+
+#ifndef _MSC_VER
+#   define BOOST_TEST_DYN_LINK
+#endif
+
+#include <boost/test/unit_test.hpp>
+
+#include "ignite/ignite.h"
+#include "ignite/ignition.h"
+
+#include "test_type.h"
+#include "test_utils.h"
+#include "odbc_test_suite.h"
+
+using namespace ignite;
+using namespace ignite::common;
+using namespace ignite_test;
+
+using namespace boost::unit_test;
+
+/**
+ * Test setup fixture.
+ */
+struct ConnectionTestSuiteFixture: odbc::OdbcTestSuite
+{
+    /**
+     * Constructor.
+     */
+    ConnectionTestSuiteFixture() :
+        OdbcTestSuite()
+    {
+        StartNode();
+    }
+
+    /**
+     * Start a node.
+     */
+    void StartNode()
+    {
+        StartTestNode("queries-test.xml", "NodeMain");
+    }
+
+    /**
+     * Execute the query and return an error code.
+     */
+    std::string ExecQueryAndReturnError()
+    {
+        SQLCHAR selectReq[] = "select count(*) from TestType";
+
+        SQLRETURN ret = SQLExecDirect(stmt, selectReq, sizeof(selectReq));
+
+        std::string err;
+
+        if (!SQL_SUCCEEDED(ret))
+            err = ExtractErrorCode(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        return err;
+    }
+
+    /**
+     * Extract code from ODBC error message.
+     *
+     * @param err Error.
+     * @return Code.
+     */
+    static std::string ExtractErrorCode(const std::string& err)
+    {
+        std::string code;
+
+        size_t idx = err.find(':');
+
+        if ((idx != std::string::npos) && (idx > 0))
+            code = err.substr(0, idx);
+
+        return code;
+    }
+
+    /**
+     * Destructor.
+     */
+    ~ConnectionTestSuiteFixture()
+    {
+        // No-op.
+    }
+};
+
+BOOST_FIXTURE_TEST_SUITE(ConnectionTestSuite, ConnectionTestSuiteFixture)
+
+BOOST_AUTO_TEST_CASE(TestConnectionRestore)
+{
+    Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache");
+
+    // Check that query was successfully executed.
+    BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), "");
+
+    // Stop node.
+    Ignition::StopAll(true);
+
+    // Query execution should throw ODBC error.
+    BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), "08S01");
+
+    // Reusing a closed connection should not crash an application.
+    BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), "08001");
+
+    StartNode();
+
+    // Check that connection was restored.
+    BOOST_CHECK_EQUAL(ExecQueryAndReturnError(), "");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp
index 8e22e3d..5cacb96 100644
--- a/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/meta_queries_test.cpp
@@ -180,6 +180,8 @@
 
     if (!SQL_SUCCEEDED(ret))
         BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+    BOOST_CHECK_EQUAL(intVal, 60);
 }
 
 BOOST_AUTO_TEST_CASE(TestColAttributesColumnPresicion)
@@ -197,6 +199,8 @@
 
     if (!SQL_SUCCEEDED(ret))
         BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+    BOOST_CHECK_EQUAL(intVal, 60);
 }
 
 BOOST_AUTO_TEST_CASE(TestColAttributesColumnScale)
@@ -278,6 +282,19 @@
     CheckSingleRowResultSetWithGetData(stmt);
 }
 
+BOOST_AUTO_TEST_CASE(TestInsertTooLongValueFail)
+{
+    Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache");
+
+    SQLCHAR insertReq[] =
+        "insert into TestType(_key, strField) VALUES(42, '0123456789012345678901234567890123456789012345678901234567891')";
+
+    SQLRETURN ret = SQLExecDirect(stmt, insertReq, SQL_NTS);
+
+    if (SQL_SUCCEEDED(ret))
+        BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+}
+
 BOOST_AUTO_TEST_CASE(TestGetInfoScrollOptions)
 {
     Connect("DRIVER={Apache Ignite};ADDRESS=127.0.0.1:11110;SCHEMA=cache");
diff --git a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp
index 5ab1359..641dcdc 100644
--- a/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp
+++ b/modules/platforms/cpp/odbc-test/src/odbc_test_suite.cpp
@@ -339,7 +339,7 @@
                 timestampFields[i].hour = timeFields[i].hour;
                 timestampFields[i].minute = timeFields[i].minute;
                 timestampFields[i].second = timeFields[i].second;
-                timestampFields[i].fraction = std::abs(seed * 914873) % 1000000000;
+                timestampFields[i].fraction = static_cast<uint64_t>(std::abs(seed * 914873)) % 1000000000;
 
                 for (int j = 0; j < 42; ++j)
                     i8ArrayFields[i * 42 + j] = seed * 42 + j;
diff --git a/modules/platforms/cpp/odbc-test/src/queries_test.cpp b/modules/platforms/cpp/odbc-test/src/queries_test.cpp
index 73727dd..57b7c10 100644
--- a/modules/platforms/cpp/odbc-test/src/queries_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/queries_test.cpp
@@ -30,7 +30,6 @@
 #   define BOOST_TEST_DYN_LINK
 #endif
 
-#include <boost/regex.hpp>
 #include <boost/test/unit_test.hpp>
 
 #include "ignite/ignite.h"
@@ -262,12 +261,12 @@
 
 BOOST_AUTO_TEST_CASE(TestTwoRowsInt32)
 {
-    CheckTwoRowsInt<signed long>(SQL_C_SLONG);
+    CheckTwoRowsInt<SQLINTEGER>(SQL_C_SLONG);
 }
 
 BOOST_AUTO_TEST_CASE(TestTwoRowsUint32)
 {
-    CheckTwoRowsInt<unsigned long>(SQL_C_ULONG);
+    CheckTwoRowsInt<SQLUINTEGER>(SQL_C_ULONG);
 }
 
 BOOST_AUTO_TEST_CASE(TestTwoRowsInt64)
@@ -1625,10 +1624,9 @@
     BOOST_REQUIRE_EQUAL(ret, SQL_ERROR);
 
     std::string error = GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt);
-    std::string pattern = "42000: Table \"B\" not found; SQL statement:\\vSELECT a FROM B.*";
+    std::string pattern = "42000: Table \"B\" not found; SQL statement:\nSELECT a FROM B";
 
-    boost::cmatch what;
-    if (!boost::regex_match(error.c_str(), what, boost::regex(pattern)))
+    if (error.substr(0, pattern.size()) != pattern)
         BOOST_FAIL("'" + error + "' does not match '" + pattern + "'");
 }
 
diff --git a/modules/platforms/cpp/odbc-test/src/row_test.cpp b/modules/platforms/cpp/odbc-test/src/row_test.cpp
index 1aced92..2fdd784 100644
--- a/modules/platforms/cpp/odbc-test/src/row_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/row_test.cpp
@@ -79,7 +79,7 @@
 {
     SqlLen reslen;
 
-    long longBuf;
+    SQLINTEGER longBuf;
     char strBuf[1024];
     SQLGUID guidBuf;
     char bitBuf;
diff --git a/modules/platforms/cpp/odbc-test/src/sql_aggregate_functions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_aggregate_functions_test.cpp
index fd55d99..c5bc932 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_aggregate_functions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_aggregate_functions_test.cpp
@@ -74,7 +74,7 @@
 
     avg /= static_cast<int32_t>(in.size());
 
-    CheckSingleResult<int64_t>("SELECT {fn AVG(i32Field)} FROM TestType", avg);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn AVG(i32Field)} FROM TestType", avg);
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionAvgIntDistinct)
@@ -98,7 +98,7 @@
 
     testCache.Put(in.size() + 10, in[0]);
 
-    CheckSingleResult<int64_t>("SELECT {fn AVG(DISTINCT i32Field)} FROM TestType", avg);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn AVG(DISTINCT i32Field)} FROM TestType", avg);
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionAvgFloat)
@@ -154,7 +154,7 @@
     for (int32_t i = 0; i < static_cast<int32_t>(in.size()); ++i)
         testCache.Put(i, in[i]);
 
-    CheckSingleResult<int64_t>("SELECT {fn COUNT(*)} FROM TestType", in.size());
+    CheckSingleResult<SQLBIGINT>("SELECT {fn COUNT(*)} FROM TestType", in.size());
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionCountDistinct)
@@ -170,7 +170,7 @@
 
     testCache.Put(in.size() + 10, in[0]);
 
-    CheckSingleResult<int64_t>("SELECT {fn COUNT(DISTINCT i32Field)} FROM TestType", in.size());
+    CheckSingleResult<SQLBIGINT>("SELECT {fn COUNT(DISTINCT i32Field)} FROM TestType", in.size());
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionMax)
@@ -185,7 +185,7 @@
     for (int32_t i = 0; i < static_cast<int32_t>(in.size()); ++i)
         testCache.Put(i, in[i]);
 
-    CheckSingleResult<int64_t>("SELECT {fn MAX(i32Field)} FROM TestType", in[2].i32Field);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn MAX(i32Field)} FROM TestType", in[2].i32Field);
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionMin)
@@ -200,7 +200,7 @@
     for (int32_t i = 0; i < static_cast<int32_t>(in.size()); ++i)
         testCache.Put(i, in[i]);
 
-    CheckSingleResult<int64_t>("SELECT {fn MIN(i32Field)} FROM TestType", in[1].i32Field);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn MIN(i32Field)} FROM TestType", in[1].i32Field);
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionSum)
@@ -221,7 +221,7 @@
         sum += in[i].i32Field;
     }
 
-    CheckSingleResult<int64_t>("SELECT {fn SUM(i32Field)} FROM TestType", sum);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn SUM(i32Field)} FROM TestType", sum);
 }
 
 BOOST_AUTO_TEST_CASE(TestAggregateFunctionSumDistinct)
@@ -244,7 +244,7 @@
 
     testCache.Put(in.size() + 10, in[0]);
 
-    CheckSingleResult<int64_t>("SELECT {fn SUM(DISTINCT i32Field)} FROM TestType", sum);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn SUM(DISTINCT i32Field)} FROM TestType", sum);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/modules/platforms/cpp/odbc-test/src/sql_date_time_functions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_date_time_functions_test.cpp
index c822fec..d56b897 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_date_time_functions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_date_time_functions_test.cpp
@@ -78,8 +78,8 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn DAYOFMONTH(dateField)} FROM TestType", 29);
-    CheckSingleResult<int32_t>("SELECT {fn DAY_OF_MONTH(dateField)} FROM TestType", 29);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAYOFMONTH(dateField)} FROM TestType", 29);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAY_OF_MONTH(dateField)} FROM TestType", 29);
 }
 
 BOOST_AUTO_TEST_CASE(TestDayofweek)
@@ -90,8 +90,8 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn DAYOFWEEK(dateField)} FROM TestType", 2);
-    CheckSingleResult<int32_t>("SELECT {fn DAY_OF_WEEK(dateField)} FROM TestType", 2);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAYOFWEEK(dateField)} FROM TestType", 2);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAY_OF_WEEK(dateField)} FROM TestType", 2);
 }
 
 BOOST_AUTO_TEST_CASE(TestDayofyear)
@@ -102,8 +102,8 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn DAYOFYEAR(dateField)} FROM TestType", 242);
-    CheckSingleResult<int32_t>("SELECT {fn DAY_OF_YEAR(dateField)} FROM TestType", 242);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAYOFYEAR(dateField)} FROM TestType", 242);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DAY_OF_YEAR(dateField)} FROM TestType", 242);
 }
 
 BOOST_AUTO_TEST_CASE(TestExtract)
@@ -114,12 +114,12 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(YEAR FROM timestampField)} FROM TestType", 2016);
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(MONTH FROM timestampField)} FROM TestType", 2);
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(DAY FROM timestampField)} FROM TestType", 24);
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(HOUR FROM timestampField)} FROM TestType", 13);
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(MINUTE FROM timestampField)} FROM TestType", 45);
-    CheckSingleResult<int32_t>("SELECT {fn EXTRACT(SECOND FROM timestampField)} FROM TestType", 23);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(YEAR FROM timestampField)} FROM TestType", 2016);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(MONTH FROM timestampField)} FROM TestType", 2);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(DAY FROM timestampField)} FROM TestType", 24);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(HOUR FROM timestampField)} FROM TestType", 13);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(MINUTE FROM timestampField)} FROM TestType", 45);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn EXTRACT(SECOND FROM timestampField)} FROM TestType", 23);
 }
 
 BOOST_AUTO_TEST_CASE(TestHour)
@@ -130,7 +130,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn HOUR(timestampField)} FROM TestType", 13);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn HOUR(timestampField)} FROM TestType", 13);
 }
 
 BOOST_AUTO_TEST_CASE(TestMinute)
@@ -141,7 +141,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn MINUTE(timestampField)} FROM TestType", 45);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn MINUTE(timestampField)} FROM TestType", 45);
 }
 
 BOOST_AUTO_TEST_CASE(TestMonth)
@@ -152,7 +152,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn MONTH(timestampField)} FROM TestType", 2);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn MONTH(timestampField)} FROM TestType", 2);
 }
 
 BOOST_AUTO_TEST_CASE(TestMonthname)
@@ -179,7 +179,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn QUARTER(timestampField)} FROM TestType", 1);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn QUARTER(timestampField)} FROM TestType", 1);
 }
 
 BOOST_AUTO_TEST_CASE(TestSecond)
@@ -190,7 +190,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn SECOND(timestampField)} FROM TestType", 23);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn SECOND(timestampField)} FROM TestType", 23);
 }
 
 BOOST_AUTO_TEST_CASE(TestWeek)
@@ -201,7 +201,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn WEEK(timestampField)} FROM TestType", 9);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn WEEK(timestampField)} FROM TestType", 9);
 }
 
 BOOST_AUTO_TEST_CASE(TestYear)
@@ -212,7 +212,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn YEAR(timestampField)} FROM TestType", 2016);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn YEAR(timestampField)} FROM TestType", 2016);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/modules/platforms/cpp/odbc-test/src/sql_esc_convert_function_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_esc_convert_function_test.cpp
index 6879519..f583d02 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_esc_convert_function_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_esc_convert_function_test.cpp
@@ -69,17 +69,17 @@
 
 BOOST_AUTO_TEST_CASE(TestEscConvertFunctionInt64)
 {
-    CheckSingleResult<int64_t>("SELECT {fn CONVERT(72623859790382856, SQL_BIGINT)}", 72623859790382856);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn CONVERT(72623859790382856, SQL_BIGINT)}", 72623859790382856);
 }
 
 BOOST_AUTO_TEST_CASE(TestEscConvertFunctionInt32)
 {
-    CheckSingleResult<int32_t>("SELECT {fn CONVERT(1234567890, SQL_INTEGER)}", 1234567890);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn CONVERT(1234567890, SQL_INTEGER)}", 1234567890);
 }
 
 BOOST_AUTO_TEST_CASE(TestEscConvertFunctionInt16)
 {
-    CheckSingleResult<int16_t>("SELECT {fn CONVERT(12345, SQL_SMALLINT)}", 12345);
+    CheckSingleResult<SQLSMALLINT>("SELECT {fn CONVERT(12345, SQL_SMALLINT)}", 12345);
 }
 
 BOOST_AUTO_TEST_CASE(TestEscConvertFunctionInt8)
diff --git a/modules/platforms/cpp/odbc-test/src/sql_get_info_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_get_info_test.cpp
index 630597c..7842704 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_get_info_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_get_info_test.cpp
@@ -136,7 +136,7 @@
     CheckStrInfo(SQL_LIKE_ESCAPE_CLAUSE, "N");
     CheckStrInfo(SQL_MAX_ROW_SIZE_INCLUDES_LONG, "Y");
     CheckStrInfo(SQL_MULT_RESULT_SETS, "N");
-    CheckStrInfo(SQL_MULTIPLE_ACTIVE_TXN, "N");
+    CheckStrInfo(SQL_MULTIPLE_ACTIVE_TXN, "Y");
     CheckStrInfo(SQL_ORDER_BY_COLUMNS_IN_SELECT, "N");
     CheckStrInfo(SQL_PROCEDURE_TERM, "stored procedure");
     CheckStrInfo(SQL_PROCEDURES, "N");
@@ -182,7 +182,7 @@
     CheckIntInfo(SQL_CREATE_VIEW, 0);
     CheckIntInfo(SQL_CURSOR_SENSITIVITY, SQL_INSENSITIVE);
     CheckIntInfo(SQL_DDL_INDEX, SQL_DI_CREATE_INDEX | SQL_DI_DROP_INDEX);
-    CheckIntInfo(SQL_DEFAULT_TXN_ISOLATION, 0);
+    CheckIntInfo(SQL_DEFAULT_TXN_ISOLATION, SQL_TXN_REPEATABLE_READ);
     CheckIntInfo(SQL_DROP_ASSERTION, 0);
     CheckIntInfo(SQL_DROP_CHARACTER_SET, 0);
     CheckIntInfo(SQL_DROP_COLLATION, 0);
@@ -212,7 +212,7 @@
     CheckIntInfo(SQL_SQL92_GRANT, 0);
     CheckIntInfo(SQL_SQL92_REVOKE, 0);
     CheckIntInfo(SQL_STANDARD_CLI_CONFORMANCE, 0);
-    CheckIntInfo(SQL_TXN_ISOLATION_OPTION, 0);
+    CheckIntInfo(SQL_TXN_ISOLATION_OPTION, SQL_TXN_REPEATABLE_READ);
     CheckIntInfo(SQL_UNION, SQL_U_UNION | SQL_U_UNION_ALL);
 
     CheckIntInfo(SQL_SCHEMA_USAGE, SQL_SU_DML_STATEMENTS | SQL_SU_TABLE_DEFINITION | SQL_SU_PRIVILEGE_DEFINITION |
@@ -384,7 +384,7 @@
     CheckShortInfo(SQL_MAX_CONCURRENT_ACTIVITIES, 0);
     CheckShortInfo(SQL_CURSOR_COMMIT_BEHAVIOR, SQL_CB_PRESERVE);
     CheckShortInfo(SQL_CURSOR_ROLLBACK_BEHAVIOR, SQL_CB_PRESERVE);
-    CheckShortInfo(SQL_TXN_CAPABLE, SQL_TC_NONE);
+    CheckShortInfo(SQL_TXN_CAPABLE, SQL_TC_DDL_COMMIT);
     CheckShortInfo(SQL_QUOTED_IDENTIFIER_CASE, SQL_IC_SENSITIVE);
     CheckShortInfo(SQL_ACTIVE_ENVIRONMENTS, 0);
     CheckShortInfo(SQL_CONCAT_NULL_BEHAVIOR, SQL_CB_NULL);
diff --git a/modules/platforms/cpp/odbc-test/src/sql_numeric_functions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_numeric_functions_test.cpp
index 723f784..97e203b 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_numeric_functions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_numeric_functions_test.cpp
@@ -63,7 +63,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn ABS(i32Field)} FROM TestType", std::abs(in.i32Field));
+    CheckSingleResult<SQLINTEGER>("SELECT {fn ABS(i32Field)} FROM TestType", std::abs(in.i32Field));
 }
 
 BOOST_AUTO_TEST_CASE(TestNumericFunctionAcos)
@@ -206,7 +206,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn MOD(i64Field, 3)} FROM TestType", in.i64Field % 3);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn MOD(i64Field, 3)} FROM TestType", in.i64Field % 3);
 }
 
 BOOST_AUTO_TEST_CASE(TestNumericFunctionPi)
diff --git a/modules/platforms/cpp/odbc-test/src/sql_operators_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_operators_test.cpp
index de884ca..d47f0fb 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_operators_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_operators_test.cpp
@@ -38,27 +38,27 @@
 
 BOOST_AUTO_TEST_CASE(TestOperatorAddInt)
 {
-    CheckSingleResult<int32_t>("SELECT 123 + 51", 123 + 51);
+    CheckSingleResult<SQLINTEGER>("SELECT 123 + 51", 123 + 51);
 };
 
 BOOST_AUTO_TEST_CASE(TestOperatorSubInt)
 {
-    CheckSingleResult<int32_t>("SELECT 123 - 51", 123 - 51);
+    CheckSingleResult<SQLINTEGER>("SELECT 123 - 51", 123 - 51);
 };
 
 BOOST_AUTO_TEST_CASE(TestOperatorDivInt)
 {
-    CheckSingleResult<int32_t>("SELECT 123 / 51", 123 / 51);
+    CheckSingleResult<SQLINTEGER>("SELECT 123 / 51", 123 / 51);
 };
 
 BOOST_AUTO_TEST_CASE(TestOperatorModInt)
 {
-    CheckSingleResult<int32_t>("SELECT 123 % 51", 123 % 51);
+    CheckSingleResult<SQLINTEGER>("SELECT 123 % 51", 123 % 51);
 };
 
 BOOST_AUTO_TEST_CASE(TestOperatorMultInt)
 {
-    CheckSingleResult<int32_t>("SELECT 123 * 51", 123 * 51);
+    CheckSingleResult<SQLINTEGER>("SELECT 123 * 51", 123 * 51);
 };
 
 BOOST_AUTO_TEST_CASE(TestOperatorAddDouble)
diff --git a/modules/platforms/cpp/odbc-test/src/sql_string_functions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_string_functions_test.cpp
index 389f2f4..5a932f1 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_string_functions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_string_functions_test.cpp
@@ -59,7 +59,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn ASCII(strField)} FROM TestType", static_cast<int32_t>('H'));
+    CheckSingleResult<SQLINTEGER>("SELECT {fn ASCII(strField)} FROM TestType", static_cast<int32_t>('H'));
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionBitLength)
@@ -69,7 +69,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn BIT_LENGTH(strField)} FROM TestType", in.strField.size() * 16);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn BIT_LENGTH(strField)} FROM TestType", in.strField.size() * 16);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionChar)
@@ -90,7 +90,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn CHAR_LENGTH(strField)} FROM TestType", in.strField.size());
+    CheckSingleResult<SQLBIGINT>("SELECT {fn CHAR_LENGTH(strField)} FROM TestType", in.strField.size());
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionCharacterLength)
@@ -100,7 +100,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn CHARACTER_LENGTH(strField)} FROM TestType", in.strField.size());
+    CheckSingleResult<SQLBIGINT>("SELECT {fn CHARACTER_LENGTH(strField)} FROM TestType", in.strField.size());
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionConcat)
@@ -121,7 +121,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT {fn DIFFERENCE(strField, \'Hola!\')} FROM TestType", 4);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn DIFFERENCE(strField, \'Hola!\')} FROM TestType", 4);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionInsert)
@@ -161,7 +161,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn LENGTH(strField)} FROM TestType", in.strField.size());
+    CheckSingleResult<SQLBIGINT>("SELECT {fn LENGTH(strField)} FROM TestType", in.strField.size());
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionLocate)
@@ -171,7 +171,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn LOCATE(\'ip\', strField)} FROM TestType", 7);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn LOCATE(\'ip\', strField)} FROM TestType", 7);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionLocate2)
@@ -181,7 +181,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn LOCATE(\'ip\', strField, 10)} FROM TestType", 43);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn LOCATE(\'ip\', strField, 10)} FROM TestType", 43);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionLtrim)
@@ -201,7 +201,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn OCTET_LENGTH(strField)} FROM TestType", in.strField.size() * 2);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn OCTET_LENGTH(strField)} FROM TestType", in.strField.size() * 2);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionPosition)
@@ -211,7 +211,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int64_t>("SELECT {fn POSITION(\'sit\', strField)} FROM TestType", 19);
+    CheckSingleResult<SQLBIGINT>("SELECT {fn POSITION(\'sit\', strField)} FROM TestType", 19);
 }
 
 BOOST_AUTO_TEST_CASE(TestStringFunctionRepeat)
diff --git a/modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp
index 30fa036..2496bc7 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_system_functions_test.cpp
@@ -41,7 +41,7 @@
 
 BOOST_AUTO_TEST_CASE(TestSystemFunctionIfnull)
 {
-    CheckSingleResult<int32_t>("SELECT {fn IFNULL(NULL, 42)}", 42);
+    CheckSingleResult<SQLINTEGER>("SELECT {fn IFNULL(NULL, 42)}", 42);
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp
index 42dd3f7..31d6717 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_test_suite_fixture.cpp
@@ -133,21 +133,21 @@
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int64_t>(const char* request, const int64_t& expected)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLBIGINT>(const char* request, const SQLBIGINT& expected)
     {
-        CheckSingleResultNum0<int64_t>(request, expected, SQL_C_SBIGINT);
+        CheckSingleResultNum0<SQLBIGINT>(request, expected, SQL_C_SBIGINT);
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int32_t>(const char* request, const int32_t& expected)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(const char* request, const SQLINTEGER& expected)
     {
-        CheckSingleResultNum0<int32_t>(request, expected, SQL_C_SLONG);
+        CheckSingleResultNum0<SQLINTEGER>(request, expected, SQL_C_SLONG);
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int16_t>(const char* request, const int16_t& expected)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLSMALLINT>(const char* request, const SQLSMALLINT& expected)
     {
-        CheckSingleResultNum0<int16_t>(request, expected, SQL_C_SSHORT);
+        CheckSingleResultNum0<SQLSMALLINT>(request, expected, SQL_C_SSHORT);
     }
 
     template<>
@@ -213,21 +213,21 @@
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int64_t>(const char* request)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLBIGINT>(const char* request)
     {
-        CheckSingleResultNum0<int64_t>(request, SQL_C_SBIGINT);
+        CheckSingleResultNum0<SQLBIGINT>(request, SQL_C_SBIGINT);
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int32_t>(const char* request)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(const char* request)
     {
-        CheckSingleResultNum0<int32_t>(request, SQL_C_SLONG);
+        CheckSingleResultNum0<SQLINTEGER>(request, SQL_C_SLONG);
     }
 
     template<>
-    void SqlTestSuiteFixture::CheckSingleResult<int16_t>(const char* request)
+    void SqlTestSuiteFixture::CheckSingleResult<SQLSMALLINT>(const char* request)
     {
-        CheckSingleResultNum0<int16_t>(request, SQL_C_SSHORT);
+        CheckSingleResultNum0<SQLSMALLINT>(request, SQL_C_SSHORT);
     }
 
     template<>
diff --git a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp
index 60d9d7e..6ec6250 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_types_test.cpp
@@ -54,7 +54,7 @@
     testCache.Put(1, in1);
     testCache.Put(2, in2);
 
-    CheckSingleResult<int32_t>(
+    CheckSingleResult<SQLINTEGER>(
         "SELECT i32Field FROM TestType WHERE guidField = {guid '04cc382a-0b82-f520-08d0-07a0620c0004'}", in2.i32Field);
 }
 
@@ -197,7 +197,7 @@
 
     testCache.Put(1, in1);
 
-    CheckSingleResult<int32_t>(
+    CheckSingleResult<SQLINTEGER>(
         "SELECT i32Field FROM TestType WHERE timestampField = '2017-01-13 19:54:01.987654321'", in1.i32Field);
 
     CheckSingleResult<Timestamp>(
@@ -259,7 +259,7 @@
 
     testCache.Put(1, in1);
 
-    CheckSingleResult<int32_t>("SELECT i32Field FROM TestType WHERE timeField = '19:54:01'", in1.i32Field);
+    CheckSingleResult<SQLINTEGER>("SELECT i32Field FROM TestType WHERE timeField = '19:54:01'", in1.i32Field);
 
     CheckSingleResult<Time>("SELECT timeField FROM TestType WHERE i32Field = 1", in1.timeField);
 }
diff --git a/modules/platforms/cpp/odbc-test/src/sql_value_expressions_test.cpp b/modules/platforms/cpp/odbc-test/src/sql_value_expressions_test.cpp
index fd12688..32d49d3 100644
--- a/modules/platforms/cpp/odbc-test/src/sql_value_expressions_test.cpp
+++ b/modules/platforms/cpp/odbc-test/src/sql_value_expressions_test.cpp
@@ -37,7 +37,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>(
+    SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(
         "SELECT "
             "CASE i32Field WHEN 82 "
                 "THEN (i32Field / 2) "
@@ -45,7 +45,7 @@
             "END "
         "FROM TestType", in.i32Field / 2);
 
-    CheckSingleResult<int32_t>(
+    SqlTestSuiteFixture::CheckSingleResult<SQLINTEGER>(
         "SELECT "
             "CASE i32Field WHEN 22 "
                 "THEN (i32Field / 2) "
@@ -63,7 +63,7 @@
 
     testCache.Put(1, in);
 
-    CheckSingleResult<int32_t>("SELECT CAST(strField AS INT) + i32Field FROM TestType", 
+    CheckSingleResult<SQLINTEGER>("SELECT CAST(strField AS INT) + i32Field FROM TestType", 
         common::LexicalCast<int32_t>(in.strField) + in.i32Field);
 
     CheckSingleResult<std::string>("SELECT CAST(i32Field AS VARCHAR) || strField FROM TestType",
diff --git a/modules/platforms/cpp/odbc-test/src/transaction_test.cpp b/modules/platforms/cpp/odbc-test/src/transaction_test.cpp
new file mode 100644
index 0000000..fb2b6c9
--- /dev/null
+++ b/modules/platforms/cpp/odbc-test/src/transaction_test.cpp
@@ -0,0 +1,738 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifdef _WIN32
+#   include <windows.h>
+#endif
+
+#include <sql.h>
+#include <sqlext.h>
+
+#include <string>
+
+#ifndef _MSC_VER
+#   define BOOST_TEST_DYN_LINK
+#endif
+
+#include <boost/test/unit_test.hpp>
+
+#include "ignite/ignition.h"
+
+#include "test_utils.h"
+#include "odbc_test_suite.h"
+
+using namespace ignite;
+using namespace ignite_test;
+
+using namespace boost::unit_test;
+
+/**
+ * Test setup fixture.
+ */
+struct TransactionTestSuiteFixture : public odbc::OdbcTestSuite
+{
+    static Ignite StartAdditionalNode(const char* name)
+    {
+        return StartTestNode("queries-transaction.xml", name);
+    }
+
+    /**
+     * Constructor.
+     */
+    TransactionTestSuiteFixture() :
+        grid(StartAdditionalNode("NodeMain"))
+    {
+        // No-op.
+    }
+
+    /**
+     * Destructor.
+     */
+    ~TransactionTestSuiteFixture()
+    {
+        // No-op.
+    }
+
+    /**
+     * Insert test string value in cache and make all the neccessary checks.
+     *
+     * @param key Key.
+     * @param value Value.
+     */
+    void InsertTestValue(int64_t key, const std::string& value)
+    {
+        SQLCHAR insertReq[] = "INSERT INTO TestType(_key, strField) VALUES(?, ?)";
+
+        SQLRETURN ret;
+
+        ret = SQLPrepare(stmt, insertReq, SQL_NTS);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        char strField[1024] = { 0 };
+        SQLLEN strFieldLen = 0;
+
+        ret = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_BIGINT, 0, 0, &key, 0, 0);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLBindParameter(stmt, 2, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR, sizeof(strField),
+            sizeof(strField), &strField, sizeof(strField), &strFieldLen);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        strncpy(strField, value.c_str(), sizeof(strField));
+        strFieldLen = SQL_NTS;
+
+        ret = SQLExecute(stmt);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        SQLLEN affected = 0;
+        ret = SQLRowCount(stmt, &affected);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        BOOST_CHECK_EQUAL(affected, 1);
+
+        ret = SQLMoreResults(stmt);
+
+        if (ret != SQL_NO_DATA)
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ResetStatement();
+    }
+
+    /**
+     * Update test string value in cache and make all the neccessary checks.
+     *
+     * @param key Key.
+     * @param value Value.
+     */
+    void UpdateTestValue(int64_t key, const std::string& value)
+    {
+        SQLCHAR insertReq[] = "UPDATE TestType SET strField=? WHERE _key=?";
+
+        SQLRETURN ret;
+
+        ret = SQLPrepare(stmt, insertReq, SQL_NTS);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        char strField[1024] = { 0 };
+        SQLLEN strFieldLen = 0;
+
+        ret = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_CHAR, SQL_VARCHAR, sizeof(strField),
+            sizeof(strField), &strField, sizeof(strField), &strFieldLen);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLBindParameter(stmt, 2, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_BIGINT, 0, 0, &key, 0, 0);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        strncpy(strField, value.c_str(), sizeof(strField));
+        strFieldLen = SQL_NTS;
+
+        ret = SQLExecute(stmt);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        SQLLEN affected = 0;
+        ret = SQLRowCount(stmt, &affected);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        BOOST_CHECK_EQUAL(affected, 1);
+
+        ret = SQLMoreResults(stmt);
+
+        if (ret != SQL_NO_DATA)
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ResetStatement();
+    }
+
+    /**
+     * Delete test string value.
+     *
+     * @param key Key.
+     */
+    void DeleteTestValue(int64_t key)
+    {
+        SQLCHAR insertReq[] = "DELETE FROM TestType WHERE _key=?";
+
+        SQLRETURN ret;
+
+        ret = SQLPrepare(stmt, insertReq, SQL_NTS);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_BIGINT, 0, 0, &key, 0, 0);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLExecute(stmt);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        SQLLEN affected = 0;
+        ret = SQLRowCount(stmt, &affected);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        BOOST_CHECK_EQUAL(affected, 1);
+
+        ret = SQLMoreResults(stmt);
+
+        if (ret != SQL_NO_DATA)
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ResetStatement();
+    }
+
+    /**
+     * Selects and checks the value.
+     *
+     * @param key Key.
+     * @param expect Expected value.
+     */
+    void CheckTestValue(int64_t key, const std::string& expect)
+    {
+        // Just selecting everything to make sure everything is OK
+        SQLCHAR selectReq[] = "SELECT strField FROM TestType WHERE _key = ?";
+
+        char strField[1024] = { 0 };
+        SQLLEN strFieldLen = 0;
+
+        SQLRETURN ret = SQLBindCol(stmt, 1, SQL_C_CHAR, &strField, sizeof(strField), &strFieldLen);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_BIGINT, 0, 0, &key, 0, 0);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLExecDirect(stmt, selectReq, sizeof(selectReq));
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLFetch(stmt);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        BOOST_CHECK_EQUAL(std::string(strField, strFieldLen), expect);
+
+        ret = SQLFetch(stmt);
+
+        BOOST_CHECK_EQUAL(ret, SQL_NO_DATA);
+
+        ret = SQLMoreResults(stmt);
+
+        if (ret != SQL_NO_DATA)
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ResetStatement();
+    }
+
+    /**
+     * Selects and checks that value is absent.
+     *
+     * @param key Key.
+     */
+    void CheckNoTestValue(int64_t key)
+    {
+        // Just selecting everything to make sure everything is OK
+        SQLCHAR selectReq[] = "SELECT strField FROM TestType WHERE _key = ?";
+
+        char strField[1024] = { 0 };
+        SQLLEN strFieldLen = 0;
+
+        SQLRETURN ret = SQLBindCol(stmt, 1, SQL_C_CHAR, &strField, sizeof(strField), &strFieldLen);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLBindParameter(stmt, 1, SQL_PARAM_INPUT, SQL_C_SLONG, SQL_BIGINT, 0, 0, &key, 0, 0);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLExecDirect(stmt, selectReq, sizeof(selectReq));
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLFetch(stmt);
+
+        BOOST_CHECK_EQUAL(ret, SQL_NO_DATA);
+
+        ret = SQLMoreResults(stmt);
+
+        if (ret != SQL_NO_DATA)
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ResetStatement();
+    }
+
+    /**
+     * Reset statement state.
+     */
+    void ResetStatement()
+    {
+        SQLRETURN ret = SQLFreeStmt(stmt, SQL_RESET_PARAMS);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+
+        ret = SQLFreeStmt(stmt, SQL_UNBIND);
+
+        if (!SQL_SUCCEEDED(ret))
+            BOOST_FAIL(GetOdbcErrorMessage(SQL_HANDLE_STMT, stmt));
+    }
+
+    /** Node started during the test. */
+    Ignite grid;
+};
+
+BOOST_FIXTURE_TEST_SUITE(TransactionTestSuite, TransactionTestSuiteFixture)
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionCommit)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionRollbackInsert)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckNoTestValue(42);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionRollbackUpdate1)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionRollbackUpdate2)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionRollbackDelete1)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    DeleteTestValue(42);
+
+    CheckNoTestValue(42);
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionRollbackDelete2)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+
+    DeleteTestValue(42);
+
+    CheckNoTestValue(42);
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionTxModeError)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=error");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    BOOST_CHECK_EQUAL(ret, SQL_ERROR);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionTxModeIgnore)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=ignore");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckNoTestValue(42);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionConnectionTxModeCommit)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=commit");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentCommit)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentRollbackInsert)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckNoTestValue(42);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentRollbackUpdate1)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentRollbackUpdate2)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentRollbackDelete1)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    DeleteTestValue(42);
+
+    CheckNoTestValue(42);
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentRollbackDelete2)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    CheckTestValue(42, "Some");
+
+    ret = SQLEndTran(SQL_HANDLE_DBC, dbc, SQL_COMMIT);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+
+    DeleteTestValue(42);
+
+    CheckNoTestValue(42);
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentTxModeError)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=error");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    BOOST_CHECK_EQUAL(ret, SQL_ERROR);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentTxModeIgnore)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=ignore");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckNoTestValue(42);
+}
+
+BOOST_AUTO_TEST_CASE(TransactionEnvironmentTxModeCommit)
+{
+    Connect("DRIVER={Apache Ignite};address=127.0.0.1:11110;schema=cache;nested_tx_mode=commit");
+
+    SQLRETURN ret = SQLSetConnectAttr(dbc, SQL_ATTR_AUTOCOMMIT, SQL_AUTOCOMMIT_OFF, 0);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    InsertTestValue(42, "Some");
+
+    ret = ExecQuery("BEGIN");
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_DBC, dbc);
+
+    UpdateTestValue(42, "Other");
+
+    CheckTestValue(42, "Other");
+
+    ret = SQLEndTran(SQL_HANDLE_ENV, env, SQL_ROLLBACK);
+
+    ODBC_FAIL_ON_ERROR(ret, SQL_HANDLE_ENV, env);
+
+    CheckTestValue(42, "Some");
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/modules/platforms/cpp/odbc/Makefile.am b/modules/platforms/cpp/odbc/Makefile.am
index 90e4dc9..f706f4f 100644
--- a/modules/platforms/cpp/odbc/Makefile.am
+++ b/modules/platforms/cpp/odbc/Makefile.am
@@ -83,6 +83,7 @@
     src/protocol_version.cpp \
     src/result_page.cpp \
     src/row.cpp \
+    src/nested_tx_mode.cpp \
     src/message.cpp \
     src/column.cpp \
     src/statement.cpp \
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h b/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h
index c19f29d..27de7e8 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/config/configuration.h
@@ -26,6 +26,7 @@
 #include "ignite/odbc/config/settable_value.h"
 #include "ignite/odbc/ssl/ssl_mode.h"
 #include "ignite/odbc/end_point.h"
+#include "ignite/odbc/nested_tx_mode.h"
 
 namespace ignite
 {
@@ -104,6 +105,9 @@
 
                     /** Default value for password attribute. */
                     static const std::string password;
+
+                    /** Default value for nestedTxMode attribute. */
+                    static const NestedTxMode::Type nestedTxMode;
                 };
 
                 /**
@@ -537,6 +541,27 @@
                 bool IsPasswordSet() const;
 
                 /**
+                 * Get nested transaction mode.
+                 *
+                 * @return Nested transaction mode.
+                 */
+                NestedTxMode::Type GetNestedTxMode() const;
+
+                /**
+                 * Set nested transaction mode.
+                 *
+                 * @param mode Nested transaction mode.
+                 */
+                void SetNestedTxMode(NestedTxMode::Type mode);
+
+                /**
+                 * Check if the value set.
+                 *
+                 * @return @true if the value set.
+                 */
+                bool IsNestedTxModeSet() const;
+
+                /**
                  * Get argument map.
                  *
                  * @param res Resulting argument map.
@@ -613,6 +638,9 @@
 
                 /** Password. */
                 SettableValue<std::string> password;
+
+                /** Nested transaction mode. */
+                SettableValue<NestedTxMode::Type> nestedTxMode;
             };
 
             template<>
@@ -642,6 +670,10 @@
             template<>
             void Configuration::AddToMap<ssl::SslMode::Type>(ArgumentMap& map, const std::string& key,
                 const SettableValue<ssl::SslMode::Type>& value);
+
+            template<>
+            void Configuration::AddToMap<NestedTxMode::Type>(ArgumentMap& map, const std::string& key,
+                const SettableValue<NestedTxMode::Type>& value);
         }
     }
 }
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h b/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h
index 2bfe331..605109e 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/config/connection_string_parser.h
@@ -97,6 +97,9 @@
 
                     /** Connection attribute keyword for password attribute. */
                     static const std::string password;
+
+                    /** Connection attribute keyword for nestedTxMode attribute. */
+                    static const std::string nestedTxMode;
                 };
 
                 /**
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h b/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h
index 68d7816..b3ae5fc 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/connection.h
@@ -33,6 +33,7 @@
 {
     namespace odbc
     {
+        class Environment;
         class Statement;
 
         /**
@@ -99,6 +100,11 @@
             void Release();
 
             /**
+             * Deregister self from the parent.
+             */
+            void Deregister();
+
+            /**
              * Create statement associated with the connection.
              *
              * @return Pointer to valid instance on success and NULL on failure.
@@ -141,6 +147,13 @@
             const config::Configuration& GetConfiguration() const;
 
             /**
+             * Is auto commit.
+             *
+             * @return @c true if the auto commit is enabled.
+             */
+            bool IsAutoCommit();
+
+            /**
              * Create diagnostic record associated with the Connection instance.
              *
              * @param sqlState SQL state.
@@ -432,7 +445,10 @@
             /**
              * Constructor.
              */
-            Connection();
+            Connection(Environment* env);
+
+            /** Parent. */
+            Environment* env;
 
             /** Client Socket. */
             std::auto_ptr<SocketClient> socket;
@@ -443,6 +459,9 @@
             /** Login timeout in seconds. */
             int32_t loginTimeout;
 
+            /** Autocommit flag. */
+            bool autoCommit;
+
             /** Message parser. */
             Parser parser;
 
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable.h b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable.h
index 46d79ce..613cbf4 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable.h
@@ -82,6 +82,13 @@
                  */
                 virtual void AddStatusRecord(const OdbcError& err) = 0;
 
+                /**
+                 * Add new status record.
+                 *
+                 * @param rec Record.
+                 */
+                virtual void AddStatusRecord(const DiagnosticRecord& rec) = 0;
+
             protected:
                 /**
                  * Default constructor.
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h
index f1e9164..d8cfeb7 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/diagnostic/diagnosable_adapter.h
@@ -69,7 +69,7 @@
                  *
                  * @return Diagnostic record.
                  */
-                virtual const diagnostic::DiagnosticRecordStorage& GetDiagnosticRecords() const
+                virtual const DiagnosticRecordStorage& GetDiagnosticRecords() const
                 {
                     return diagnosticRecords;
                 }
@@ -79,7 +79,7 @@
                  *
                  * @return Diagnostic record.
                  */
-                virtual diagnostic::DiagnosticRecordStorage& GetDiagnosticRecords()
+                virtual DiagnosticRecordStorage& GetDiagnosticRecords()
                 {
                     return diagnosticRecords;
                 }
@@ -110,6 +110,13 @@
                  */
                 virtual void AddStatusRecord(const OdbcError& err);
 
+                /**
+                 * Add new status record.
+                 *
+                 * @param rec Record.
+                 */
+                virtual void AddStatusRecord(const DiagnosticRecord& rec);
+
             protected:
                 /** Diagnostic records. */
                 DiagnosticRecordStorage diagnosticRecords;
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/environment.h b/modules/platforms/cpp/odbc/include/ignite/odbc/environment.h
index 3eeade6..e6a171f 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/environment.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/environment.h
@@ -18,6 +18,8 @@
 #ifndef _IGNITE_ODBC_ENVIRONMENT
 #define _IGNITE_ODBC_ENVIRONMENT
 
+#include <set>
+
 #include "ignite/odbc/diagnostic/diagnosable_adapter.h"
 
 namespace ignite
@@ -32,6 +34,9 @@
         class Environment : public diagnostic::DiagnosableAdapter
         {
         public:
+            /** Connection set type. */
+            typedef std::set<Connection*> ConnectionSet;
+
             /**
              * Constructor.
              */
@@ -50,6 +55,13 @@
             Connection* CreateConnection();
 
             /**
+             * Deregister connection.
+             *
+             * @param conn Connection to deregister.
+             */
+            void DeregisterConnection(Connection* conn);
+
+            /**
              * Perform transaction commit on all the associated connections.
              */
             void TransactionCommit();
@@ -125,6 +137,9 @@
              */
             SqlResult::Type InternalGetAttribute(int32_t attr, app::ApplicationDataBuffer& buffer);
 
+            /** Assotiated connections. */
+            ConnectionSet connections;
+
             /** ODBC version. */
             int32_t odbcVersion;
 
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h
index e119d1f..87ba064 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/message.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/message.h
@@ -109,9 +109,10 @@
              * @param sql SQL query.
              * @param params Query arguments.
              * @param timeout Timeout.
+             * @param autoCommit Auto commit flag.
              */
             QueryExecuteRequest(const std::string& schema, const std::string& sql, const app::ParameterSet& params,
-                int32_t timeout);
+                int32_t timeout, bool autoCommit);
 
             /**
              * Destructor.
@@ -137,6 +138,9 @@
 
             /** Timeout. */
             int32_t timeout;
+
+            /** Auto commit. */
+            bool autoCommit;
         };
 
         /**
@@ -154,9 +158,11 @@
              * @param begin Beginng of the interval.
              * @param end End of the interval.
              * @param timeout Timeout.
+             * @param autoCommit Auto commit flag.
              */
             QueryExecuteBatchtRequest(const std::string& schema, const std::string& sql,
-                const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout);
+                const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout,
+                bool autoCommit);
 
             /**
              * Destructor.
@@ -191,6 +197,9 @@
 
             /** Timeout. */
             int32_t timeout;
+
+            /** Auto commit. */
+            bool autoCommit;
         };
 
         /**
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/meta/column_meta.h b/modules/platforms/cpp/odbc/include/ignite/odbc/meta/column_meta.h
index 6a94b04..d54fe3e 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/meta/column_meta.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/meta/column_meta.h
@@ -23,6 +23,7 @@
 
 #include "ignite/impl/binary/binary_reader_impl.h"
 
+#include "ignite/odbc/protocol_version.h"
 #include "ignite/odbc/common_types.h"
 #include "ignite/odbc/utility.h"
 
@@ -32,6 +33,8 @@
     {
         namespace meta
         {
+            using namespace ignite::odbc;
+
             /**
              * Column metadata.
              */
@@ -65,7 +68,8 @@
                  */
                 ColumnMeta(const std::string& schemaName, const std::string& tableName,
                            const std::string& columnName, int8_t dataType) :
-                    schemaName(schemaName), tableName(tableName), columnName(columnName), dataType(dataType)
+                    schemaName(schemaName), tableName(tableName), columnName(columnName), dataType(dataType),
+                    precision(-1), scale(-1)
                 {
                     // No-op.
                 }
@@ -85,7 +89,9 @@
                     schemaName(other.schemaName),
                     tableName(other.tableName),
                     columnName(other.columnName),
-                    dataType(other.dataType)
+                    dataType(other.dataType),
+                    precision(other.precision),
+                    scale(other.scale)
                 {
                     // No-op.
                 }
@@ -99,6 +105,8 @@
                     tableName = other.tableName;
                     columnName = other.columnName;
                     dataType = other.dataType;
+                    precision = other.precision;
+                    scale = other.scale;
 
                     return *this;
                 }
@@ -106,8 +114,9 @@
                 /**
                  * Read using reader.
                  * @param reader Reader.
+                 * @param ver Server version.
                  */
-                void Read(ignite::impl::binary::BinaryReaderImpl& reader);
+                void Read(ignite::impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver);
 
                 /**
                  * Get schema name.
@@ -140,12 +149,30 @@
                  * Get data type.
                  * @return Data type.
                  */
-                int8_t GetDataType() const 
+                int8_t GetDataType() const
                 {
                     return dataType;
                 }
 
                 /**
+                 * Get column precision.
+                 * @return Column precision.
+                 */
+                const int32_t GetPrecision() const
+                {
+                    return precision;
+                }
+
+                /**
+                 * Get column scale.
+                 * @return Column scale.
+                 */
+                const int32_t GetScale() const
+                {
+                    return scale;
+                }
+
+                /**
                  * Try to get attribute of a string type.
                  *
                  * @param fieldId Field ID.
@@ -175,6 +202,12 @@
 
                 /** Data type. */
                 int8_t dataType;
+
+                /** Column precision. */
+                int32_t precision;
+
+                /** Column scale. */
+                int32_t scale;
             };
 
             /** Column metadata vector alias. */
@@ -184,10 +217,12 @@
              * Read columns metadata collection.
              * @param reader Reader.
              * @param meta Collection.
+             * @param ver Server protocol version.
              */
-            void ReadColumnMetaVector(ignite::impl::binary::BinaryReaderImpl& reader, ColumnMetaVector& meta);
+            void ReadColumnMetaVector(ignite::impl::binary::BinaryReaderImpl& reader, ColumnMetaVector& meta,
+                    const ProtocolVersion& ver);
         }
     }
 }
 
-#endif //_IGNITE_ODBC_META_COLUMN_META
\ No newline at end of file
+#endif //_IGNITE_ODBC_META_COLUMN_META
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/nested_tx_mode.h b/modules/platforms/cpp/odbc/include/ignite/odbc/nested_tx_mode.h
new file mode 100644
index 0000000..eb2598d
--- /dev/null
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/nested_tx_mode.h
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef _IGNITE_ODBC_NESTED_TX_MODE
+#define _IGNITE_ODBC_NESTED_TX_MODE
+
+#include <set>
+#include <string>
+
+namespace ignite
+{
+    namespace odbc
+    {
+        /**
+         * Nested transaction mode.
+         */
+        struct NestedTxMode
+        {
+            /**
+             * Values.
+             */
+            enum Type
+            {
+                /** Commit current transaction if a new one started. */
+                AI_COMMIT = 1,
+
+                /** Ignore start of a new transaction. */
+                AI_IGNORE = 2,
+
+                /** Throw an error. */
+                AI_ERROR = 3,
+
+                /** Returned when value is unknown. */
+                AI_UNKNOWN = 100
+            };
+
+            /** Mode set type. */
+            typedef std::set<Type> ModeSet;
+
+            /**
+             * Get value from a string value.
+             *
+             * @param str String.
+             * @param dflt Value to return on error.
+             * @return Corresponding value on success and @c dflt on failure.
+             */
+            static Type FromString(const std::string& str, Type dflt = AI_UNKNOWN);
+
+            /**
+             * Convert value to a string.
+             *
+             * @param value Value.
+             * @return String value.
+             */
+            static std::string ToString(Type value);
+
+            /**
+             * Get set of all valid values.
+             *
+             * @return Set of all valid values.
+             */
+            static const ModeSet& GetValidValues();
+        };
+    }
+}
+
+#endif //_IGNITE_ODBC_NESTED_TX_MODE
\ No newline at end of file
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h
index 4833291..1a722e3 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/protocol_version.h
@@ -43,9 +43,12 @@
             /** Version 2.3.2: added multiple statements support. */
             static const ProtocolVersion VERSION_2_3_2;
 
-            /** Version 2.5.0: added multiple statements support. */
+            /** Version 2.5.0: added authentication and transactions support. */
             static const ProtocolVersion VERSION_2_5_0;
 
+            /** Version 2.7.0: added fields precision and scale. */
+            static const ProtocolVersion VERSION_2_7_0;
+
             typedef std::set<ProtocolVersion> VersionSet;
 
             /**
@@ -198,4 +201,4 @@
     }
 }
 
-#endif //_IGNITE_ODBC_PROTOCOL_VERSION
\ No newline at end of file
+#endif //_IGNITE_ODBC_PROTOCOL_VERSION
diff --git a/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h b/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h
index 1666863..77974ad 100644
--- a/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h
+++ b/modules/platforms/cpp/odbc/include/ignite/odbc/system/ui/dsn_configuration_window.h
@@ -61,6 +61,8 @@
                             SKIP_REDUCER_ON_UPDATE_CHECK_BOX,
                             PROTOCOL_VERSION_LABEL,
                             PROTOCOL_VERSION_COMBO_BOX,
+                            NESTED_TX_MODE_LABEL,
+                            NESTED_TX_MODE_COMBO_BOX,
                             SSL_MODE_LABEL,
                             SSL_MODE_COMBO_BOX,
                             SSL_KEY_FILE_LABEL,
@@ -312,6 +314,12 @@
                     /** Password edit. */
                     std::auto_ptr<Window> passwordEdit;
 
+                    /** Nested transaction mode label. */
+                    std::auto_ptr<Window> nestedTxModeLabel;
+
+                    /** Nested transaction mode combo box. */
+                    std::auto_ptr<Window> nestedTxModeComboBox;
+
                     /** Configuration. */
                     config::Configuration& config;
 
diff --git a/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp b/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp
index a94b479..e3bbb4e 100644
--- a/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp
+++ b/modules/platforms/cpp/odbc/os/win/src/system/ui/dsn_configuration_window.cpp
@@ -36,7 +36,7 @@
                 DsnConfigurationWindow::DsnConfigurationWindow(Window* parent, config::Configuration& config):
                     CustomWindow(parent, "IgniteConfigureDsn", "Configure Apache Ignite DSN"),
                     width(360),
-                    height(580),
+                    height(600),
                     connectionSettingsGroupBox(),
                     sslSettingsGroupBox(),
                     authSettingsGroupBox(),
@@ -59,6 +59,7 @@
                     userEdit(),
                     passwordLabel(),
                     passwordEdit(),
+                    nestedTxModeComboBox(),
                     okButton(),
                     cancelButton(),
                     config(config),
@@ -286,7 +287,7 @@
 
                 int DsnConfigurationWindow::CreateAdditionalSettingsGroup(int posX, int posY, int sizeX)
                 {
-                    enum { LABEL_WIDTH = 80 };
+                    enum { LABEL_WIDTH = 130 };
 
                     int labelPosX = posX + INTERVAL;
 
@@ -309,6 +310,29 @@
 
                     rowPos += INTERVAL + ROW_HEIGHT;
 
+                    nestedTxModeLabel = CreateLabel(labelPosX, rowPos, LABEL_WIDTH, ROW_HEIGHT,
+                        "Nested Transaction Mode:", ChildId::NESTED_TX_MODE_LABEL);
+                    nestedTxModeComboBox = CreateComboBox(editPosX, rowPos, editSizeX, ROW_HEIGHT,
+                        "", ChildId::NESTED_TX_MODE_COMBO_BOX);
+
+                    int id = 0;
+
+                    const NestedTxMode::ModeSet& supported = NestedTxMode::GetValidValues();
+
+                    for (NestedTxMode::ModeSet::const_iterator it = supported.begin(); it != supported.end(); ++it)
+                    {
+                        nestedTxModeComboBox->AddString(NestedTxMode::ToString(*it));
+
+                        if (*it == config.GetNestedTxMode())
+                            nestedTxModeComboBox->SetSelection(id);
+
+                        ++id;
+                    }
+
+                    nestedTxModeComboBox->SetEnabled(version >= ProtocolVersion::VERSION_2_5_0);
+
+                    rowPos += INTERVAL + ROW_HEIGHT;
+
                     distributedJoinsCheckBox = CreateCheckBox(labelPosX, rowPos, checkBoxSize, ROW_HEIGHT,
                         "Distributed Joins", ChildId::DISTRIBUTED_JOINS_CHECK_BOX, config.IsDistributedJoins());
 
@@ -429,6 +453,7 @@
                                     ProtocolVersion version = ProtocolVersion::FromString(versionStr);
                                     lazyCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_1_5);
                                     skipReducerOnUpdateCheckBox->SetEnabled(version >= ProtocolVersion::VERSION_2_3_0);
+                                    nestedTxModeComboBox->SetEnabled(version >= ProtocolVersion::VERSION_2_5_0);
 
                                     break;
                                 }
@@ -568,13 +593,6 @@
                 {
                     std::string pageSizeStr;
 
-                    bool distributedJoins;
-                    bool enforceJoinOrder;
-                    bool replicatedOnly;
-                    bool collocated;
-                    bool lazy;
-                    bool skipReducerOnUpdate;
-
                     pageSizeEdit->GetText(pageSizeStr);
 
                     int32_t pageSize = common::LexicalCast<int32_t>(pageSizeStr);
@@ -582,15 +600,22 @@
                     if (pageSize <= 0)
                         pageSize = config.GetPageSize();
 
-                    distributedJoins = distributedJoinsCheckBox->IsChecked();
-                    enforceJoinOrder = enforceJoinOrderCheckBox->IsChecked();
-                    replicatedOnly = replicatedOnlyCheckBox->IsChecked();
-                    collocated = collocatedCheckBox->IsChecked();
-                    lazy = lazyCheckBox->IsChecked();
-                    skipReducerOnUpdate = skipReducerOnUpdateCheckBox->IsChecked();
+                    std::string nestedTxModeStr;
+
+                    nestedTxModeComboBox->GetText(nestedTxModeStr);
+
+                    NestedTxMode::Type mode = NestedTxMode::FromString(nestedTxModeStr, config.GetNestedTxMode());
+
+                    bool distributedJoins = distributedJoinsCheckBox->IsChecked();
+                    bool enforceJoinOrder = enforceJoinOrderCheckBox->IsChecked();
+                    bool replicatedOnly = replicatedOnlyCheckBox->IsChecked();
+                    bool collocated = collocatedCheckBox->IsChecked();
+                    bool lazy = lazyCheckBox->IsChecked();
+                    bool skipReducerOnUpdate = skipReducerOnUpdateCheckBox->IsChecked();
 
                     LOG_MSG("Retrieving arguments:");
                     LOG_MSG("Page size:              " << pageSize);
+                    LOG_MSG("Nested TX Mode:         " << NestedTxMode::ToString(mode));
                     LOG_MSG("Distributed Joins:      " << (distributedJoins ? "true" : "false"));
                     LOG_MSG("Enforce Join Order:     " << (enforceJoinOrder ? "true" : "false"));
                     LOG_MSG("Replicated only:        " << (replicatedOnly ? "true" : "false"));
@@ -599,6 +624,7 @@
                     LOG_MSG("Skip reducer on update: " << (skipReducerOnUpdate ? "true" : "false"));
 
                     cfg.SetPageSize(pageSize);
+                    cfg.SetNestedTxMode(mode);
                     cfg.SetDistributedJoins(distributedJoins);
                     cfg.SetEnforceJoinOrder(enforceJoinOrder);
                     cfg.SetReplicatedOnly(replicatedOnly);
diff --git a/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj b/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj
index 352913e..630dc27 100644
--- a/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj
+++ b/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj
@@ -182,6 +182,7 @@
     <ClCompile Include="..\..\src\message.cpp" />
     <ClCompile Include="..\..\src\meta\column_meta.cpp" />
     <ClCompile Include="..\..\src\meta\table_meta.cpp" />
+    <ClCompile Include="..\..\src\nested_tx_mode.cpp" />
     <ClCompile Include="..\..\src\odbc.cpp" />
     <ClCompile Include="..\..\src\protocol_version.cpp" />
     <ClCompile Include="..\..\src\query\batch_query.cpp" />
@@ -230,6 +231,7 @@
     <ClInclude Include="..\..\include\ignite\odbc\meta\column_meta.h" />
     <ClInclude Include="..\..\include\ignite\odbc\meta\primary_key_meta.h" />
     <ClInclude Include="..\..\include\ignite\odbc\meta\table_meta.h" />
+    <ClInclude Include="..\..\include\ignite\odbc\nested_tx_mode.h" />
     <ClInclude Include="..\..\include\ignite\odbc\odbc_error.h" />
     <ClInclude Include="..\..\include\ignite\odbc\parser.h" />
     <ClInclude Include="..\..\include\ignite\odbc\protocol_version.h" />
diff --git a/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj.filters b/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj.filters
index b1956aa..6da0111 100644
--- a/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj.filters
+++ b/modules/platforms/cpp/odbc/project/vs/odbc.vcxproj.filters
@@ -166,6 +166,9 @@
     <ClCompile Include="..\..\src\config\config_tools.cpp">
       <Filter>Code\config</Filter>
     </ClCompile>
+    <ClCompile Include="..\..\src\nested_tx_mode.cpp">
+      <Filter>Code</Filter>
+    </ClCompile>
   </ItemGroup>
   <ItemGroup>
     <None Include="module.def">
@@ -329,5 +332,8 @@
     <ClInclude Include="..\..\include\ignite\odbc\end_point.h">
       <Filter>Code</Filter>
     </ClInclude>
+    <ClInclude Include="..\..\include\ignite\odbc\nested_tx_mode.h">
+      <Filter>Code</Filter>
+    </ClInclude>
   </ItemGroup>
 </Project>
\ No newline at end of file
diff --git a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp
index de8ca6a..4b9845d 100644
--- a/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp
+++ b/modules/platforms/cpp/odbc/src/app/application_data_buffer.cpp
@@ -144,42 +144,42 @@
 
                     case OdbcNativeType::AI_SIGNED_SHORT:
                     {
-                        return PutNumToNumBuffer<short>(value);
+                        return PutNumToNumBuffer<SQLSMALLINT>(value);
                     }
 
                     case OdbcNativeType::AI_UNSIGNED_SHORT:
                     {
-                        return PutNumToNumBuffer<unsigned short>(value);
+                        return PutNumToNumBuffer<SQLUSMALLINT>(value);
                     }
 
                     case OdbcNativeType::AI_SIGNED_LONG:
                     {
-                        return PutNumToNumBuffer<long>(value);
+                        return PutNumToNumBuffer<SQLINTEGER>(value);
                     }
 
                     case OdbcNativeType::AI_UNSIGNED_LONG:
                     {
-                        return PutNumToNumBuffer<unsigned long>(value);
+                        return PutNumToNumBuffer<SQLUINTEGER>(value);
                     }
 
                     case OdbcNativeType::AI_SIGNED_BIGINT:
                     {
-                        return PutNumToNumBuffer<int64_t>(value);
+                        return PutNumToNumBuffer<SQLBIGINT>(value);
                     }
 
                     case OdbcNativeType::AI_UNSIGNED_BIGINT:
                     {
-                        return PutNumToNumBuffer<uint64_t>(value);
+                        return PutNumToNumBuffer<SQLUBIGINT>(value);
                     }
 
                     case OdbcNativeType::AI_FLOAT:
                     {
-                        return PutNumToNumBuffer<float>(value);
+                        return PutNumToNumBuffer<SQLREAL>(value);
                     }
 
                     case OdbcNativeType::AI_DOUBLE:
                     {
-                        return PutNumToNumBuffer<double>(value);
+                        return PutNumToNumBuffer<SQLDOUBLE>(value);
                     }
 
                     case OdbcNativeType::AI_CHAR:
diff --git a/modules/platforms/cpp/odbc/src/config/configuration.cpp b/modules/platforms/cpp/odbc/src/config/configuration.cpp
index ef102d2..a99894d 100644
--- a/modules/platforms/cpp/odbc/src/config/configuration.cpp
+++ b/modules/platforms/cpp/odbc/src/config/configuration.cpp
@@ -56,6 +56,8 @@
             const std::string Configuration::DefaultValue::user = "";
             const std::string Configuration::DefaultValue::password = "";
 
+            const NestedTxMode::Type Configuration::DefaultValue::nestedTxMode = NestedTxMode::AI_ERROR;
+
             Configuration::Configuration() :
                 dsn(DefaultValue::dsn),
                 driver(DefaultValue::driver),
@@ -76,7 +78,8 @@
                 sslCertFile(DefaultValue::sslCertFile),
                 sslCaFile(DefaultValue::sslCaFile),
                 user(DefaultValue::user),
-                password(DefaultValue::password)
+                password(DefaultValue::password),
+                nestedTxMode(DefaultValue::nestedTxMode)
             {
                 // No-op.
             }
@@ -404,6 +407,21 @@
                 return password.IsSet();
             }
 
+            NestedTxMode::Type Configuration::GetNestedTxMode() const
+            {
+                return nestedTxMode.GetValue();
+            }
+
+            void Configuration::SetNestedTxMode(NestedTxMode::Type mode)
+            {
+                this->nestedTxMode.SetValue(mode);
+            }
+
+            bool Configuration::IsNestedTxModeSet() const
+            {
+                return nestedTxMode.IsSet();
+            }
+
             int32_t Configuration::GetPageSize() const
             {
                 return pageSize.GetValue();
@@ -431,6 +449,7 @@
                 AddToMap(res, ConnectionStringParser::Key::sslCaFile, sslCaFile);
                 AddToMap(res, ConnectionStringParser::Key::user, user);
                 AddToMap(res, ConnectionStringParser::Key::password, password);
+                AddToMap(res, ConnectionStringParser::Key::nestedTxMode, nestedTxMode);
             }
 
             template<>
@@ -486,6 +505,14 @@
                 if (value.IsSet())
                     map[key] = ssl::SslMode::ToString(value.GetValue());
             }
+
+            template<>
+            void Configuration::AddToMap(ArgumentMap& map, const std::string& key,
+                const SettableValue<NestedTxMode::Type>& value)
+            {
+                if (value.IsSet())
+                    map[key] = NestedTxMode::ToString(value.GetValue());
+            }
         }
     }
 }
diff --git a/modules/platforms/cpp/odbc/src/config/connection_info.cpp b/modules/platforms/cpp/odbc/src/config/connection_info.cpp
index 7279ea6..5885381 100644
--- a/modules/platforms/cpp/odbc/src/config/connection_info.cpp
+++ b/modules/platforms/cpp/odbc/src/config/connection_info.cpp
@@ -766,7 +766,7 @@
 #ifdef SQL_MULTIPLE_ACTIVE_TXN
                 // A character string: "Y" if the driver supports more than one active transaction at the same time,
                 // "N" if only one transaction can be active at any time.
-                strParams[SQL_MULTIPLE_ACTIVE_TXN] = "N";
+                strParams[SQL_MULTIPLE_ACTIVE_TXN] = "Y";
 #endif // SQL_MULTIPLE_ACTIVE_TXN
 
 #ifdef SQL_ORDER_BY_COLUMNS_IN_SELECT
@@ -1596,7 +1596,7 @@
                 // SQL_TXN_REPEATABLE_READ = Dirty reads and nonrepeatable reads are not possible. Phantoms are possible
                 // SQL_TXN_SERIALIZABLE = Transactions are serializable. Serializable transactions do not allow dirty
                 //     reads, nonrepeatable reads, or phantoms.
-                intParams[SQL_DEFAULT_TXN_ISOLATION] = 0;
+                intParams[SQL_DEFAULT_TXN_ISOLATION] = SQL_TXN_REPEATABLE_READ;
 #endif // SQL_DEFAULT_TXN_ISOLATION
 
 #ifdef SQL_DROP_ASSERTION
@@ -2170,7 +2170,7 @@
                 // SQL_ATTR_TXN_ISOLATION attribute. For more information, see SQLSetConnectAttr Function.
                 // An SQL-92 Entry level-conformant driver will always return SQL_TXN_SERIALIZABLE as supported.
                 // A FIPS Transitional level-conformant driver will always return all of these options as supported.
-                intParams[SQL_TXN_ISOLATION_OPTION] = 0;
+                intParams[SQL_TXN_ISOLATION_OPTION] = SQL_TXN_REPEATABLE_READ;
 #endif // SQL_TXN_ISOLATION_OPTION
 
 #ifdef SQL_UNION
@@ -2312,7 +2312,7 @@
 
 #ifdef SQL_TXN_CAPABLE
                 // Describs the transaction support in the driver or data source.
-                shortParams[SQL_TXN_CAPABLE] = SQL_TC_NONE;
+                shortParams[SQL_TXN_CAPABLE] = SQL_TC_DDL_COMMIT;
 #endif // SQL_TXN_CAPABLE
 
 #ifdef SQL_QUOTED_IDENTIFIER_CASE
diff --git a/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp b/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp
index d23c2cb..7f7c2f4 100644
--- a/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp
+++ b/modules/platforms/cpp/odbc/src/config/connection_string_parser.cpp
@@ -24,6 +24,7 @@
 #include "ignite/odbc/ssl/ssl_mode.h"
 #include "ignite/odbc/config/connection_string_parser.h"
 #include "ignite/odbc/config/config_tools.h"
+#include "ignite/odbc/nested_tx_mode.h"
 
 namespace ignite
 {
@@ -51,6 +52,7 @@
             const std::string ConnectionStringParser::Key::sslCaFile              = "ssl_ca_file";
             const std::string ConnectionStringParser::Key::user                   = "user";
             const std::string ConnectionStringParser::Key::password               = "password";
+            const std::string ConnectionStringParser::Key::nestedTxMode           = "nested_tx_mode";
 
             ConnectionStringParser::ConnectionStringParser(Configuration& cfg):
                 cfg(cfg)
@@ -424,6 +426,23 @@
                 {
                     cfg.SetPassword(value);
                 }
+                else if (lKey == Key::nestedTxMode)
+                {
+                    NestedTxMode::Type mode = NestedTxMode::FromString(value);
+
+                    if (mode == NestedTxMode::AI_UNKNOWN)
+                    {
+                        if (diag)
+                        {
+                            diag->AddStatusRecord(SqlState::S01S02_OPTION_VALUE_CHANGED,
+                                "Specified nested transaction mode is not supported. Default value used ('error').");
+                        }
+
+                        return;
+                    }
+
+                    cfg.SetNestedTxMode(mode);
+                }
                 else if (diag)
                 {
                     std::stringstream stream;
diff --git a/modules/platforms/cpp/odbc/src/connection.cpp b/modules/platforms/cpp/odbc/src/connection.cpp
index 4a28bbf..5d01a0d 100644
--- a/modules/platforms/cpp/odbc/src/connection.cpp
+++ b/modules/platforms/cpp/odbc/src/connection.cpp
@@ -25,6 +25,7 @@
 
 #include "ignite/odbc/log.h"
 #include "ignite/odbc/utility.h"
+#include "ignite/odbc/environment.h"
 #include "ignite/odbc/statement.h"
 #include "ignite/odbc/connection.h"
 #include "ignite/odbc/message.h"
@@ -54,10 +55,12 @@
 {
     namespace odbc
     {
-        Connection::Connection() :
+        Connection::Connection(Environment* env) :
+            env(env),
             socket(),
             timeout(0),
             loginTimeout(SocketClient::DEFALT_CONNECT_TIMEOUT),
+            autoCommit(true),
             parser(),
             config(),
             info(config)
@@ -186,6 +189,11 @@
             IGNITE_ODBC_API_CALL(InternalRelease());
         }
 
+        void Connection::Deregister()
+        {
+            env->DeregisterConnection(this);
+        }
+
         SqlResult::Type Connection::InternalRelease()
         {
             if (socket.get() == 0)
@@ -367,6 +375,11 @@
             return config;
         }
 
+        bool Connection::IsAutoCommit()
+        {
+            return autoCommit;
+        }
+
         diagnostic::DiagnosticRecord Connection::CreateStatusRecord(SqlState::Type sqlState,
             const std::string& message, int32_t rowNum, int32_t columnNum)
         {
@@ -380,6 +393,37 @@
 
         SqlResult::Type Connection::InternalTransactionCommit()
         {
+            std::string schema = config.GetSchema();
+
+            app::ParameterSet empty;
+
+            QueryExecuteRequest req(schema, "COMMIT", empty, timeout, autoCommit);
+            QueryExecuteResponse rsp;
+
+            try
+            {
+                bool sent = SyncMessage(req, rsp, timeout);
+
+                if (!sent)
+                {
+                    AddStatusRecord(SqlState::S08S01_LINK_FAILURE, "Failed to send commit request.");
+
+                    return SqlResult::AI_ERROR;
+                }
+            }
+            catch (const OdbcError& err)
+            {
+                AddStatusRecord(err);
+
+                return SqlResult::AI_ERROR;
+            }
+            catch (const IgniteError& err)
+            {
+                AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText());
+
+                return SqlResult::AI_ERROR;
+            }
+
             return SqlResult::AI_SUCCESS;
         }
 
@@ -390,10 +434,38 @@
 
         SqlResult::Type Connection::InternalTransactionRollback()
         {
-            AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED,
-                "Rollback operation is not supported.");
+            std::string schema = config.GetSchema();
 
-            return SqlResult::AI_ERROR;
+            app::ParameterSet empty;
+
+            QueryExecuteRequest req(schema, "ROLLBACK", empty, timeout, autoCommit);
+            QueryExecuteResponse rsp;
+
+            try
+            {
+                bool sent = SyncMessage(req, rsp, timeout);
+
+                if (!sent)
+                {
+                    AddStatusRecord(SqlState::S08S01_LINK_FAILURE, "Failed to send rollback request.");
+
+                    return SqlResult::AI_ERROR;
+                }
+            }
+            catch (const OdbcError& err)
+            {
+                AddStatusRecord(err);
+
+                return SqlResult::AI_ERROR;
+            }
+            catch (const IgniteError& err)
+            {
+                AddStatusRecord(SqlState::SHY000_GENERAL_ERROR, err.GetText());
+
+                return SqlResult::AI_ERROR;
+            }
+
+            return SqlResult::AI_SUCCESS;
         }
 
         void Connection::GetAttribute(int attr, void* buf, SQLINTEGER bufLen, SQLINTEGER* valueLen)
@@ -452,7 +524,10 @@
                 {
                     SQLUINTEGER *val = reinterpret_cast<SQLUINTEGER*>(buf);
 
-                    *val = SQL_AUTOCOMMIT_ON;
+                    *val = autoCommit ? SQL_AUTOCOMMIT_ON : SQL_AUTOCOMMIT_OFF;
+
+                    if (valueLen)
+                        *valueLen = SQL_IS_INTEGER;
 
                     break;
                 }
@@ -507,10 +582,17 @@
 
                 case SQL_ATTR_AUTOCOMMIT:
                 {
-                    SQLUINTEGER val = static_cast<SQLUINTEGER>(reinterpret_cast<ptrdiff_t>(value));
+                    SQLUINTEGER mode = static_cast<SQLUINTEGER>(reinterpret_cast<ptrdiff_t>(value));
 
-                    if (val != SQL_AUTOCOMMIT_ON)
+                    if (mode != SQL_AUTOCOMMIT_ON && mode != SQL_AUTOCOMMIT_OFF)
+                    {
+                        AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED,
+                            "Specified attribute is not supported.");
+
                         return SqlResult::AI_ERROR;
+                    }
+
+                    autoCommit = mode == SQL_AUTOCOMMIT_ON;
 
                     break;
                 }
@@ -588,8 +670,10 @@
                 if (!rsp.GetError().empty())
                     constructor << "Additional info: " << rsp.GetError() << " ";
 
-                constructor << "Current node Apache Ignite version: " << rsp.GetCurrentVer().ToString() << ", "
-                            << "driver protocol version introduced in version: " << protocolVersion.ToString() << ".";
+                constructor << "Current version of the protocol, used by the server node is " 
+                            << rsp.GetCurrentVer().ToString() << ", "
+                            << "driver protocol version introduced in version "
+                            << protocolVersion.ToString() << ".";
 
                 AddStatusRecord(SqlState::S08004_CONNECTION_REJECTED, constructor.str());
 
@@ -617,6 +701,9 @@
 
             CollectAddresses(config, addrs);
 
+            if (socket.get() == 0)
+                socket.reset(new system::TcpSocketClient());
+
             bool connected = false;
 
             while (!addrs.empty() && !connected)
diff --git a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp
index 143d949..6de0716 100644
--- a/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp
+++ b/modules/platforms/cpp/odbc/src/diagnostic/diagnosable_adapter.cpp
@@ -52,6 +52,11 @@
             {
                 AddStatusRecord(err.GetStatus(), err.GetErrorMessage(), 0, 0);
             }
+
+            void DiagnosableAdapter::AddStatusRecord(const DiagnosticRecord& rec)
+            {
+                diagnosticRecords.AddStatusRecord(rec);
+            }
         }
     }
 }
diff --git a/modules/platforms/cpp/odbc/src/dsn_config.cpp b/modules/platforms/cpp/odbc/src/dsn_config.cpp
index 1307d7e..dcdb8f4 100644
--- a/modules/platforms/cpp/odbc/src/dsn_config.cpp
+++ b/modules/platforms/cpp/odbc/src/dsn_config.cpp
@@ -212,6 +212,11 @@
 
             if (password.IsSet() && !config.IsPasswordSet())
                 config.SetPassword(password.GetValue());
+
+            SettableValue<std::string> nestedTxModeStr = ReadDsnString(dsn, ConnectionStringParser::Key::nestedTxMode);
+
+            if (nestedTxModeStr.IsSet() && !config.IsNestedTxModeSet())
+                config.SetNestedTxMode(NestedTxMode::FromString(nestedTxModeStr.GetValue(), config.GetNestedTxMode()));
         }
     }
 }
\ No newline at end of file
diff --git a/modules/platforms/cpp/odbc/src/environment.cpp b/modules/platforms/cpp/odbc/src/environment.cpp
index 417fedc..5a2ac4f 100644
--- a/modules/platforms/cpp/odbc/src/environment.cpp
+++ b/modules/platforms/cpp/odbc/src/environment.cpp
@@ -25,7 +25,8 @@
 {
     namespace odbc
     {
-        Environment::Environment() : 
+        Environment::Environment() :
+            connections(),
             odbcVersion(SQL_OV_ODBC3),
             odbcNts(SQL_TRUE)
         {
@@ -46,9 +47,14 @@
             return connection;
         }
 
+        void Environment::DeregisterConnection(Connection* conn)
+        {
+            connections.erase(conn);
+        }
+
         SqlResult::Type Environment::InternalCreateConnection(Connection*& connection)
         {
-            connection = new Connection;
+            connection = new Connection(this);
 
             if (!connection)
             {
@@ -57,6 +63,8 @@
                 return SqlResult::AI_ERROR;
             }
 
+            connections.insert(connection);
+
             return SqlResult::AI_SUCCESS;
         }
 
@@ -67,7 +75,25 @@
 
         SqlResult::Type Environment::InternalTransactionCommit()
         {
-            return SqlResult::AI_SUCCESS;
+            SqlResult::Type res = SqlResult::AI_SUCCESS;
+
+            for (ConnectionSet::iterator it = connections.begin(); it != connections.end(); ++it)
+            {
+                Connection* conn = *it;
+
+                conn->TransactionCommit();
+
+                diagnostic::DiagnosticRecordStorage& diag = conn->GetDiagnosticRecords();
+
+                if (diag.GetStatusRecordsNumber() > 0)
+                {
+                    AddStatusRecord(diag.GetStatusRecord(1));
+
+                    res = SqlResult::AI_SUCCESS_WITH_INFO;
+                }
+            }
+
+            return res;
         }
 
         void Environment::TransactionRollback()
@@ -77,10 +103,25 @@
 
         SqlResult::Type Environment::InternalTransactionRollback()
         {
-            AddStatusRecord(SqlState::SHYC00_OPTIONAL_FEATURE_NOT_IMPLEMENTED,
-                "Rollback operation is not supported.");
+            SqlResult::Type res = SqlResult::AI_SUCCESS;
 
-            return SqlResult::AI_ERROR;
+            for (ConnectionSet::iterator it = connections.begin(); it != connections.end(); ++it)
+            {
+                Connection* conn = *it;
+
+                conn->TransactionRollback();
+
+                diagnostic::DiagnosticRecordStorage& diag = conn->GetDiagnosticRecords();
+
+                if (diag.GetStatusRecordsNumber() > 0)
+                {
+                    AddStatusRecord(diag.GetStatusRecord(1));
+
+                    res = SqlResult::AI_SUCCESS_WITH_INFO;
+                }
+            }
+
+            return res;
         }
 
         void Environment::SetAttribute(int32_t attr, void* value, int32_t len)
diff --git a/modules/platforms/cpp/odbc/src/message.cpp b/modules/platforms/cpp/odbc/src/message.cpp
index 53d429b..5730842 100644
--- a/modules/platforms/cpp/odbc/src/message.cpp
+++ b/modules/platforms/cpp/odbc/src/message.cpp
@@ -82,15 +82,18 @@
             {
                 utility::WriteString(writer, config.GetUser());
                 utility::WriteString(writer, config.GetPassword());
+
+                writer.WriteInt8(config.GetNestedTxMode());
             }
         }
 
         QueryExecuteRequest::QueryExecuteRequest(const std::string& schema, const std::string& sql,
-            const app::ParameterSet& params, int32_t timeout):
+            const app::ParameterSet& params, int32_t timeout, bool autoCommit):
             schema(schema),
             sql(sql),
             params(params),
-            timeout(timeout)
+            timeout(timeout),
+            autoCommit(autoCommit)
         {
             // No-op.
         }
@@ -115,17 +118,21 @@
 
             if (ver >= ProtocolVersion::VERSION_2_3_2)
                 writer.WriteInt32(timeout);
+
+            if (ver >= ProtocolVersion::VERSION_2_5_0)
+                writer.WriteBool(autoCommit);
         }
 
         QueryExecuteBatchtRequest::QueryExecuteBatchtRequest(const std::string& schema, const std::string& sql,
-            const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout):
+            const app::ParameterSet& params, SqlUlen begin, SqlUlen end, bool last, int32_t timeout, bool autoCommit) :
             schema(schema),
             sql(sql),
             params(params),
             begin(begin),
             end(end),
             last(last),
-            timeout(timeout)
+            timeout(timeout),
+            autoCommit(autoCommit)
         {
             // No-op.
         }
@@ -150,6 +157,9 @@
 
             if (ver >= ProtocolVersion::VERSION_2_3_2)
                 writer.WriteInt32(timeout);
+
+            if (ver >= ProtocolVersion::VERSION_2_5_0)
+                writer.WriteBool(autoCommit);
         }
 
         QueryCloseRequest::QueryCloseRequest(int64_t queryId): queryId(queryId)
@@ -343,7 +353,7 @@
         {
             queryId = reader.ReadInt64();
 
-            meta::ReadColumnMetaVector(reader, meta);
+            meta::ReadColumnMetaVector(reader, meta, ver);
 
             ReadAffectedRows(reader, ver, affectedRows);
         }
@@ -390,7 +400,8 @@
             // No-op.
         }
 
-        void QueryFetchResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&)
+        void QueryFetchResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader,
+            const ProtocolVersion& ver)
         {
             queryId = reader.ReadInt64();
 
@@ -407,9 +418,10 @@
             // No-op.
         }
 
-        void QueryGetColumnsMetaResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader, const ProtocolVersion&)
+        void QueryGetColumnsMetaResponse::ReadOnSuccess(impl::binary::BinaryReaderImpl& reader,
+            const ProtocolVersion& ver)
         {
-            meta::ReadColumnMetaVector(reader, meta);
+            meta::ReadColumnMetaVector(reader, meta, ver);
         }
 
         QueryGetTablesMetaResponse::QueryGetTablesMetaResponse()
diff --git a/modules/platforms/cpp/odbc/src/meta/column_meta.cpp b/modules/platforms/cpp/odbc/src/meta/column_meta.cpp
index b7f212a..476f6a6 100644
--- a/modules/platforms/cpp/odbc/src/meta/column_meta.cpp
+++ b/modules/platforms/cpp/odbc/src/meta/column_meta.cpp
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+#include "ignite/common/utils.h"
+
 #include "ignite/odbc/system/odbc_constants.h"
 #include "ignite/odbc/meta/column_meta.h"
 #include "ignite/odbc/type_traits.h"
@@ -72,13 +74,19 @@
 
 #undef DBG_STR_CASE
 
-            void ColumnMeta::Read(ignite::impl::binary::BinaryReaderImpl& reader)
+            void ColumnMeta::Read(ignite::impl::binary::BinaryReaderImpl& reader, const ProtocolVersion& ver)
             {
                 utility::ReadString(reader, schemaName);
                 utility::ReadString(reader, tableName);
                 utility::ReadString(reader, columnName);
 
                 dataType = reader.ReadInt8();
+
+                if (ver >= ProtocolVersion::VERSION_2_7_0)
+                {
+                    precision = reader.ReadInt32();
+                    scale = reader.ReadInt32();
+                }
             }
 
             bool ColumnMeta::GetAttribute(uint16_t fieldId, std::string& value) const 
@@ -137,6 +145,29 @@
                         return true;
                     }
 
+                    case SQL_DESC_PRECISION:
+                    case SQL_COLUMN_LENGTH:
+                    case SQL_COLUMN_PRECISION:
+                    {
+                        if (precision == -1)
+                            return false;
+
+                        value = common::LexicalCast<std::string>(precision);
+
+                        return true;
+                    }
+
+                    case SQL_DESC_SCALE:
+                    case SQL_COLUMN_SCALE:
+                    {
+                        if (scale == -1)
+                            return false;
+
+                        value = common::LexicalCast<std::string>(precision);
+
+                        return true;
+                    }
+
                     default:
                         return false;
                 }
@@ -149,6 +180,15 @@
                 switch (fieldId)
                 {
                     case SQL_DESC_FIXED_PREC_SCALE:
+                    {
+                        if (scale == -1)
+                            value = SQL_FALSE;
+                        else
+                            value = SQL_TRUE;
+
+                        break;
+                    }
+
                     case SQL_DESC_AUTO_UNIQUE_VALUE:
                     {
                         value = SQL_FALSE;
@@ -185,7 +225,10 @@
                     case SQL_DESC_OCTET_LENGTH:
                     case SQL_COLUMN_LENGTH:
                     {
-                        value = type_traits::BinaryTypeTransferLength(dataType);
+                        if (precision == -1)
+                            value = type_traits::BinaryTypeTransferLength(dataType);
+                        else
+                            value = precision;
 
                         break;
                     }
@@ -207,7 +250,10 @@
                     case SQL_DESC_PRECISION:
                     case SQL_COLUMN_PRECISION:
                     {
-                        value = type_traits::BinaryTypeColumnSize(dataType);
+                        if (precision == -1)
+                            value = type_traits::BinaryTypeColumnSize(dataType);
+                        else
+                            value = precision;
 
                         break;
                     }
@@ -215,10 +261,15 @@
                     case SQL_DESC_SCALE:
                     case SQL_COLUMN_SCALE:
                     {
-                        value = type_traits::BinaryTypeDecimalDigits(dataType);
+                        if (scale == -1)
+                        {
+                            value = type_traits::BinaryTypeDecimalDigits(dataType);
 
-                        if (value < 0)
-                            value = 0;
+                            if (value < 0)
+                                value = 0;
+                        }
+                        else
+                            value = scale;
 
                         break;
                     }
@@ -260,7 +311,8 @@
                 return true;
             }
 
-            void ReadColumnMetaVector(ignite::impl::binary::BinaryReaderImpl& reader, ColumnMetaVector& meta)
+            void ReadColumnMetaVector(ignite::impl::binary::BinaryReaderImpl& reader, ColumnMetaVector& meta,
+                    const ProtocolVersion& ver)
             {
                 int32_t metaNum = reader.ReadInt32();
 
@@ -271,7 +323,7 @@
                 {
                     meta.push_back(ColumnMeta());
 
-                    meta.back().Read(reader);
+                    meta.back().Read(reader, ver);
                 }
             }
         }
diff --git a/modules/platforms/cpp/odbc/src/nested_tx_mode.cpp b/modules/platforms/cpp/odbc/src/nested_tx_mode.cpp
new file mode 100644
index 0000000..e4e6118
--- /dev/null
+++ b/modules/platforms/cpp/odbc/src/nested_tx_mode.cpp
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ignite/odbc/nested_tx_mode.h"
+#include "ignite/common/utils.h"
+
+namespace
+{
+    using ignite::odbc::NestedTxMode;
+    NestedTxMode::Type validValues0[] = {
+        NestedTxMode::AI_COMMIT,
+        NestedTxMode::AI_IGNORE,
+        NestedTxMode::AI_ERROR
+    };
+
+    NestedTxMode::ModeSet validValues(validValues0, validValues0 + (sizeof(validValues0) / sizeof(validValues0[0])));
+}
+
+
+namespace ignite
+{
+    namespace odbc
+    {
+        NestedTxMode::Type NestedTxMode::FromString(const std::string& str, Type dflt)
+        {
+            std::string lower = common::ToLower(str);
+
+            if (lower == "commit")
+                return AI_COMMIT;
+
+            if (lower == "ignore")
+                return AI_IGNORE;
+
+            if (lower == "error")
+                return AI_ERROR;
+
+            return dflt;
+        }
+
+        std::string NestedTxMode::ToString(Type value)
+        {
+            switch (value)
+            {
+                case AI_COMMIT:
+                    return "commit";
+
+                case AI_IGNORE:
+                    return "ignore";
+
+                case AI_ERROR:
+                    return "error";
+
+                default:
+                    break;
+            }
+
+            return "default";
+        }
+
+        const NestedTxMode::ModeSet& NestedTxMode::GetValidValues()
+        {
+            return validValues;
+        }
+    }
+}
+
diff --git a/modules/platforms/cpp/odbc/src/odbc.cpp b/modules/platforms/cpp/odbc/src/odbc.cpp
index 3aad52f..5b37da3 100644
--- a/modules/platforms/cpp/odbc/src/odbc.cpp
+++ b/modules/platforms/cpp/odbc/src/odbc.cpp
@@ -204,6 +204,8 @@
         if (!connection)
             return SQL_INVALID_HANDLE;
 
+        connection->Deregister();
+
         delete connection;
 
         return SQL_SUCCESS;
diff --git a/modules/platforms/cpp/odbc/src/protocol_version.cpp b/modules/platforms/cpp/odbc/src/protocol_version.cpp
index c345dd4..01937e2 100644
--- a/modules/platforms/cpp/odbc/src/protocol_version.cpp
+++ b/modules/platforms/cpp/odbc/src/protocol_version.cpp
@@ -31,6 +31,7 @@
         const ProtocolVersion ProtocolVersion::VERSION_2_3_0(2, 3, 0);
         const ProtocolVersion ProtocolVersion::VERSION_2_3_2(2, 3, 2);
         const ProtocolVersion ProtocolVersion::VERSION_2_5_0(2, 5, 0);
+        const ProtocolVersion ProtocolVersion::VERSION_2_7_0(2, 7, 0);
 
         ProtocolVersion::VersionSet::value_type supportedArray[] = {
             ProtocolVersion::VERSION_2_1_0,
@@ -38,6 +39,7 @@
             ProtocolVersion::VERSION_2_3_0,
             ProtocolVersion::VERSION_2_3_2,
             ProtocolVersion::VERSION_2_5_0,
+            ProtocolVersion::VERSION_2_7_0
         };
 
         const ProtocolVersion::VersionSet ProtocolVersion::supported(supportedArray,
@@ -66,7 +68,7 @@
 
         const ProtocolVersion& ProtocolVersion::GetCurrent()
         {
-            return VERSION_2_5_0;
+            return VERSION_2_7_0;
         }
 
         void ThrowParseError()
diff --git a/modules/platforms/cpp/odbc/src/query/batch_query.cpp b/modules/platforms/cpp/odbc/src/query/batch_query.cpp
index 8dada3c..1256c94 100644
--- a/modules/platforms/cpp/odbc/src/query/batch_query.cpp
+++ b/modules/platforms/cpp/odbc/src/query/batch_query.cpp
@@ -144,7 +144,9 @@
             {
                 const std::string& schema = connection.GetSchema();
 
-                QueryExecuteBatchtRequest req(schema, sql, params, begin, end, last, timeout);
+                QueryExecuteBatchtRequest req(schema, sql, params, begin, end, last, timeout,
+                    connection.IsAutoCommit());
+
                 QueryExecuteBatchResponse rsp;
 
                 try
diff --git a/modules/platforms/cpp/odbc/src/query/data_query.cpp b/modules/platforms/cpp/odbc/src/query/data_query.cpp
index 6974c7f..69208b8 100644
--- a/modules/platforms/cpp/odbc/src/query/data_query.cpp
+++ b/modules/platforms/cpp/odbc/src/query/data_query.cpp
@@ -206,7 +206,7 @@
             {
                 const std::string& schema = connection.GetSchema();
 
-                QueryExecuteRequest req(schema, sql, params, timeout);
+                QueryExecuteRequest req(schema, sql, params, timeout, connection.IsAutoCommit());
                 QueryExecuteResponse rsp;
 
                 try
diff --git a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/readable.h b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/readable.h
index 8c3e337..458da25 100644
--- a/modules/platforms/cpp/thin-client/include/ignite/impl/thin/readable.h
+++ b/modules/platforms/cpp/thin-client/include/ignite/impl/thin/readable.h
@@ -84,7 +84,7 @@
                  */
                 virtual void Read(binary::BinaryReaderImpl& reader)
                 {
-                    reader.ReadTopObject0(value);
+                    reader.ReadTopObject0<ignite::binary::BinaryReader, ValueType>(value);
                 }
 
             private:
diff --git a/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/ExpiryCacheHolderTest.cs b/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/ExpiryCacheHolderTest.cs
index b3d5228..eb27977 100644
--- a/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/ExpiryCacheHolderTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/ExpiryCacheHolderTest.cs
@@ -416,11 +416,6 @@
                 throw new NotImplementedException();
             }
 
-            public void LocalPromote(IEnumerable<int> keys)
-            {
-                throw new NotImplementedException();
-            }
-
             public IQueryCursor<ICacheEntry<int, int>> Query(QueryBase qry)
             {
                 throw new NotImplementedException();
diff --git a/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/IgniteSessionStateStoreProviderTest.cs b/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/IgniteSessionStateStoreProviderTest.cs
index 08c44a6..da074ac 100644
--- a/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/IgniteSessionStateStoreProviderTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.AspNet.Tests/IgniteSessionStateStoreProviderTest.cs
@@ -23,7 +23,6 @@
     using System.Linq;
     using System.Reflection;
     using System.Threading;
-    using System.Threading.Tasks;
     using System.Web;
     using System.Web.SessionState;
     using Apache.Ignite.Core;
@@ -56,7 +55,7 @@
         private const string Id = "1";
 
         /** Test context. */
-        private static readonly HttpContext HttpContext = 
+        private static readonly HttpContext HttpContext =
             new HttpContext(new HttpRequest(null, "http://tempuri.org", null), new HttpResponse(null));
 
         /// <summary>
@@ -87,7 +86,7 @@
             var ignite = Ignition.GetIgnite(GridName);
             ignite.GetCacheNames().ToList().ForEach(x => ignite.GetCache<object, object>(x).RemoveAll());
         }
-        
+
         /// <summary>
         /// Test setup.
         /// </summary>
@@ -230,7 +229,7 @@
 
             // Add item.
             provider.CreateUninitializedItem(HttpContext, Id, 7);
-            
+
             // Check added item.
             res = provider.GetItem(HttpContext, Id, out locked, out lockAge, out lockId, out actions);
             Assert.IsNotNull(res);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
index 00eddeb..3a4ef03 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Apache.Ignite.Core.Tests.csproj
@@ -47,6 +47,9 @@
       <Private>True</Private>
     </Reference>
     <Reference Include="Microsoft.CSharp" />
+    <Reference Include="Moq, Version=4.0.10827.0, Culture=neutral, PublicKeyToken=69f491c39445e920, processorArchitecture=MSIL">
+      <HintPath>..\packages\Moq.4.0.10827\lib\NET40\Moq.dll</HintPath>
+    </Reference>
     <Reference Include="NLog">
       <HintPath>..\packages\NLog.4.3.7\lib\net40\NLog.dll</HintPath>
     </Reference>
@@ -195,6 +198,7 @@
     <Compile Include="Deployment\RuntimeDependencyFunc.cs" />
     <Compile Include="EventsTestLocalListeners.cs" />
     <Compile Include="FailureHandlerTest.cs" />
+    <Compile Include="Impl\Compute\ComputeImplTest.cs" />
     <Compile Include="Process\ListDataReader.cs" />
     <Compile Include="Log\ConcurrentMemoryTarget.cs" />
     <Compile Include="Log\DefaultLoggerTest.cs" />
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/DataStorageConfigurationParityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/DataStorageConfigurationParityTest.cs
index 51f2865..2276c20 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/DataStorageConfigurationParityTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/DataStorageConfigurationParityTest.cs
@@ -28,7 +28,8 @@
         /** Properties that are not needed on .NET side. */
         private static readonly string[] UnneededProperties =
         {
-            "FileIOFactory"
+            "FileIOFactory",
+            "isWalHistorySizeParameterUsed"
         };
 
         /** Properties that are missing on .NET side. */
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/IgniteConfigurationParityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/IgniteConfigurationParityTest.cs
index 1fd8e72d..1f600dd 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/IgniteConfigurationParityTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/IgniteConfigurationParityTest.cs
@@ -80,7 +80,10 @@
             "TimeServerPortBase",
             "TimeServerPortRange",
             "IncludeProperties",
-            "isAutoActivationEnabled"  // IGNITE-7301
+            "isAutoActivationEnabled",  // IGNITE-7301
+            "isMvccEnabled", //TODO: IGNITE-9390: Remove when Mvcc support will be added.
+            "MvccVacuumTimeInterval", //TODO: IGNITE-9390: Remove when Mvcc support will be added.
+            "MvccVacuumThreadCnt" //TODO: IGNITE-9390: Remove when Mvcc support will be added.
         };
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs
index ba10cda..1171e16 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ApiParity/QueryEntityConfigurationParityTest.cs
@@ -33,7 +33,8 @@
             "KeyFields",
             "NotNullFields",
             "DefaultFieldValues",
-            "DecimalInfo"
+            "FieldsPrecision",
+            "FieldsScale"
         };
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
index 9fee2b9..02ed39d 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/CacheAbstractTest.cs
@@ -655,6 +655,8 @@
         [Test]
         public void TestWithExpiryPolicy()
         {
+            Assert.Fail("https://issues.apache.org/jira/browse/IGNITE-8983");
+
             TestWithExpiryPolicy((cache, policy) => cache.WithExpiryPolicy(policy), true);
         }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs
index 1776d04..ebc6ca2 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/DataStorageMetricsTest.cs
@@ -42,7 +42,7 @@
             {
                 DataStorageConfiguration = new DataStorageConfiguration
                 {
-                    CheckpointFrequency = TimeSpan.FromSeconds(1),
+                    CheckpointFrequency = TimeSpan.FromSeconds(5),
                     MetricsEnabled = true,
                     WalMode = WalMode.LogOnly,
                     DefaultDataRegionConfiguration = new DataRegionConfiguration
@@ -82,7 +82,7 @@
                 Assert.GreaterOrEqual(metrics.LastCheckpointTotalPagesNumber, 26);
                 Assert.AreEqual(0, metrics.LastCheckpointDataPagesNumber);
                 Assert.AreEqual(0, metrics.LastCheckpointCopiedOnWritePagesNumber);
-                Assert.AreEqual(TimeSpan.Zero, metrics.LastCheckpointLockWaitDuration);
+                Assert.Greater(TimeSpan.FromSeconds(1), metrics.LastCheckpointLockWaitDuration);
 
                 Assert.Greater(metrics.LastCheckpointPagesWriteDuration, TimeSpan.Zero);
                 Assert.Greater(metrics.LastCheckpointMarkDuration, TimeSpan.Zero);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Base.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Base.cs
index 5b56abd..81dcfed 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Base.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Base.cs
@@ -45,11 +45,17 @@
         /** Cache name. */
         private const string PersonOrgCacheName = "person_org";
 
+        /** Cache schema. */
+        private const string PersonOrgCacheSchema = "person_org_Schema";
+
         /** Cache name. */
         private const string PersonSecondCacheName = "person_cache";
 
-        /** Role cache name. */
-        private const string RoleCacheName = "role_cache";
+        /** Cache schema. */
+        private const string PersonSecondCacheSchema = "\"person_cache_SCHEMA\"";
+
+        /** Role cache name: uses invalid characters to test name escaping. */
+        private const string RoleCacheName = "role$ cache.";
 
         /** */
         private const int RoleCount = 3;
@@ -199,7 +205,8 @@
                     new QueryEntity(typeof (int), typeof (Organization)))
                 {
                     CacheMode = CacheMode.Replicated,
-                    SqlEscapeAll = GetSqlEscapeAll()
+                    SqlEscapeAll = GetSqlEscapeAll(),
+                    SqlSchema = PersonOrgCacheSchema
                 });
         }
 
@@ -231,14 +238,15 @@
                         })
                     {
                         CacheMode = CacheMode.Replicated,
-                        SqlEscapeAll = GetSqlEscapeAll()
+                        SqlEscapeAll = GetSqlEscapeAll(),
+                        SqlSchema = PersonSecondCacheSchema
                     });
         }
 
         /// <summary>
         /// Checks that function maps to SQL function properly.
         /// </summary>
-        private static void CheckFunc<T, TR>(Expression<Func<T, TR>> exp, IQueryable<T> query, 
+        private static void CheckFunc<T, TR>(Expression<Func<T, TR>> exp, IQueryable<T> query,
             Func<TR, TR> localResultFunc = null)
         {
             localResultFunc = localResultFunc ?? (x => x);
@@ -263,7 +271,7 @@
         /// <summary>
         /// Checks that function used in Where Clause maps to SQL function properly
         /// </summary>
-        private static void CheckWhereFunc<TKey, TEntry>(IQueryable<ICacheEntry<TKey,TEntry>> query, 
+        private static void CheckWhereFunc<TKey, TEntry>(IQueryable<ICacheEntry<TKey,TEntry>> query,
             Expression<Func<ICacheEntry<TKey, TEntry>,bool>> whereExpression)
         {
             // Calculate result locally, using real method invocation
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs
index aa26548..f5b5baa 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Cache/Query/Linq/CacheLinqTest.Introspection.cs
@@ -66,8 +66,8 @@
 
             Assert.AreEqual(
                 GetSqlEscapeAll()
-                    ? "select _T0._KEY, _T0._VAL from \"person_org\".\"Person\" as _T0 where (_T0.\"_KEY\" > ?)"
-                    : "select _T0._KEY, _T0._VAL from \"person_org\".Person as _T0 where (_T0._KEY > ?)",
+                    ? "select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.\"Person\" as _T0 where (_T0.\"_KEY\" > ?)"
+                    : "select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where (_T0._KEY > ?)",
                 fq.Sql);
 
             Assert.AreEqual(new[] { 10 }, fq.Arguments);
@@ -84,12 +84,12 @@
             var str = query.ToString();
             Assert.AreEqual(GetSqlEscapeAll()
                 ? "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0._KEY, _T0._VAL from \"person_org\".\"Person\" as _T0 where " +
+                  "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.\"Person\" as _T0 where " +
                   "(_T0.\"_KEY\" > ?), Arguments=[10], " +
                   "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " +
                   "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]"
                 : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0._KEY, _T0._VAL from \"person_org\".Person as _T0 where " +
+                  "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " +
                   "(_T0._KEY > ?), Arguments=[10], " +
                   "Local=True, PageSize=999, EnableDistributedJoins=False, EnforceJoinOrder=True, " +
                   "Timeout=00:00:02.5000000, ReplicatedOnly=True, Colocated=True, Schema=, Lazy=True]]", str);
@@ -104,8 +104,8 @@
 
             fq = fieldsQuery.GetFieldsQuery();
             Assert.AreEqual(GetSqlEscapeAll()
-                    ? "select _T0.\"Name\" from \"person_org\".\"Person\" as _T0"
-                    : "select _T0.NAME from \"person_org\".Person as _T0",
+                    ? "select _T0.\"Name\" from PERSON_ORG_SCHEMA.\"Person\" as _T0"
+                    : "select _T0.NAME from PERSON_ORG_SCHEMA.Person as _T0",
                 fq.Sql);
 
             Assert.IsFalse(fq.Local);
@@ -117,11 +117,11 @@
             str = fieldsQuery.ToString();
             Assert.AreEqual(GetSqlEscapeAll()
                 ? "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0.\"Name\" from \"person_org\".\"Person\" as _T0, Arguments=[], Local=False, " +
+                  "[Sql=select _T0.\"Name\" from PERSON_ORG_SCHEMA.\"Person\" as _T0, Arguments=[], Local=False, " +
                   "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " +
                   "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]"
                 : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0.NAME from \"person_org\".Person as _T0, Arguments=[], Local=False, " +
+                  "[Sql=select _T0.NAME from PERSON_ORG_SCHEMA.Person as _T0, Arguments=[], Local=False, " +
                   "PageSize=1024, EnableDistributedJoins=False, EnforceJoinOrder=False, " +
                   "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str);
 
@@ -136,17 +136,17 @@
             str = distrQuery.ToString();
             Assert.AreEqual(GetSqlEscapeAll()
                 ? "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0._KEY, _T0._VAL from \"person_org\".\"Person\" as _T0 where " +
+                  "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.\"Person\" as _T0 where " +
                   "(((_T0.\"_KEY\" > ?) and (_T0.\"age1\" > ?)) " +
                   "and (_T0.\"Name\" like \'%\' || ? || \'%\') ), Arguments=[10, 20, x], Local=False, " +
                   "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " +
                   "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]"
                 : "CacheQueryable [CacheName=person_org, TableName=Person, Query=SqlFieldsQuery " +
-                  "[Sql=select _T0._KEY, _T0._VAL from \"person_org\".Person as _T0 where " +
+                  "[Sql=select _T0._KEY, _T0._VAL from PERSON_ORG_SCHEMA.Person as _T0 where " +
                   "(((_T0._KEY > ?) and (_T0.AGE1 > ?)) " +
                   "and (_T0.NAME like \'%\' || ? || \'%\') ), Arguments=[10, 20, x], Local=False, " +
                   "PageSize=1024, EnableDistributedJoins=True, EnforceJoinOrder=False, " +
                   "Timeout=00:00:00, ReplicatedOnly=False, Colocated=False, Schema=, Lazy=False]]", str);
         }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ClientCacheConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ClientCacheConfigurationTest.cs
index 04ad7dc..1481f24 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ClientCacheConfigurationTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/Cache/ClientCacheConfigurationTest.cs
@@ -23,6 +23,7 @@
     using Apache.Ignite.Core.Cache.Configuration;
     using Apache.Ignite.Core.Client.Cache;
     using Apache.Ignite.Core.Impl.Binary.IO;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Client.Cache;
     using Apache.Ignite.Core.Tests.Cache;
     using NUnit.Framework;
@@ -178,9 +179,9 @@
         {
             using (var stream = new BinaryHeapStream(128))
             {
-                ClientCacheConfigurationSerializer.Write(stream, cfg, true);
+                ClientCacheConfigurationSerializer.Write(stream, cfg, ClientSocket.CurrentProtocolVersion, true);
                 stream.Seek(0, SeekOrigin.Begin);
-                return new CacheClientConfiguration(stream);
+                return new CacheClientConfiguration(stream, ClientSocket.CurrentProtocolVersion);
             }
         }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs
index 67d1c52..14d1abf 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Client/ClientConnectionTest.cs
@@ -31,6 +31,7 @@
     using Apache.Ignite.Core.Client;
     using Apache.Ignite.Core.Client.Cache;
     using Apache.Ignite.Core.Configuration;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Common;
     using NUnit.Framework;
 
@@ -251,8 +252,8 @@
             {
                 // ReSharper disable once ObjectCreationAsStatement
                 var ex = Assert.Throws<IgniteClientException>(() =>
-                    new Impl.Client.ClientSocket(GetClientConfiguration(),
-                    new Impl.Client.ClientProtocolVersion(-1, -1, -1)));
+                    new ClientSocket(GetClientConfiguration(),
+                    new ClientProtocolVersion(-1, -1, -1)));
 
                 Assert.AreEqual(ClientStatusCode.Fail, ex.StatusCode);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
index d0c576d..d41b4a6 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Compute/ComputeApiTest.cs
@@ -27,6 +27,7 @@
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Compute;
     using Apache.Ignite.Core.Events;
+    using Apache.Ignite.Core.Impl;
     using Apache.Ignite.Core.Resource;
     using NUnit.Framework;
 
@@ -800,7 +801,7 @@
         [Test]
         public void TestFooterSetting()
         {
-            Assert.AreEqual(CompactFooter, ((Impl.Ignite) _grid1).Marshaller.CompactFooter);
+            Assert.AreEqual(CompactFooter, ((Ignite) _grid1).Marshaller.CompactFooter);
 
             foreach (var g in new[] {_grid1, _grid2, _grid3})
                 Assert.AreEqual(CompactFooter, g.GetConfiguration().BinaryConfiguration.CompactFooter);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
index d50cf4f..b091a49 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Config/full-config.xml
@@ -47,6 +47,10 @@
         <string>-Xms1g</string>
         <string>-Xmx4g</string>
     </jvmOptions>
+    <sqlSchemas>
+        <string>SCHEMA_1</string>
+        <string>schema_2</string>
+    </sqlSchemas>
     <lifecycleHandlers>
         <iLifecycleHandler type='Apache.Ignite.Core.Tests.IgniteConfigurationSerializerTest+LifecycleBean' foo='15' />
     </lifecycleHandlers>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Dataload/DataStreamerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Dataload/DataStreamerTest.cs
index a3c804d..f1a25c6 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Dataload/DataStreamerTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Dataload/DataStreamerTest.cs
@@ -363,6 +363,8 @@
         [Test]
         public void TestFinalizer()
         {
+            Assert.Fail("https://issues.apache.org/jira/browse/IGNITE-8731");
+
             var streamer = _grid.GetDataStreamer<int, int>(CacheName);
             var streamerRef = new WeakReference(streamer);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs
index e9bac02..7162d3e 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/EventsTest.cs
@@ -40,6 +40,8 @@
     using Apache.Ignite.Core.Tests.Compute;
     using NUnit.Framework;
 
+    using ImplCompute = Core.Impl.Compute.Compute;
+
     /// <summary>
     /// <see cref="IEvents"/> tests.
     /// </summary>
@@ -509,7 +511,7 @@
         public void TestSerialization()
         {
             var grid = (Ignite) _grid1;
-            var comp = (Impl.Compute.Compute) grid.GetCluster().ForLocal().GetCompute();
+            var comp = (ImplCompute) grid.GetCluster().ForLocal().GetCompute();
             var locNode = grid.GetCluster().GetLocalNode();
 
             var expectedGuid = Guid.Parse("00000000-0000-0001-0000-000000000002");
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Examples/PathUtil.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Examples/PathUtil.cs
index 7c82d80..9295d29 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Examples/PathUtil.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Examples/PathUtil.cs
@@ -28,7 +28,7 @@
         public const string DevPrefix = "modules\\";
 
         /** */
-        public static readonly string IgniteHome = Impl.Common.IgniteHome.Resolve(null);
+        public static readonly string IgniteHome = Core.Impl.Common.IgniteHome.Resolve(null);
 
         /// <summary>
         /// Examples source code path.
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
index 226106f..e2ece20 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationSerializerTest.cs
@@ -99,6 +99,12 @@
             Assert.AreEqual(new TimeSpan(1, 2, 3), cfg.LongQueryWarningTimeout);
             Assert.IsFalse(cfg.IsActiveOnStart);
             Assert.IsTrue(cfg.AuthenticationEnabled);
+
+            Assert.IsNotNull(cfg.SqlSchemas);
+            Assert.AreEqual(2, cfg.SqlSchemas.Count);
+            Assert.IsTrue(cfg.SqlSchemas.Contains("SCHEMA_1"));
+            Assert.IsTrue(cfg.SqlSchemas.Contains("schema_2"));
+
             Assert.AreEqual("someId012", cfg.ConsistentId);
             Assert.IsFalse(cfg.RedirectJavaConsoleOutput);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs
index 6c772f4..a03d09c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteConfigurationTest.cs
@@ -19,6 +19,7 @@
 namespace Apache.Ignite.Core.Tests
 {
     using System;
+    using System.Collections.Generic;
     using System.ComponentModel;
     using System.IO;
     using System.Linq;
@@ -240,6 +241,11 @@
                 Assert.AreEqual(sql.ThreadPoolSize, resSql.ThreadPoolSize);
 
                 AssertExtensions.ReflectionEqual(cfg.DataStorageConfiguration, resCfg.DataStorageConfiguration);
+
+                Assert.IsNotNull(resCfg.SqlSchemas);
+                Assert.AreEqual(2, resCfg.SqlSchemas.Count);
+                Assert.IsTrue(resCfg.SqlSchemas.Contains("SCHEMA_3"));
+                Assert.IsTrue(resCfg.SqlSchemas.Contains("schema_4"));
             }
         }
 
@@ -829,7 +835,9 @@
                         }
                     }
                 },
-                AuthenticationEnabled = false
+                AuthenticationEnabled = false,
+
+                SqlSchemas = new List<string> { "SCHEMA_3", "schema_4" }
             };
         }
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteStartStopTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteStartStopTest.cs
index f9c1cad..dd6a7b2 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteStartStopTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/IgniteStartStopTest.cs
@@ -15,13 +15,12 @@
  * limitations under the License.
  */
 
-namespace Apache.Ignite.Core.Tests 
+namespace Apache.Ignite.Core.Tests
 {
     using System;
     using System.IO;
     using System.Linq;
     using System.Threading;
-    using System.Threading.Tasks;
     using Apache.Ignite.Core.Common;
     using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Messaging;
@@ -45,7 +44,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestStartDefault()
@@ -60,7 +59,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestStartWithConfigPath()
@@ -79,7 +78,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestStartGetStop()
@@ -140,7 +139,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestStartTheSameName()
@@ -148,7 +147,7 @@
             var cfg = TestUtils.GetTestConfiguration(name: "grid1");
             var grid1 = Ignition.Start(cfg);
             Assert.AreEqual("grid1", grid1.Name);
-            
+
             var ex = Assert.Throws<IgniteException>(() => Ignition.Start(cfg));
             Assert.AreEqual("Ignite instance with this name has already been started: grid1", ex.Message);
         }
@@ -173,7 +172,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestUsageAfterStop()
@@ -191,7 +190,7 @@
         }
 
         /// <summary>
-        /// 
+        ///
         /// </summary>
         [Test]
         public void TestStartStopLeak()
@@ -244,7 +243,7 @@
                     }
                 }
             }
-            finally 
+            finally
             {
                 Ignition.ClientMode = false;
             }
@@ -299,13 +298,13 @@
                 "-jvmClasspath=" + TestUtils.CreateTestClasspath(),
                 "-springConfigUrl=" + Path.GetFullPath(cfg.SpringConfigUrl),
                 "-J-Xms512m", "-J-Xmx512m");
-            
+
             Assert.IsTrue(proc.Alive);
 
             var cts = new CancellationTokenSource();
             var token = cts.Token;
 
-            // Spam message subscriptions on a separate thread 
+            // Spam message subscriptions on a separate thread
             // to test race conditions during processor init on remote node
             var listenTask = TaskRunner.Run(() =>
             {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Impl/Compute/ComputeImplTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Impl/Compute/ComputeImplTest.cs
new file mode 100644
index 0000000..c888dda
--- /dev/null
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Impl/Compute/ComputeImplTest.cs
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace Apache.Ignite.Core.Tests.Impl.Compute
+{
+    using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Impl;
+    using Apache.Ignite.Core.Impl.Binary;
+    using Apache.Ignite.Core.Impl.Cluster;
+    using Apache.Ignite.Core.Impl.Compute;
+    using Moq;
+    using NUnit.Framework;
+
+    /// <summary>
+    /// Tests for the compute implementations
+    /// </summary>
+    [TestFixture]
+    internal class ComputeImplTest
+    {
+        private const int OpWithNoResultCacheType = 9;
+
+        /// <summary>
+        /// Test caching was disabled by passing right type
+        /// </summary>
+        [Test]
+        public void TestCachingWasDisabledByPassingRightType()
+        {
+            var target = GetTarget();
+            var clusterGroupImpl = new ClusterGroupImpl(target.Object, null);
+            var sut = new ComputeImpl(target.Object, clusterGroupImpl, true);
+
+            sut.WithNoResultCache();
+
+            target.Verify(x => x.InLongOutLong(OpWithNoResultCacheType, It.IsAny<long>()), Times.Once());
+        }
+
+        private static Mock<IPlatformTargetInternal> GetTarget()
+        {
+            var target = new Mock<IPlatformTargetInternal>();
+            target
+                .Setup(x => x.InLongOutLong(It.IsAny<int>(), It.IsAny<long>()))
+                .Returns(1L);
+
+            target
+                .SetupGet(x => x.Marshaller)
+                .Returns(new Marshaller(new BinaryConfiguration()));
+
+            return target;
+        }
+    }
+}
\ No newline at end of file
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/MessagingTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/MessagingTest.cs
index 7db4eef..5dfa82f 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/MessagingTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/MessagingTest.cs
@@ -23,7 +23,6 @@
     using System.Diagnostics.CodeAnalysis;
     using System.Linq;
     using System.Threading;
-    using System.Threading.Tasks;
     using Apache.Ignite.Core.Cache.Configuration;
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Common;
@@ -59,7 +58,7 @@
             DateTime.Now,
             byte.MinValue,
             short.MaxValue,
-            
+
             // Enums.
             CacheMode.Local,
             GCCollectionMode.Forced,
@@ -94,7 +93,7 @@
 
                 MessagingTestHelper.AssertFailures();
             }
-            finally 
+            finally
             {
                 // Stop all grids between tests to drop any hanging messages
                 Ignition.StopAll(true);
@@ -466,7 +465,7 @@
 
             if (sharedResult.Length != 0)
             {
-                Assert.Fail("Unexpected messages ({0}): {1}; last sent message: {2}", sharedResult.Length, 
+                Assert.Fail("Unexpected messages ({0}): {1}; last sent message: {2}", sharedResult.Length,
                     string.Join(",", sharedResult), lastMsg);
             }
         }
@@ -574,7 +573,7 @@
     {
         /** */
         public static readonly ConcurrentStack<string> ReceivedMessages = new ConcurrentStack<string>();
-        
+
         /** */
         private static readonly ConcurrentStack<string> Failures = new ConcurrentStack<string>();
 
@@ -626,7 +625,7 @@
             // check that all messages came from local node.
             var localNodeId = cluster.Ignite.GetCluster().GetLocalNode().Id;
             Assert.AreEqual(localNodeId, LastNodeIds.Distinct().Single());
-            
+
             AssertFailures();
         }
 
@@ -670,7 +669,7 @@
                 }
                 catch (Exception ex)
                 {
-                    // When executed on remote nodes, these exceptions will not go to sender, 
+                    // When executed on remote nodes, these exceptions will not go to sender,
                     // so we have to accumulate them.
                     Failures.Push(string.Format("Exception in Listen (msg: {0}, id: {1}): {2}", message, nodeId, ex));
                     throw;
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ReconnectTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ReconnectTest.cs
index 274439e..5d40408 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ReconnectTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/ReconnectTest.cs
@@ -17,7 +17,6 @@
 
 namespace Apache.Ignite.Core.Tests
 {
-    using System;
     using System.Threading;
     using Apache.Ignite.Core.Cache;
     using Apache.Ignite.Core.Cache.Configuration;
@@ -83,7 +82,7 @@
 
             // Check reconnect task.
             Assert.IsTrue(ex.ClientReconnectTask.Result);
-            
+
             // Wait a bit for notifications.
             Thread.Sleep(100);
 
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Services/ServicesTest.cs b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Services/ServicesTest.cs
index 017a580..81c3652 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Services/ServicesTest.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/Services/ServicesTest.cs
@@ -28,6 +28,7 @@
     using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Cluster;
     using Apache.Ignite.Core.Common;
+    using Apache.Ignite.Core.Impl;
     using Apache.Ignite.Core.Resource;
     using Apache.Ignite.Core.Services;
     using NUnit.Framework;
@@ -957,7 +958,7 @@
         {
             foreach (var grid in Grids)
             {
-                Assert.AreEqual(CompactFooter, ((Impl.Ignite) grid).Marshaller.CompactFooter);
+                Assert.AreEqual(CompactFooter, ((Ignite) grid).Marshaller.CompactFooter);
                 Assert.AreEqual(CompactFooter, grid.GetConfiguration().BinaryConfiguration.CompactFooter);
             }
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
index ccf079c..76c4c87 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
+++ b/modules/platforms/dotnet/Apache.Ignite.Core.Tests/packages.config
@@ -20,5 +20,6 @@
 <packages>
   <package id="NUnit.Runners" version="2.6.3" targetFramework="net40" />
   <package id="log4net" version="2.0.5" targetFramework="net40" />
+  <package id="Moq" version="4.0.10827" targetFramework="net40" />
   <package id="NLog" version="4.3.7" targetFramework="net40" />
 </packages>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
index 3bfb9a1..d5a0c68 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Apache.Ignite.Core.nuspec
@@ -40,7 +40,7 @@
         <iconUrl>https://ignite.apache.org/images/logo_ignite_32_32.png</iconUrl>
         <requireLicenseAcceptance>false</requireLicenseAcceptance>
         <description>
-Apache Ignite In-Memory Data Fabric is a high-performance, integrated and distributed in-memory platform for computing and transacting on large-scale data sets in real-time, orders of magnitude faster than possible with traditional disk-based or flash technologies.
+Apache Ignite is a memory-centric distributed database, caching, and processing platform for transactional, analytical, and streaming workloads, delivering in-memory speeds at petabyte scale.
 Supports .NET 4+ and .NET Core 2.0+.
             
 More info: https://apacheignite-net.readme.io/
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
index 06a7d72..a8925ad 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/CacheConfiguration.cs
@@ -40,6 +40,7 @@
     using Apache.Ignite.Core.Impl.Binary;
     using Apache.Ignite.Core.Impl.Cache.Affinity;
     using Apache.Ignite.Core.Impl.Cache.Expiry;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Log;
     using Apache.Ignite.Core.Plugin.Cache;
     using BinaryReader = Apache.Ignite.Core.Impl.Binary.BinaryReader;
@@ -48,7 +49,7 @@
     /// <summary>
     /// Defines grid cache configuration.
     /// </summary>
-    public class CacheConfiguration : IBinaryRawWriteAware<BinaryWriter>
+    public class CacheConfiguration : IBinaryRawWriteAwareEx<BinaryWriter>
     {
         /// <summary> Default size of rebalance thread pool. </summary>
         public const int DefaultRebalanceThreadPoolSize = 2;
@@ -252,12 +253,12 @@
             {
                 using (var stream = IgniteManager.Memory.Allocate().GetStream())
                 {
-                    other.Write(BinaryUtils.Marshaller.StartMarshal(stream));
+                    other.Write(BinaryUtils.Marshaller.StartMarshal(stream), ClientSocket.CurrentProtocolVersion);
 
                     stream.SynchronizeOutput();
                     stream.Seek(0, SeekOrigin.Begin);
 
-                    Read(BinaryUtils.Marshaller.StartUnmarshal(stream));
+                    Read(BinaryUtils.Marshaller.StartUnmarshal(stream), ClientSocket.CurrentProtocolVersion);
                 }
 
                 CopyLocalProperties(other);
@@ -268,16 +269,18 @@
         /// Initializes a new instance of the <see cref="CacheConfiguration"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        internal CacheConfiguration(BinaryReader reader)
+        /// <param name="srvVer">Server version.</param>
+        internal CacheConfiguration(BinaryReader reader, ClientProtocolVersion srvVer)
         {
-            Read(reader);
+            Read(reader, srvVer);
         }
 
         /// <summary>
         /// Reads data into this instance from the specified reader.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        private void Read(BinaryReader reader)
+        /// <param name="srvVer">Server version.</param>
+        private void Read(BinaryReader reader, ClientProtocolVersion srvVer)
         {
             // Make sure system marshaller is used.
             Debug.Assert(reader.Marshaller == BinaryUtils.Marshaller);
@@ -327,7 +330,7 @@
             QueryParallelism = reader.ReadInt();
             SqlSchema = reader.ReadString();
 
-            QueryEntities = reader.ReadCollectionRaw(r => new QueryEntity(r));
+            QueryEntities = reader.ReadCollectionRaw(r => new QueryEntity(r, srvVer));
 
             NearConfiguration = reader.ReadBoolean() ? new NearCacheConfiguration(reader) : null;
 
@@ -364,16 +367,18 @@
         /// Writes this instance to the specified writer.
         /// </summary>
         /// <param name="writer">The writer.</param>
-        void IBinaryRawWriteAware<BinaryWriter>.Write(BinaryWriter writer)
+        /// <param name="srvVer">Server version.</param>
+        void IBinaryRawWriteAwareEx<BinaryWriter>.Write(BinaryWriter writer, ClientProtocolVersion srvVer)
         {
-            Write(writer);
+            Write(writer, srvVer);
         }
 
         /// <summary>
         /// Writes this instance to the specified writer.
         /// </summary>
         /// <param name="writer">The writer.</param>
-        internal void Write(BinaryWriter writer)
+        /// <param name="srvVer">Server version.</param>
+        internal void Write(BinaryWriter writer, ClientProtocolVersion srvVer)
         {
             // Make sure system marshaller is used.
             Debug.Assert(writer.Marshaller == BinaryUtils.Marshaller);
@@ -423,7 +428,7 @@
             writer.WriteInt(QueryParallelism);
             writer.WriteString(SqlSchema);
 
-            writer.WriteCollectionRaw(QueryEntities);
+            writer.WriteCollectionRaw(QueryEntities, srvVer);
 
             if (NearConfiguration != null)
             {
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs
index 32173ba..dc8be7f 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryEntity.cs
@@ -27,13 +27,14 @@
     using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Impl.Binary;
     using Apache.Ignite.Core.Impl.Cache;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Log;
 
     /// <summary>
     /// Query entity is a description of cache entry (composed of key and value) 
     /// in a way of how it must be indexed and can be queried.
     /// </summary>
-    public sealed class QueryEntity : IQueryEntityInternal, IBinaryRawWriteAware
+    public sealed class QueryEntity : IQueryEntityInternal, IBinaryRawWriteAwareEx
     {
         /** */
         private Type _keyType;
@@ -232,7 +233,8 @@
         /// Initializes a new instance of the <see cref="QueryEntity"/> class.
         /// </summary>
         /// <param name="reader">The reader.</param>
-        internal QueryEntity(IBinaryRawReader reader)
+        /// <param name="srvVer">Server version.</param>
+        internal QueryEntity(IBinaryRawReader reader, ClientProtocolVersion srvVer)
         {
             KeyTypeName = reader.ReadString();
             ValueTypeName = reader.ReadString();
@@ -243,7 +245,7 @@
             var count = reader.ReadInt();
             Fields = count == 0
                 ? null
-                : Enumerable.Range(0, count).Select(x => new QueryField(reader)).ToList();
+                : Enumerable.Range(0, count).Select(x => new QueryField(reader, srvVer)).ToList();
 
             count = reader.ReadInt();
             Aliases = count == 0 ? null : Enumerable.Range(0, count)
@@ -256,7 +258,7 @@
         /// <summary>
         /// Writes this instance.
         /// </summary>
-        void IBinaryRawWriteAware<IBinaryRawWriter>.Write(IBinaryRawWriter writer)
+        void IBinaryRawWriteAwareEx<IBinaryRawWriter>.Write(IBinaryRawWriter writer, ClientProtocolVersion srvVer)
         {
             writer.WriteString(KeyTypeName);
             writer.WriteString(ValueTypeName);
@@ -270,7 +272,7 @@
 
                 foreach (var field in Fields)
                 {
-                    field.Write(writer);
+                    field.Write(writer, srvVer);
                 }
             }
             else
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs
index 869ce7d..4142986 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QueryField.cs
@@ -23,6 +23,7 @@
     using System.Diagnostics;
     using Apache.Ignite.Core.Binary;
     using Apache.Ignite.Core.Impl.Binary;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Log;
 
@@ -42,7 +43,8 @@
         /// </summary>
         public QueryField()
         {
-            // No-op.
+            Precision = -1;
+            Scale = -1;
         }
 
         /// <summary>
@@ -50,7 +52,7 @@
         /// </summary>
         /// <param name="name">Name.</param>
         /// <param name="javaFieldTypeName">Java type name.</param>
-        public QueryField(string name, string javaFieldTypeName)
+        public QueryField(string name, string javaFieldTypeName): this()
         {
             IgniteArgumentCheck.NotNullOrEmpty(name, "name");
             IgniteArgumentCheck.NotNullOrEmpty(javaFieldTypeName, "typeName");
@@ -64,7 +66,7 @@
         /// </summary>
         /// <param name="name">Name.</param>
         /// <param name="fieldType">Type.</param>
-        public QueryField(string name, Type fieldType)
+        public QueryField(string name, Type fieldType): this()
         {
             IgniteArgumentCheck.NotNullOrEmpty(name, "name");
             IgniteArgumentCheck.NotNull(fieldType, "type");
@@ -76,7 +78,7 @@
         /// <summary>
         /// Initializes a new instance of the <see cref="QueryField"/> class.
         /// </summary>
-        internal QueryField(IBinaryRawReader reader)
+        internal QueryField(IBinaryRawReader reader, ClientProtocolVersion srvVer)
         {
             Debug.Assert(reader != null);
 
@@ -85,14 +87,18 @@
             IsKeyField = reader.ReadBoolean();
             NotNull = reader.ReadBoolean();
             DefaultValue = reader.ReadObject<object>();
-            Precision = reader.ReadInt();
-            Scale = reader.ReadInt();
+
+            if (srvVer.CompareTo(ClientSocket.Ver120) >= 0)
+            {
+                Precision = reader.ReadInt();
+                Scale = reader.ReadInt();
+            }
         }
 
         /// <summary>
         /// Writes this instance to the specified writer.
         /// </summary>
-        internal void Write(IBinaryRawWriter writer)
+        internal void Write(IBinaryRawWriter writer, ClientProtocolVersion srvVer)
         {
             Debug.Assert(writer != null);
 
@@ -101,8 +107,12 @@
             writer.WriteBoolean(IsKeyField);
             writer.WriteBoolean(NotNull);
             writer.WriteObject(DefaultValue);
-            writer.WriteInt(Precision);
-            writer.WriteInt(Scale);
+
+            if (srvVer.CompareTo(ClientSocket.Ver120) >= 0)
+            {
+                writer.WriteInt(Precision);
+                writer.WriteInt(Scale);
+            }
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs
index bfd3575..2c1d566 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Cache/Configuration/QuerySqlFieldAttribute.cs
@@ -35,6 +35,8 @@
         public QuerySqlFieldAttribute()
         {
             IndexInlineSize = QueryIndex.DefaultInlineSize;
+            Precision = -1;
+            Scale = -1;
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/CacheClientConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/CacheClientConfiguration.cs
index fad6b58..c6cb112 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/CacheClientConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Client/Cache/CacheClientConfiguration.cs
@@ -29,6 +29,7 @@
     using Apache.Ignite.Core.Configuration;
     using Apache.Ignite.Core.Impl;
     using Apache.Ignite.Core.Impl.Binary.IO;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Client.Cache;
     using Apache.Ignite.Core.Impl.Common;
 
@@ -115,12 +116,12 @@
             {
                 using (var stream = IgniteManager.Memory.Allocate().GetStream())
                 {
-                    ClientCacheConfigurationSerializer.Write(stream, other, true);
+                    ClientCacheConfigurationSerializer.Write(stream, other, ClientSocket.CurrentProtocolVersion, true);
 
                     stream.SynchronizeOutput();
                     stream.Seek(0, SeekOrigin.Begin);
 
-                    ClientCacheConfigurationSerializer.Read(stream, this);
+                    ClientCacheConfigurationSerializer.Read(stream, this, ClientSocket.CurrentProtocolVersion);
                 }
 
                 CopyLocalProperties(other);
@@ -156,11 +157,11 @@
         /// <summary>
         /// Initializes a new instance of the <see cref="CacheClientConfiguration"/> class.
         /// </summary>
-        internal CacheClientConfiguration(IBinaryStream stream)
+        internal CacheClientConfiguration(IBinaryStream stream, ClientProtocolVersion srvVer)
         {
             Debug.Assert(stream != null);
 
-            ClientCacheConfigurationSerializer.Read(stream, this);
+            ClientCacheConfigurationSerializer.Read(stream, this, srvVer);
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Compute/ICompute.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Compute/ICompute.cs
index f3bdb7d..7d70f97 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Compute/ICompute.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Compute/ICompute.cs
@@ -63,6 +63,12 @@
         ICompute WithNoFailover();
 
         /// <summary>
+        /// Disables caching for the next executed task in the current thread.
+        /// </summary>
+        /// <returns>This compute instance for chaining calls.</returns>
+        ICompute WithNoResultCache();
+
+        /// <summary>
         /// Sets task timeout for the next executed task on this projection in the current thread.
         /// When task starts execution, the timeout is reset, so one timeout is used only once.
         /// </summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs
index 7a3778c..0a010b4 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Configuration/DataStorageConfiguration.cs
@@ -164,6 +164,11 @@
         public const int DefaultConcurrencyLevel = 0;
 
         /// <summary>
+        /// Default value for <see cref="MaxWalArchiveSize"/>.
+        /// </summary>
+        public const long DefaultMaxWalArchiveSize = 1024 * 1024 * 1024;
+
+        /// <summary>
         /// Initializes a new instance of the <see cref="DataStorageConfiguration"/> class.
         /// </summary>
         public DataStorageConfiguration()
@@ -190,6 +195,7 @@
             SystemRegionMaxSize = DefaultSystemRegionMaxSize;
             PageSize = DefaultPageSize;
             WalAutoArchiveAfterInactivity = DefaultWalAutoArchiveAfterInactivity;
+            MaxWalArchiveSize = DefaultMaxWalArchiveSize;
         }
 
         /// <summary>
@@ -221,6 +227,7 @@
             CheckpointWriteOrder = (CheckpointWriteOrder)reader.ReadInt();
             WriteThrottlingEnabled = reader.ReadBoolean();
             WalCompactionEnabled = reader.ReadBoolean();
+            MaxWalArchiveSize = reader.ReadLong();
 
             SystemRegionInitialSize = reader.ReadLong();
             SystemRegionMaxSize = reader.ReadLong();
@@ -272,6 +279,7 @@
             writer.WriteInt((int)CheckpointWriteOrder);
             writer.WriteBoolean(WriteThrottlingEnabled);
             writer.WriteBoolean(WalCompactionEnabled);
+            writer.WriteLong(MaxWalArchiveSize);
 
             writer.WriteLong(SystemRegionInitialSize);
             writer.WriteLong(SystemRegionMaxSize);
@@ -445,6 +453,12 @@
         public bool WalCompactionEnabled { get; set; }
 
         /// <summary>
+        /// Gets or sets maximum size of wal archive folder, in bytes.
+        /// </summary>
+        [DefaultValue(DefaultMaxWalArchiveSize)]
+        public long MaxWalArchiveSize { get; set; }
+
+        /// <summary>
         /// Gets or sets the size of a memory chunk reserved for system needs.
         /// </summary>
         [DefaultValue(DefaultSystemRegionInitialSize)]
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
index 7d8cfc7..55d358a 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfiguration.cs
@@ -43,6 +43,7 @@
     using Apache.Ignite.Core.Failure;
     using Apache.Ignite.Core.Impl;
     using Apache.Ignite.Core.Impl.Binary;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Impl.Ssl;
     using Apache.Ignite.Core.Lifecycle;
@@ -252,13 +253,13 @@
             {
                 var marsh = BinaryUtils.Marshaller;
 
-                configuration.Write(marsh.StartMarshal(stream));
+                configuration.Write(marsh.StartMarshal(stream), ClientSocket.CurrentProtocolVersion);
 
                 stream.SynchronizeOutput();
 
                 stream.Seek(0, SeekOrigin.Begin);
 
-                ReadCore(marsh.StartUnmarshal(stream));
+                ReadCore(marsh.StartUnmarshal(stream), ClientSocket.CurrentProtocolVersion);
             }
 
             CopyLocalProperties(configuration);
@@ -269,12 +270,14 @@
         /// </summary>
         /// <param name="binaryReader">The binary reader.</param>
         /// <param name="baseConfig">The base configuration.</param>
-        internal IgniteConfiguration(BinaryReader binaryReader, IgniteConfiguration baseConfig)
+        /// <param name="srvVer">Server version.</param>
+        internal IgniteConfiguration(BinaryReader binaryReader, IgniteConfiguration baseConfig,
+            ClientProtocolVersion srvVer)
         {
             Debug.Assert(binaryReader != null);
             Debug.Assert(baseConfig != null);
 
-            Read(binaryReader);
+            Read(binaryReader, srvVer);
             CopyLocalProperties(baseConfig);
         }
 
@@ -282,7 +285,8 @@
         /// Writes this instance to a writer.
         /// </summary>
         /// <param name="writer">The writer.</param>
-        internal void Write(BinaryWriter writer)
+        /// <param name="srvVer">Server version.</param>
+        internal void Write(BinaryWriter writer, ClientProtocolVersion srvVer)
         {
             Debug.Assert(writer != null);
 
@@ -305,6 +309,19 @@
             writer.WriteTimeSpanAsLongNullable(_longQueryWarningTimeout);
             writer.WriteBooleanNullable(_isActiveOnStart);
             writer.WriteBooleanNullable(_authenticationEnabled);
+
+            if (SqlSchemas == null)
+                writer.WriteInt(-1);
+            else
+            {
+                writer.WriteInt(SqlSchemas.Count);
+
+                foreach (string sqlSchema in SqlSchemas)
+                {
+                    writer.WriteString(sqlSchema);
+                }
+            }
+
             writer.WriteObjectDetached(ConsistentId);
 
             // Thread pools
@@ -319,7 +336,7 @@
             writer.WriteIntNullable(_queryThreadPoolSize);
 
             // Cache config
-            writer.WriteCollectionRaw(CacheConfiguration);
+            writer.WriteCollectionRaw(CacheConfiguration, srvVer);
 
             // Discovery config
             var disco = DiscoverySpi;
@@ -512,7 +529,7 @@
 
             // SSL Context factory.
             SslFactorySerializer.Write(writer, SslContextFactory);
-            
+
             // Failure handler.
             if (FailureHandler == null)
             {
@@ -521,7 +538,7 @@
             else
             {
                 writer.WriteBoolean(true);
-                
+
                 if (FailureHandler is NoOpFailureHandler)
                 {
                     writer.WriteByte(0);
@@ -530,7 +547,7 @@
                 {
                     writer.WriteByte(1);
                 }
-                else 
+                else
                 {
                     var failHnd = FailureHandler as StopNodeOrHaltFailureHandler;
 
@@ -548,7 +565,7 @@
                     failHnd.Write(writer);
                 }
             }
-           
+
             // Plugins (should be last).
             if (PluginConfigurations != null)
             {
@@ -637,7 +654,8 @@
         /// Reads data from specified reader into current instance.
         /// </summary>
         /// <param name="r">The binary reader.</param>
-        private void ReadCore(BinaryReader r)
+        /// <param name="srvVer">Server version.</param>
+        private void ReadCore(BinaryReader r, ClientProtocolVersion srvVer)
         {
             // Simple properties
             _clientMode = r.ReadBooleanNullable();
@@ -657,6 +675,19 @@
             _longQueryWarningTimeout = r.ReadTimeSpanNullable();
             _isActiveOnStart = r.ReadBooleanNullable();
             _authenticationEnabled = r.ReadBooleanNullable();
+
+            int sqlSchemasCnt = r.ReadInt();
+
+            if (sqlSchemasCnt == -1)
+                SqlSchemas = null;
+            else
+            {
+                SqlSchemas = new List<string>(sqlSchemasCnt);
+
+                for (int i = 0; i < sqlSchemasCnt; i++)
+                    SqlSchemas.Add(r.ReadString());
+            }
+
             ConsistentId = r.ReadObject<object>();
 
             // Thread pools
@@ -671,7 +702,7 @@
             _queryThreadPoolSize = r.ReadIntNullable();
 
             // Cache config
-            CacheConfiguration = r.ReadCollectionRaw(x => new CacheConfiguration(x));
+            CacheConfiguration = r.ReadCollectionRaw(x => new CacheConfiguration(x, srvVer));
 
             // Discovery config
             DiscoverySpi = r.ReadBoolean() ? new TcpDiscoverySpi(r) : null;
@@ -775,7 +806,7 @@
 
             // SSL context factory.
             SslContextFactory = SslFactorySerializer.Read(r);
-            
+
             //Failure handler.
             if (r.ReadBoolean())
             {
@@ -783,22 +814,22 @@
                 {
                     case 0:
                         FailureHandler = new NoOpFailureHandler();
-                        
+
                         break;
 
                     case 1:
                         FailureHandler = new StopNodeFailureHandler();
-                        
+
                         break;
 
                     case 2:
                         FailureHandler = StopNodeOrHaltFailureHandler.Read(r);
-                        
+
                         break;
-                    
+
                     default:
                         FailureHandler = null;
-                        
+
                         break;
                 }
             }
@@ -812,9 +843,10 @@
         /// Reads data from specified reader into current instance.
         /// </summary>
         /// <param name="binaryReader">The binary reader.</param>
-        private void Read(BinaryReader binaryReader)
+        /// <param name="srvVer">Server version.</param>
+        private void Read(BinaryReader binaryReader, ClientProtocolVersion srvVer)
         {
-            ReadCore(binaryReader);
+            ReadCore(binaryReader, srvVer);
 
             // Misc
             IgniteHome = binaryReader.ReadString();
@@ -928,7 +960,7 @@
         /// Null property values do not override Spring values.
         /// Value-typed properties are tracked internally: if setter was not called, Spring value won't be overwritten.
         /// <para />
-        /// This merging happens on the top level only; e. g. if there are cache configurations defined in Spring 
+        /// This merging happens on the top level only; e. g. if there are cache configurations defined in Spring
         /// and in .NET, .NET caches will overwrite Spring caches.
         /// </summary>
         [SuppressMessage("Microsoft.Design", "CA1056:UriPropertiesShouldNotBeStrings")]
@@ -959,7 +991,7 @@
 
         /// <summary>
         /// List of additional .Net assemblies to load on Ignite start. Each item can be either
-        /// fully qualified assembly name, path to assembly to DLL or path to a directory when 
+        /// fully qualified assembly name, path to assembly to DLL or path to a directory when
         /// assemblies reside.
         /// </summary>
         [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")]
@@ -1015,7 +1047,7 @@
         }
 
         /// <summary>
-        /// Gets or sets a set of event types (<see cref="EventType" />) to be recorded by Ignite. 
+        /// Gets or sets a set of event types (<see cref="EventType" />) to be recorded by Ignite.
         /// </summary>
         [SuppressMessage("Microsoft.Usage", "CA2227:CollectionPropertiesShouldBeReadOnly")]
         public ICollection<int> IncludedEventTypes { get; set; }
@@ -1155,11 +1187,11 @@
         public string WorkDirectory { get; set; }
 
         /// <summary>
-        /// Gets or sets system-wide local address or host for all Ignite components to bind to. 
+        /// Gets or sets system-wide local address or host for all Ignite components to bind to.
         /// If provided it will override all default local bind settings within Ignite.
         /// <para />
-        /// If <c>null</c> then Ignite tries to use local wildcard address.That means that all services 
-        /// will be available on all network interfaces of the host machine. 
+        /// If <c>null</c> then Ignite tries to use local wildcard address.That means that all services
+        /// will be available on all network interfaces of the host machine.
         /// <para />
         /// It is strongly recommended to set this parameter for all production environments.
         /// </summary>
@@ -1168,11 +1200,11 @@
         /// <summary>
         /// Gets or sets a value indicating whether this node should be a daemon node.
         /// <para />
-        /// Daemon nodes are the usual grid nodes that participate in topology but not visible on the main APIs, 
+        /// Daemon nodes are the usual grid nodes that participate in topology but not visible on the main APIs,
         /// i.e. they are not part of any cluster groups.
         /// <para />
-        /// Daemon nodes are used primarily for management and monitoring functionality that is built on Ignite 
-        /// and needs to participate in the topology, but also needs to be excluded from the "normal" topology, 
+        /// Daemon nodes are used primarily for management and monitoring functionality that is built on Ignite
+        /// and needs to participate in the topology, but also needs to be excluded from the "normal" topology,
         /// so that it won't participate in the task execution or in-memory data grid storage.
         /// </summary>
         public bool IsDaemon
@@ -1209,14 +1241,14 @@
         /// affinity assignment mode is disabled then new affinity mapping is applied immediately.
         /// <para />
         /// With late affinity assignment mode, if primary node was changed for some partition, but data for this
-        /// partition is not rebalanced yet on this node, then current primary is not changed and new primary 
-        /// is temporary assigned as backup. This nodes becomes primary only when rebalancing for all assigned primary 
-        /// partitions is finished. This mode can show better performance for cache operations, since when cache 
-        /// primary node executes some operation and data is not rebalanced yet, then it sends additional message 
+        /// partition is not rebalanced yet on this node, then current primary is not changed and new primary
+        /// is temporary assigned as backup. This nodes becomes primary only when rebalancing for all assigned primary
+        /// partitions is finished. This mode can show better performance for cache operations, since when cache
+        /// primary node executes some operation and data is not rebalanced yet, then it sends additional message
         /// to force rebalancing from other nodes.
         /// <para />
         /// Note, that <see cref="ICacheAffinity"/> interface provides assignment information taking late assignment
-        /// into account, so while rebalancing for new primary nodes is not finished it can return assignment 
+        /// into account, so while rebalancing for new primary nodes is not finished it can return assignment
         /// which differs from assignment calculated by AffinityFunction.
         /// <para />
         /// This property should have the same value for all nodes in cluster.
@@ -1281,7 +1313,7 @@
         public ILogger Logger { get; set; }
 
         /// <summary>
-        /// Gets or sets the failure detection timeout used by <see cref="TcpDiscoverySpi"/> 
+        /// Gets or sets the failure detection timeout used by <see cref="TcpDiscoverySpi"/>
         /// and <see cref="TcpCommunicationSpi"/>.
         /// </summary>
         [DefaultValue(typeof(TimeSpan), "00:00:10")]
@@ -1508,7 +1540,7 @@
         public bool RedirectJavaConsoleOutput { get; set; }
 
         /// <summary>
-        /// Gets or sets whether user authentication is enabled for the cluster. Default is <c>false</c>. 
+        /// Gets or sets whether user authentication is enabled for the cluster. Default is <c>false</c>.
         /// </summary>
         [DefaultValue(DefaultAuthenticationEnabled)]
         public bool AuthenticationEnabled
@@ -1525,9 +1557,17 @@
         /// <para><see cref="StopNodeOrHaltFailureHandler"/> -- try to stop node if tryStop value is true.
         /// If node can't be stopped during provided timeout or tryStop value is false then JVM process will be terminated forcibly.</para>
         /// <para/>
-        /// Only these implementations are supported: 
+        /// Only these implementations are supported:
         /// <see cref="NoOpFailureHandler"/>, <see cref="StopNodeOrHaltFailureHandler"/>, <see cref="StopNodeFailureHandler"/>.
         /// </summary>
         public IFailureHandler FailureHandler { get; set; }
+
+        /// <summary>
+        /// Gets or sets SQL schemas to be created on node startup. Schemas are created on local node only and are not propagated.
+        /// to other cluster nodes. Created schemas cannot be dropped.
+        /// <para/>
+        /// By default schema names are case-insensitive. Use quotes to enforce case sensitivity.
+        /// </summary>
+        public ICollection<string> SqlSchemas { get; set; }
     }
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
index f16ef43..ebbef67 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/IgniteConfigurationSection.xsd
@@ -1855,6 +1855,11 @@
                                 <xs:documentation>If true, system will filter and compress WAL archive in background. Compressed WAL archive gets automatically decompressed on demand.</xs:documentation>
                             </xs:annotation>
                         </xs:attribute>
+                        <xs:attribute name="maxWalArchiveSize" type="xs:long">
+                            <xs:annotation>
+                                <xs:documentation>Maximum size of wal archive folder, in bytes.</xs:documentation>
+                            </xs:annotation>
+                        </xs:attribute>
                         <xs:attribute name="pageSize" type="xs:int">
                             <xs:annotation>
                                 <xs:documentation>Size of the memory page.</xs:documentation>
@@ -2029,6 +2034,16 @@
                         </xs:attribute>
                     </xs:complexType>
                 </xs:element>
+                <xs:element name="sqlSchemas" minOccurs="0">
+                    <xs:annotation>
+                        <xs:documentation>SQL schemas to be created on node startup. Schemas are created on local node only and are not propagated.</xs:documentation>
+                    </xs:annotation>
+                    <xs:complexType>
+                        <xs:sequence>
+                            <xs:element maxOccurs="unbounded" name="string" type="xs:string" />
+                        </xs:sequence>
+                    </xs:complexType>
+                </xs:element>
             </xs:all>
             <xs:attribute name="igniteInstanceName" type="xs:string">
                 <xs:annotation>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs
index 5b93609..fe80ba1 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Ignition.cs
@@ -407,7 +407,8 @@
 
             // 3. Send configuration details to Java
             cfg.Validate(log);
-            cfg.Write(BinaryUtils.Marshaller.StartMarshal(outStream));  // Use system marshaller.
+            // Use system marshaller.
+            cfg.Write(BinaryUtils.Marshaller.StartMarshal(outStream), ClientSocket.CurrentProtocolVersion);
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriterExtensions.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriterExtensions.cs
index d87d217..e504d75 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriterExtensions.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/BinaryWriterExtensions.cs
@@ -22,6 +22,7 @@
     using System.Diagnostics;
     using System.IO;
     using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Impl.Client;
 
     /// <summary>
     /// Writer extensions.
@@ -211,5 +212,33 @@
                 writer.WriteInt(0);
             }
         }
+
+        /// <summary>
+        /// Writes the collection of write-aware-ex items.
+        /// </summary>
+        public static void WriteCollectionRaw<T, TWriter>(this TWriter writer, ICollection<T> collection,
+            ClientProtocolVersion srvVer) where T : IBinaryRawWriteAwareEx<TWriter> where TWriter: IBinaryRawWriter
+        {
+            Debug.Assert(writer != null);
+
+            if (collection != null)
+            {
+                writer.WriteInt(collection.Count);
+
+                foreach (var x in collection)
+                {
+                    if (x == null)
+                    {
+                        throw new ArgumentNullException(string.Format("{0} can not be null", typeof(T).Name));
+                    }
+
+                    x.Write(writer, srvVer);
+                }
+            }
+            else
+            {
+                writer.WriteInt(0);
+            }
+        }
     }
-}
\ No newline at end of file
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryRawWriteAware.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryRawWriteAware.cs
index 9b191e4..737d3c9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryRawWriteAware.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Binary/IBinaryRawWriteAware.cs
@@ -18,6 +18,29 @@
 namespace Apache.Ignite.Core.Impl.Binary
 {
     using Apache.Ignite.Core.Binary;
+    using Apache.Ignite.Core.Impl.Client;
+
+    /// <summary>
+    /// Represents an object that can write itself to a raw binary writer using specific server version.
+    /// </summary>
+    internal interface IBinaryRawWriteAwareEx<in T> where T : IBinaryRawWriter
+    {
+        /// <summary>
+        /// Writes this object to the given writer.
+        /// </summary>
+        /// <param name="writer">Writer.</param>
+        /// <param name="srvVer">Server version.</param>
+        /// <exception cref="System.IO.IOException">If write failed.</exception>
+        void Write(T writer, ClientProtocolVersion srvVer);
+    }
+
+    /// <summary>
+    /// Represents an object that can write itself to a raw binary writer.
+    /// </summary>
+    internal interface IBinaryRawWriteAwareEx : IBinaryRawWriteAwareEx<IBinaryRawWriter>
+    {
+        // No-op.
+    }
 
     /// <summary>
     /// Represents an object that can write itself to a raw binary writer.
@@ -39,4 +62,4 @@
     {
         // No-op.
     }
-}
\ No newline at end of file
+}
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs
index 71fbaee..9e99967 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Cache/CacheImpl.cs
@@ -34,6 +34,7 @@
     using Apache.Ignite.Core.Impl.Cache.Expiry;
     using Apache.Ignite.Core.Impl.Cache.Query;
     using Apache.Ignite.Core.Impl.Cache.Query.Continuous;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Cluster;
     using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Impl.Transactions;
@@ -161,7 +162,7 @@
         public CacheConfiguration GetConfiguration()
         {
             return DoInOp((int) CacheOp.GetConfig, stream => new CacheConfiguration(
-                BinaryUtils.Marshaller.StartUnmarshal(stream)));
+                BinaryUtils.Marshaller.StartUnmarshal(stream), ClientSocket.CurrentProtocolVersion));
         }
 
         /** <inheritDoc /> */
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs
index 8138b77..a5a9246 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/CacheClient.cs
@@ -513,7 +513,8 @@
         /** <inheritDoc /> */
         public CacheClientConfiguration GetConfiguration()
         {
-            return DoOutInOp(ClientOp.CacheGetConfiguration, null, s => new CacheClientConfiguration(s));
+            return DoOutInOp(ClientOp.CacheGetConfiguration, null,
+                s => new CacheClientConfiguration(s, _ignite.ServerVersion()));
         }
 
         /** <inheritDoc /> */
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/ClientCacheConfigurationSerializer.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/ClientCacheConfigurationSerializer.cs
index 552c778..0cccdac 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/ClientCacheConfigurationSerializer.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/Cache/ClientCacheConfigurationSerializer.cs
@@ -195,7 +195,8 @@
         /// <summary>
         /// Writes the specified config.
         /// </summary>
-        public static void Write(IBinaryStream stream, CacheClientConfiguration cfg, bool skipCodes = false)
+        public static void Write(IBinaryStream stream, CacheClientConfiguration cfg, ClientProtocolVersion srvVer,
+            bool skipCodes = false)
         {
             Debug.Assert(stream != null);
             Debug.Assert(cfg != null);
@@ -302,7 +303,7 @@
             writer.WriteCollectionRaw(cfg.KeyConfiguration);
             
             code(Op.QueryEntities);
-            writer.WriteCollectionRaw(cfg.QueryEntities);
+            writer.WriteCollectionRaw(cfg.QueryEntities, srvVer);
 
             // Write length (so that part of the config can be skipped).
             var len = writer.Stream.Position - pos - 4;
@@ -312,7 +313,7 @@
         /// <summary>
         /// Reads the config.
         /// </summary>
-        public static void Read(IBinaryStream stream, CacheClientConfiguration cfg)
+        public static void Read(IBinaryStream stream, CacheClientConfiguration cfg, ClientProtocolVersion srvVer)
         {
             Debug.Assert(stream != null);
 
@@ -351,7 +352,7 @@
             cfg.SqlSchema = reader.ReadString();
             cfg.WriteSynchronizationMode = (CacheWriteSynchronizationMode)reader.ReadInt();
             cfg.KeyConfiguration = reader.ReadCollectionRaw(r => new CacheKeyConfiguration(r));
-            cfg.QueryEntities = reader.ReadCollectionRaw(r => new QueryEntity(r));
+            cfg.QueryEntities = reader.ReadCollectionRaw(r => new QueryEntity(r, srvVer));
 
             Debug.Assert(len == reader.Stream.Position - pos);
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientSocket.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientSocket.cs
index 11d7942..8a8b53b 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientSocket.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/ClientSocket.cs
@@ -45,8 +45,11 @@
         /** Version 1.1.0. */
         private static readonly ClientProtocolVersion Ver110 = new ClientProtocolVersion(1, 1, 0);
 
+        /** Version 1.2.0. */
+        public static readonly ClientProtocolVersion Ver120 = new ClientProtocolVersion(1, 2, 0);
+
         /** Current version. */
-        private static readonly ClientProtocolVersion CurrentProtocolVersion = Ver110;
+        public static readonly ClientProtocolVersion CurrentProtocolVersion = Ver120;
 
         /** Handshake opcode. */
         private const byte OpHandshake = 1;
@@ -69,6 +72,9 @@
         /** Callback checker guard. */
         private volatile bool _checkingTimeouts;
 
+        /** Server protocol version. */
+        public ClientProtocolVersion ServerVersion { get; private set; }
+
         /** Current async operations, map from request id. */
         private readonly ConcurrentDictionary<long, Request> _requests
             = new ConcurrentDictionary<long, Request>();
@@ -105,9 +111,11 @@
             _socket = Connect(clientConfiguration);
             _stream = GetSocketStream(_socket, clientConfiguration);
 
+            ServerVersion = version ?? CurrentProtocolVersion;
+
             Validate(clientConfiguration);
 
-            Handshake(clientConfiguration, version ?? CurrentProtocolVersion);
+            Handshake(clientConfiguration, ServerVersion);
 
             // Check periodically if any request has timed out.
             if (_timeout > TimeSpan.Zero)
@@ -303,10 +311,12 @@
 
                 if (success)
                 {
+                    ServerVersion = version;
+
                     return;
                 }
 
-                var serverVersion =
+                ServerVersion =
                     new ClientProtocolVersion(stream.ReadShort(), stream.ReadShort(), stream.ReadShort());
 
                 var errMsg = BinaryUtils.Marshaller.Unmarshal<string>(stream);
@@ -325,17 +335,17 @@
                 }
 
                 // Re-try if possible.
-                bool retry = serverVersion.CompareTo(version) < 0 && serverVersion.Equals(Ver100);
+                bool retry = ServerVersion.CompareTo(version) < 0 && ServerVersion.Equals(Ver100);
 
                 if (retry)
                 {
-                    Handshake(clientConfiguration, serverVersion);
+                    Handshake(clientConfiguration, ServerVersion);
                 }
                 else
                 {
                     throw new IgniteClientException(string.Format(
                         "Client handshake failed: '{0}'. Client version: {1}. Server version: {2}",
-                        errMsg, version, serverVersion), null, errCode);
+                        errMsg, version, ServerVersion), null, errCode);
                 }
             }
         }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs
index 1b1aa6f..61d0220 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Client/IgniteClient.cs
@@ -115,7 +115,7 @@
             IgniteArgumentCheck.NotNull(configuration, "configuration");
 
             DoOutOp(ClientOp.CacheGetOrCreateWithConfiguration,
-                w => ClientCacheConfigurationSerializer.Write(w.Stream, configuration));
+                w => ClientCacheConfigurationSerializer.Write(w.Stream, configuration, ServerVersion()));
 
             return GetCache<TK, TV>(configuration.Name);
         }
@@ -136,7 +136,7 @@
             IgniteArgumentCheck.NotNull(configuration, "configuration");
 
             DoOutOp(ClientOp.CacheCreateWithConfiguration,
-                w => ClientCacheConfigurationSerializer.Write(w.Stream, configuration));
+                w => ClientCacheConfigurationSerializer.Write(w.Stream, configuration, ServerVersion()));
 
             return GetCache<TK, TV>(configuration.Name);
         }
@@ -217,6 +217,13 @@
         }
 
         /// <summary>
+        /// Gets the protocol version supported by server.
+        /// </summary>
+        public ClientProtocolVersion ServerVersion() {
+            return _socket.ServerVersion;
+        }
+
+        /// <summary>
         /// Gets the client not supported exception.
         /// </summary>
         public static NotSupportedException GetClientNotSupportedException(string info = null)
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Compute.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Compute.cs
index b54d6a9..efea1c7 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Compute.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/Compute.cs
@@ -60,6 +60,14 @@
         }
 
         /** <inheritDoc /> */
+        public ICompute WithNoResultCache()
+        {
+            _compute.WithNoResultCache();
+
+            return this;
+        }
+
+        /** <inheritDoc /> */
         public ICompute WithTimeout(long timeout)
         {
             _compute.WithTimeout(timeout);
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeImpl.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeImpl.cs
index 3c684ae..05efe3c 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeImpl.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Compute/ComputeImpl.cs
@@ -65,6 +65,9 @@
         /** */
         private const int OpExecNative = 8;
 
+        /** */
+        private const int OpWithNoResultCache = 9;
+
         /** Underlying projection. */
         private readonly ClusterGroupImpl _prj;
 
@@ -118,6 +121,14 @@
         }
 
         /// <summary>
+        /// Disables caching for the next executed task in the current thread.
+        /// </summary>
+        public void WithNoResultCache()
+        {
+            DoOutInOp(OpWithNoResultCache);
+        }
+
+        /// <summary>
         /// Sets keep-binary flag for the next executed Java task on this projection in the current
         /// thread so that task argument passed to Java and returned task results will not be
         /// deserialized.
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs
index ad9a185..42d9ed6 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Impl/Ignite.cs
@@ -35,6 +35,7 @@
     using Apache.Ignite.Core.Events;
     using Apache.Ignite.Core.Impl.Binary;
     using Apache.Ignite.Core.Impl.Cache;
+    using Apache.Ignite.Core.Impl.Client;
     using Apache.Ignite.Core.Impl.Cluster;
     using Apache.Ignite.Core.Impl.Common;
     using Apache.Ignite.Core.Impl.Datastream;
@@ -483,7 +484,7 @@
             {
                 var w = BinaryUtils.Marshaller.StartMarshal(s);
 
-                configuration.Write(w);
+                configuration.Write(w, ClientSocket.CurrentProtocolVersion);
 
                 if (nearConfiguration != null)
                 {
@@ -683,7 +684,8 @@
         public IgniteConfiguration GetConfiguration()
         {
             return DoInOp((int) Op.GetIgniteConfiguration,
-                s => new IgniteConfiguration(BinaryUtils.Marshaller.StartUnmarshal(s), _cfg));
+                s => new IgniteConfiguration(BinaryUtils.Marshaller.StartUnmarshal(s), _cfg,
+                    ClientSocket.CurrentProtocolVersion));
         }
 
         /** <inheritdoc /> */
@@ -876,7 +878,7 @@
             IgniteArgumentCheck.NotNull(configuration, "configuration");
 
             DoOutOp((int) Op.AddCacheConfiguration,
-                s => configuration.Write(BinaryUtils.Marshaller.StartMarshal(s)));
+                s => configuration.Write(BinaryUtils.Marshaller.StartMarshal(s), ClientSocket.CurrentProtocolVersion));
         }
 
         /// <summary>
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Package-Info.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Package-Info.cs
index a3cfea7..3c39b79 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Package-Info.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Package-Info.cs
@@ -26,11 +26,11 @@
 
     /**
 
-    \mainpage Apache Ignite.NET In-Memory Data Fabric
+    \mainpage Apache Ignite.NET
 
-    Apache Ignite.NET In-Memory Data Fabric is a high-performance, integrated and distributed in-memory platform for 
-    computing and transacting on large-scale data sets in real-time, orders of magnitude faster than possible with 
-    traditional disk-based or flash-based technologies.
+    Apache Ignite is a memory-centric distributed database, caching,
+	and processing platform for transactional, analytical,
+	and streaming workloads, delivering in-memory speeds at petabyte scale.
 
     */
 }
diff --git a/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs b/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
index 94af7f8..c403c1f 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Core/Properties/AssemblyInfo.cs
@@ -41,6 +41,7 @@
 
 #if !EXCLUDE_INTERNALS_VISIBLE_TO
 
+[assembly: InternalsVisibleTo("DynamicProxyGenAssembly2, PublicKey = 0024000004800000940000000602000000240000525341310004000001000100c547cac37abd99c8db225ef2f6c8a3602f3b3606cc9891605d02baa56104f4cfc0734aa39b93bf7852f7d9266654753cc297e7d2edfe0bac1cdcf9f717241550e0a7b191195b7667bb4f64bcb8e2121380fd1d9d46ad2d92d2d15605093924cceaf74c4861eff62abf69b9291ed0a340e113be11e6a7d3113e92484cf7045cc7")]
 [assembly: InternalsVisibleTo("Apache.Ignite.Core.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100a5bf8e0062a26bde53ccf0f8c42ef5b122a22052f99aecacb7028adcc163050324ee3c75ff40eb0cbe2d0426fa20eca03726cad90d7eb882ff47f5361567a82b676a27565f88b2479d7b9354ae0a1e526ee781b6e11de943d8f4a49efb53765f8c954022bede0fca86c133fab038af8dc88b67d6b6e5b9796d6ca490e699efab")]
 [assembly: InternalsVisibleTo("Apache.Ignite.Benchmarks, PublicKey=0024000004800000940000000602000000240000525341310004000001000100a3e0c1df4cbedbd4ed0e88808401c69b69ec12575ed1c056ac9f448e018fb29af19d236b7b03563aad66c48ab2045e72971ed098d4f65d4cdd38d65abcb39b4f84c626b22ccab2754375f0e8c97dc304fa146f0eddad5cc40a71803a8f15b0b0bb0bff0d4bf0ff6a64bb1044e0d71e6e2405b83fd4c1f7b3e2cfc2e9d50823d4")]
 [assembly: InternalsVisibleTo("Apache.Ignite.AspNet.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100c9380ce05eb74bd7c531f72e9ea615c59d7eceb09bd9795cb3dff1fcf638fd799c2a58a9be42fff156efe1c8cdebb751e27763f6c9a7c80cdc1dc1bbf44283608ef18ccd5017fd57b2b026503637c89c2537f361807f3bdd49265f4d444716159d989342561d324b1a0961640338bb32eaf67f4ae0c95f1b210f65404b0909c6")]
diff --git a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/ExpressionWalker.cs b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/ExpressionWalker.cs
index 0d6306a..9a684d9 100644
--- a/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/ExpressionWalker.cs
+++ b/modules/platforms/dotnet/Apache.Ignite.Linq/Impl/ExpressionWalker.cs
@@ -33,6 +33,9 @@
     /// </summary>
     internal static class ExpressionWalker
     {
+        /** SQL quote */
+        private const string SqlQuote = "\"";
+
         /** Compiled member readers. */
         private static readonly CopyOnWriteConcurrentDictionary<MemberInfo, Func<object, object>> MemberReaders =
             new CopyOnWriteConcurrentDictionary<MemberInfo, Func<object, object>>();
@@ -251,10 +254,57 @@
 
             var cacheCfg = queryable.CacheConfiguration;
 
-            return string.Format(cacheCfg.SqlEscapeAll
-                    ? "\"{0}\".\"{1}\""
-                    : "\"{0}\".{1}",
-                cacheCfg.Name, queryable.TableName);
+            var tableName = queryable.TableName;
+            if (cacheCfg.SqlEscapeAll)
+            {
+                tableName = string.Format("{0}{1}{0}", SqlQuote, tableName);
+            }
+
+            var schemaName = NormalizeSchemaName(cacheCfg.Name, cacheCfg.SqlSchema);
+
+            return string.Format("{0}.{1}", schemaName, tableName);
+        }
+
+        /// <summary>
+        /// Normalizes SQL schema name, see
+        /// <c>org.apache.ignite.internal.processors.query.QueryUtils#normalizeSchemaName</c>
+        /// </summary>
+        private static string NormalizeSchemaName(string cacheName, string schemaName)
+        {
+            if (schemaName == null)
+            {
+                // If schema name is not set explicitly, we will use escaped cache name. The reason is that cache name
+                // could contain weird characters, such as underscores, dots or non-Latin stuff, which are invalid from
+                // SQL syntax perspective. We do not want node to fail on startup due to this.
+                return string.Format("{0}{1}{0}", SqlQuote, cacheName);
+            }
+
+            if (schemaName.StartsWith(SqlQuote, StringComparison.Ordinal)
+                && schemaName.EndsWith(SqlQuote, StringComparison.Ordinal))
+            {
+                return schemaName;
+            }
+
+            return NormalizeObjectName(schemaName, false);
+        }
+
+        /// <summary>
+        /// Normalizes SQL object name, see
+        /// <c>org.apache.ignite.internal.processors.query.QueryUtils#normalizeObjectName</c>
+        /// </summary>
+        private static string NormalizeObjectName(string name, bool replace)
+        {
+            if (string.IsNullOrEmpty(name))
+            {
+                return name;
+            }
+
+            if (replace)
+            {
+                name = name.Replace('.', '_').Replace('$', '_');
+            }
+
+            return name.ToUpperInvariant();
         }
     }
 }
diff --git a/modules/platforms/dotnet/docfx/index.md b/modules/platforms/dotnet/docfx/index.md
index 4472cff..48985a1 100644
--- a/modules/platforms/dotnet/docfx/index.md
+++ b/modules/platforms/dotnet/docfx/index.md
@@ -1,10 +1,10 @@
-# Apache Ignite.NET In-Memory Data Fabric
+# Apache Ignite.NET
 
 <img src="https://ignite.apache.org/images/logo3.png" hspace="20" /><img src="https://ptupitsyn.github.io/images/net-framework.png" hspace="20" />
 
-Apache Ignite In-Memory Data Fabric is designed to deliver uncompromised performance for a wide set of in-memory computing use cases from 
-[high performance computing](https://ignite.apache.org/features.html), to the industry most advanced [data grid](https://ignite.apache.org/features.html), 
-highly available [service grid](https://ignite.apache.org/features.html), and [streaming](https://ignite.apache.org/features.html).
+Apache Ignite is a [memory-centric](https://ignite.apache.org/arch/memorycentric.html) distributed database, 
+[caching](https://ignite.apache.org/features/datagrid.html), and processing platform for transactional, analytical, 
+and streaming workloads, delivering in-memory speeds at petabyte scale.
 
 * [API Documentation](api/)
 * [Gettting Started](https://apacheignite-net.readme.io/docs/getting-started)
\ No newline at end of file
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Datagrid/BinaryModeExample.cs b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Datagrid/BinaryModeExample.cs
index 89021e6..0d96f4c 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Datagrid/BinaryModeExample.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Datagrid/BinaryModeExample.cs
@@ -179,7 +179,8 @@
                 "from Person, Company " +
                 "where Person.CompanyId = Company.Id and Company.Name = ?", orgName)
             {
-                EnableDistributedJoins = true
+                EnableDistributedJoins = true,
+                Timeout = new TimeSpan(0, 1, 0)
             });
 
             Console.WriteLine();
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/LinqExample.cs b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/LinqExample.cs
index ee2fd90..9a71fca 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/LinqExample.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/LinqExample.cs
@@ -173,7 +173,11 @@
         {
             const string orgName = "Apache";
 
-            var queryOptions = new QueryOptions {EnableDistributedJoins = true};
+            var queryOptions = new QueryOptions
+            {
+                EnableDistributedJoins = true,
+                Timeout = new TimeSpan(0, 1, 0)
+            };
 
             IQueryable<ICacheEntry<int, Employee>> employees = employeeCache.AsCacheQueryable(queryOptions);
             IQueryable<ICacheEntry<int, Organization>> organizations = organizationCache.AsCacheQueryable(queryOptions);
diff --git a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/SqlExample.cs b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/SqlExample.cs
index 15b9d40..9fb7921 100644
--- a/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/SqlExample.cs
+++ b/modules/platforms/dotnet/examples/Apache.Ignite.Examples/Sql/SqlExample.cs
@@ -140,7 +140,8 @@
                 "from Employee, \"dotnet_cache_query_organization\".Organization " +
                 "where Employee.organizationId = Organization._key and Organization.name = ?", orgName)
             {
-                EnableDistributedJoins = true
+                EnableDistributedJoins = true,
+                Timeout = new TimeSpan(0, 1, 0)
             });
 
             Console.WriteLine();
diff --git a/modules/platforms/nodejs/README.md b/modules/platforms/nodejs/README.md
index 63a6725..4792caa 100644
--- a/modules/platforms/nodejs/README.md
+++ b/modules/platforms/nodejs/README.md
@@ -1,615 +1,32 @@
 # NodeJS Client for Apache Ignite #
 
-This client allows your application to work with the [Apache Ignite platform](https://ignite.apache.org/) via the [Binary Client Protocol](https://apacheignite.readme.io/docs/binary-client-protocol).
-
-The client includes:
-- [API specification](https://rawgit.com/nobitlost/ignite/master/modules/platforms/nodejs/api_spec/index.html)
-- [implementation](./lib)
-- [examples](./examples)
-- [tests](./spec)
-- docs
-  - the main readme (this file)
-  - [readme for examples](./examples/README.md)
-  - [readme for tests](./spec/README.md)
-
 ## Installation ##
 
 [Node.js](https://nodejs.org/en/) version 8 or higher is required. Either download the Node.js [pre-built binary](https://nodejs.org/en/download/) for the target platform, or install Node.js via [package manager](https://nodejs.org/en/download/package-manager).
 
-Once `node` and `npm` are installed, execute the following commands:
+Once `node` and `npm` are installed, you can use one of the following installation options.
 
-(temporary, while the NPM module is not released on [npmjs](https://www.npmjs.com))
+### Installation via npm ###
 
-1. Clone or download Ignite repository https://github.com/nobitlost/ignite.git to `local_ignite_path`
+Execute the following command to install the Node.js Thin Client package:
+
+```
+npm install -g apache-ignite-client
+```
+
+### Installation from Sources ###
+
+If you want to install the Thin Client library from Ignite sources, please follow the steps:
+
+1. Download Ignite sources to `local_ignite_path`
 2. Go to `local_ignite_path/modules/platforms/nodejs` folder
 3. Execute `npm link` command
-4. Execute `npm link apache-ignite-client` command (needed only for examples and tests)
+4. Execute `npm link apache-ignite-client` command (needed only for examples)
 
 ```bash
 cd local_ignite_path/modules/platforms/nodejs
 npm link
-npm link apache-ignite-client
+npm link apache-ignite-client #linking examples (optional)
 ```
 
-## Supported Features ##
-
-The client supports all operations and types from the [Binary Client Protocol v.2.4](https://apacheignite.readme.io/v2.4/docs/binary-client-protocol) except the following not-applicable features:
-- OP_REGISTER_BINARY_TYPE_NAME and OP_GET_BINARY_TYPE_NAME operations are not supported.
-- Filter object for OP_QUERY_SCAN operation is not supported. OP_QUERY_SCAN operation itself is supported.
-- It is not possible to register a new Ignite Enum type. Reading and writing items of the existing Ignite Enum types are supported.
-
-The following additional features are supported:
-- Authentication using username/password.
-- SSL/TLS connection.
-- "Failover re-connection algorithm".
-
-## API Specification ##
-
-Full specification of the client's public API is available [here](https://rawgit.com/nobitlost/ignite/master/modules/platforms/nodejs/api_spec/index.html)
-
-It is auto-generated from the [jsdoc](http://usejsdoc.org/) comments in source files and located in the [api_spec](./api_spec) folder.
-
-Promises async/await mechanism is used by the API and the client's implementation.
-
-## Data Types ##
-
-The client supports two cases of mapping between Ignite types defined by the Binary Client Protocol and JavaScript types:
-- default mapping,
-- explicit mapping.
-
-A mapping occurs every time an application writes or reads a field to/from an Ignite cache via the client's API. A field here is any data in a cache - key or value of a cache entry or a map, element of an array or set, field of a complex object, etc.
-
-Using the client's API methods, an application can explicitly specify an Ignite type for a field. The client uses this information during the field read/write operations. It returns the corresponding JavaScript type in results of read operations. It checks the corresponding JavaScript type in inputs of write operations.
-
-If an application does not explicitly specify an Ignite type for a field, the client uses default mapping during the field read/write operations.
-
-Default mapping between Ignite and JavaScript types is described [here](https://rawgit.com/nobitlost/ignite/master/modules/platforms/nodejs/api_spec/ObjectType.html).
-
-### Complex Object Type Support ###
-
-The client provides two ways to operate with the Ignite Complex Object type - in the deserialized form and in the binary form.
-
-An application can specify an Ignite type of a field by an instance of the *ComplexObjectType* class which references an instance of a JavaScript Object. In this case, when the application reads a value of the field, the client deserializes the received Ignite Complex Object and returns it to the client as an instance of the corresponding JavaScript Object. When the application writes a value of the field, the client expects an instance of the corresponding JavaScript Object and serializes it to the Ignite Complex Object.
-
-If an application does not specify an Ignite type of a field and reads a value of the field, the client returns the received Ignite Complex Object as an instance of the *BinaryObject* class - a binary form of the Ignite Complex Object. The *BinaryObject* allows to manipulate with it's content - read and write values of the object's fields, add and remove the fields, etc. Also, an application can create an instance of the *BinaryObject* class from a JavaScript Object. An application can write the *BinaryObject* as a value of a field in a cache, if that field has no explicitly specified Ignite type.
-
-The client takes care of obtaining or registering information about Ignite Complex Object type, including schema, from/at Ignite cluster. It is done automatically by the client, when required for reading or writing of the Ignite Complex Object from/to a cache.
-
-## Usage ##
-
-The below sections exaplains the basic steps to work with Apache Ignite using NodeJS client.
-
-### Instantiate Ignite Client ###
-
-A usage of the client starts from the creation of an *IgniteClient* class instance. The constructor has one, optional, parameter - *onStateChanged* callback which will be called every time the client moves to a new connection state (see below).
-
-It is possible to create as many *IgniteClient* instances as needed. All of them will work fully independently.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-
-const igniteClient = new IgniteClient(onStateChanged);
-
-function onStateChanged(state, reason) {
-    if (state === IgniteClient.STATE.CONNECTED) {
-        console.log('Client is started');
-    }
-    else if (state === IgniteClient.STATE.DISCONNECTED) {
-        console.log('Client is stopped');
-        if (reason) {
-            console.log(reason);
-        }
-    }
-}
-```
-
-### Create Ignite Client Configuration ###
-
-The next step is to define a configuration for the client's connection - create an *IgniteClientConfiguration* class instance.
-
-A mandatory part of the configuration, which is specified in the constructor, is a list of endpoints of the Ignite nodes. At least one endpoint must be specified. A client connects to one node only - a random endpoint from the provided list. Other nodes, if provided, are used by the client for the "failover re-connection algorithm": the client tries to re-connect to the next random endpoint from the list if the current connection has lost.
-
-Optional parts of the configuration can be specified using additional set methods. They include:
-- username and password for authentication,
-- SSL/TLS connection enabling,
-- NodeJS connection options.
-
-By default, the client establishes a non-secure connection with default connection options defined by NodeJS and does not use authentication.
-
-Example: default Ignite Client Configuration
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-
-const igniteClientConfiguration = new IgniteClientConfiguration('127.0.0.1:10800');
-```
-
-Example: Ignite Client Configuration with username/password authentication and additional connection options
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-
-const igniteClientConfiguration = new IgniteClientConfiguration('127.0.0.1:10800').
-    setUserName('ignite').
-    setPassword('ignite').
-    setConnectionOptions(false, { 'timeout' : 0 });
-```
-
-### Connect Ignite Client ###
-
-The next step is to connect the client to an Ignite node. The configuration for the client's connection, which includes endpoint(s) to connect to, is specified in the connect method.
-
-The client has three connection states - *CONNECTING*, *CONNECTED*, *DISCONNECTED*. A state is reported via *onStateChanged* callback, if that was provided in the client's constructor.
-
-Any operations with Ignite caches are possible in the *CONNECTED* state only.
-
-If the client unexpectedly lost the connection, it automatically moves to the *CONNECTING* state and tries to re-connect using the "failover re-connection algorithm". If not possible to connect to all endpoints from the provided list, the client moves to the *DISCONNECTED* state.
-
-At any moment, an application can call the disconnect method and forcibly moves the client to the *DISCONNECTED* state.
-
-When the client becomes disconnected, an application can call the connect method again - with the same or different configuration (eg. with different list of endpoints).
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-
-async function connectClient() {
-    const igniteClient = new IgniteClient(onStateChanged);
-    try {
-        const igniteClientConfiguration = new IgniteClientConfiguration('127.0.0.1:10800');
-        // connect to Ignite node
-        await igniteClient.connect(igniteClientConfiguration);
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-function onStateChanged(state, reason) {
-    if (state === IgniteClient.STATE.CONNECTED) {
-        console.log('Client is started');
-    }
-    else if (state === IgniteClient.STATE.DISCONNECTED) {
-        console.log('Client is stopped');
-        if (reason) {
-            console.log(reason);
-        }
-    }
-}
-
-connectClient();
-```
-
-### Obtain Cache Instance ###
-
-The next step is to obtain a Cache instance - an instance of the *CacheClient* class. One Cache instance gives access to one Ignite cache.
-
-The Ignite client provides several methods to manipulate with Ignite caches and obtain a Cache instance - get a cache by it's name, create a cache with the specified name and optional cache configuration, get or create a cache, destroys a cache, etc.
-
-It is possible to obtain as many *CacheClient* instances as needed - for the same or different Ignite caches - and work with all of them "in parallel".
-
-Example: get or create cache by name and destroy the cache
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-
-async function getOrCreateCacheByName() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        // get or create cache by name
-        const cache = await igniteClient.getOrCreateCache('myCache');
-
-        // perform cache key-value operations
-        // ...
-
-        // destroy cache
-        await igniteClient.destroyCache('myCache');
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-getOrCreateCacheByName();
-```
-
-Example: create cache by name and configuration
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const CacheConfiguration = IgniteClient.CacheConfiguration;
-
-async function createCacheByConfiguration() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        // create cache by name and configuration
-        const cache = await igniteClient.createCache(
-            'myCache',
-            new CacheConfiguration().setSqlSchema('PUBLIC'));
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-createCacheByConfiguration();
-```
-
-Example: get existing cache by name
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-
-async function getExistingCache() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        // get existing cache by name
-        const cache = igniteClient.getCache('myCache');
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-getExistingCache();
-```
-
-### Configure Cache Instance ###
-
-The next step is optional.
-
-It is possible to specify concrete Ignite types for the key and/or the value of the cache. If the key and/or value is a non-primitive type (eg. a map, a collection, a complex object, etc.) it is possible to specify concrete Ignite types for fields of that objects as well.
-
-If Ignite type is not explicitly specified for some field, the client tries to make automatic default mapping between JavaScript types and Ignite object types.
-
-More details about types and mappings are clarified in the [Data Types](#data-types) section.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const ObjectType = IgniteClient.ObjectType;
-const MapObjectType = IgniteClient.MapObjectType;
-
-async function setCacheKeyValueTypes() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        const cache = await igniteClient.getOrCreateCache('myCache');
-        // set cache key/value types
-        cache.setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER).
-            setValueType(new MapObjectType(
-                MapObjectType.MAP_SUBTYPE.LINKED_HASH_MAP,
-                ObjectType.PRIMITIVE_TYPE.SHORT,
-                ObjectType.PRIMITIVE_TYPE.BYTE_ARRAY));
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-setCacheKeyValueTypes();
-```
-
-Now, everything is ready to manipulate with the data in the cache.
-
-### Key-Value Queries ###
-
-The *CacheClient* class provides methods to manipulate with the key and the value of the cache using Key-Value Queries operations - put, get, put all, get all, replace, clear, etc.
-
-Example: different cache Key-Value operations with primitive types
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const ObjectType = IgniteClient.ObjectType;
-const CacheEntry = IgniteClient.CacheEntry;
-
-async function performCacheKeyValueOperations() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        const cache = (await igniteClient.getOrCreateCache('myCache')).
-            setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER);
-        // put and get value
-        await cache.put(1, 'abc');
-        const value = await cache.get(1);
-
-        // put and get multiple values using putAll()/getAll() methods
-        await cache.putAll([new CacheEntry(2, 'value2'), new CacheEntry(3, 'value3')]);
-        const values = await cache.getAll([1, 2, 3]);
-
-        // removes all entries from the cache
-        await cache.clear();
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-performCacheKeyValueOperations();
-```
-
-Example: cache put/get Complex Objects and Binary Objects
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const ObjectType = IgniteClient.ObjectType;
-const CacheEntry = IgniteClient.CacheEntry;
-const ComplexObjectType = IgniteClient.ComplexObjectType;
-
-class Person {
-    constructor(id = null, name = null, salary = null) {
-        this.id = id;
-        this.name = name;
-        this.salary = salary;
-    }
-}
-
-async function putGetComplexAndBinaryObjects() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        const cache = await igniteClient.getOrCreateCache('myPersonCache');
-        // Complex Object type for JavaScript Person class instances
-        const personComplexObjectType = new ComplexObjectType(new Person(0, '', 0)).
-            setFieldType('id', ObjectType.PRIMITIVE_TYPE.INTEGER); 
-        // set cache key and value types
-        cache.setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER).
-            setValueType(personComplexObjectType);
-        // put Complex Objects to the cache
-        await cache.put(1, new Person(1, 'John Doe', 1000));
-        await cache.put(2, new Person(2, 'Jane Roe', 2000));
-        // get Complex Object, returned value is an instance of Person class
-        const person = await cache.get(1);
-        console.log(person);
-
-        // new CacheClient instance of the same cache to operate with BinaryObjects
-        const binaryCache = igniteClient.getCache('myPersonCache').
-            setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER);
-        // get Complex Object from the cache in a binary form, returned value is an instance of BinaryObject class
-        let binaryPerson = await binaryCache.get(2);
-        console.log('Binary form of Person:');       
-        for (let fieldName of binaryPerson.getFieldNames()) {
-            let fieldValue = await binaryPerson.getField(fieldName);
-            console.log(fieldName + ' : ' + fieldValue);
-        }
-        // modify Binary Object and put it to the cache
-        binaryPerson.setField('id', 3, ObjectType.PRIMITIVE_TYPE.INTEGER).
-            setField('name', 'Mary Major');
-        await binaryCache.put(3, binaryPerson);
-
-        // get Binary Object from the cache and convert it to JavaScript Object
-        binaryPerson = await binaryCache.get(3);
-        console.log(await binaryPerson.toObject(personComplexObjectType));
-
-        await igniteClient.destroyCache('myPersonCache');
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-putGetComplexAndBinaryObjects();
-```
-
-### SQL, SQL Fields and Scan Queries ###
-
-The *CacheClient* class provides the query method that accepts an instance of a concrete query definition class and returns an instance of a concrete cursor class which can be used to obtain the results of the query.
-
-Every cursor class allows
-- either to iterate over the results of the query by obtaining one element of the results after another,
-- or to get all elements of the results in a one array at once.
-
-#### SQL Query ####
-
-First, define the query by creating and configuring an instance of the *SqlQuery* class.
-
-Then, pass the *SqlQuery* instance in to the query method of the Cache instance and obtain an instance of the *Cursor* class.
-
-Finally, use the *Cursor* instance to iterate over or get all cache entries returned by the query.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const CacheConfiguration = IgniteClient.CacheConfiguration;
-const QueryEntity = IgniteClient.QueryEntity;
-const QueryField = IgniteClient.QueryField;
-const ObjectType = IgniteClient.ObjectType;
-const ComplexObjectType = IgniteClient.ComplexObjectType;
-const CacheEntry = IgniteClient.CacheEntry;
-const SqlQuery = IgniteClient.SqlQuery;
-
-async function performSqlQuery() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        // cache configuration required for sql query execution
-        const cacheConfiguration = new CacheConfiguration().
-            setQueryEntities(
-                new QueryEntity().
-                    setValueTypeName('Person').
-                    setFields([
-                        new QueryField('name', 'java.lang.String'),
-                        new QueryField('salary', 'java.lang.Double')
-                    ]));
-        const cache = (await igniteClient.getOrCreateCache('sqlQueryPersonCache', cacheConfiguration)).
-            setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER).
-            setValueType(new ComplexObjectType({ 'name' : '', 'salary' : 0 }, 'Person'));
-
-        // put multiple values using putAll()
-        await cache.putAll([
-            new CacheEntry(1, { 'name' : 'John Doe', 'salary' : 1000 }),
-            new CacheEntry(2, { 'name' : 'Jane Roe', 'salary' : 2000 }),
-            new CacheEntry(2, { 'name' : 'Mary Major', 'salary' : 1500 })]);
-
-        // create and configure sql query
-        const sqlQuery = new SqlQuery('Person', 'salary > ? and salary <= ?').
-            setArgs(900, 1600);
-        // obtain sql query cursor
-        const cursor = await cache.query(sqlQuery);
-        // getAll cache entries returned by the sql query
-        for (let cacheEntry of await cursor.getAll()) {
-            console.log(cacheEntry.getValue());
-        }
-
-        await igniteClient.destroyCache('sqlQueryPersonCache');
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-performSqlQuery();
-```
-
-#### Scan Query ####
-
-First, define the query by creating and configuring an instance of the *ScanQuery* class.
-
-Then, pass the *ScanQuery* instance in to the query method of the Cache instance and obtain an instance of the *Cursor* class.
-
-Finally, use the *Cursor* instance to iterate over or get all cache entries returned by the query.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const ObjectType = IgniteClient.ObjectType;
-const CacheEntry = IgniteClient.CacheEntry;
-const ScanQuery = IgniteClient.ScanQuery;
-
-async function performScanQuery() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        const cache = (await igniteClient.getOrCreateCache('myCache')).
-            setKeyType(ObjectType.PRIMITIVE_TYPE.INTEGER);
-
-        // put multiple values using putAll()
-        await cache.putAll([
-            new CacheEntry(1, 'value1'),
-            new CacheEntry(2, 'value2'),
-            new CacheEntry(3, 'value3')]);
-
-        // create and configure scan query
-        const scanQuery = new ScanQuery().
-            setPageSize(1);
-        // obtain scan query cursor
-        const cursor = await cache.query(scanQuery);
-        // getAll cache entries returned by the scan query
-        for (let cacheEntry of await cursor.getAll()) {
-            console.log(cacheEntry.getValue());
-        }
-
-        await igniteClient.destroyCache('myCache');
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-performScanQuery();
-```
-
-#### SQL Fields Query ####
-
-First, define the query by creating and configuring an instance of the *SqlFieldsQuery* class.
-
-Then, pass the *SqlFieldsQuery* instance in to the query method of the Cache instance and obtain an instance of the *SqlFieldsCursor* class.
-
-Finally, use the *SqlFieldsCursor* instance to iterate over or get all elements returned by the query.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-const IgniteClientConfiguration = IgniteClient.IgniteClientConfiguration;
-const CacheConfiguration = IgniteClient.CacheConfiguration;
-const ObjectType = IgniteClient.ObjectType;
-const CacheEntry = IgniteClient.CacheEntry;
-const SqlFieldsQuery = IgniteClient.SqlFieldsQuery;
-
-async function performSqlFieldsQuery() {
-    const igniteClient = new IgniteClient();
-    try {
-        await igniteClient.connect(new IgniteClientConfiguration('127.0.0.1:10800'));
-        const cache = await igniteClient.getOrCreateCache('myPersonCache', new CacheConfiguration().
-            setSqlSchema('PUBLIC'));
-
-        // create table using SqlFieldsQuery
-        (await cache.query(new SqlFieldsQuery(
-           'CREATE TABLE Person (id INTEGER PRIMARY KEY, firstName VARCHAR, lastName VARCHAR, salary DOUBLE)'))).getAll();
-
-        // insert data into the table
-        const insertQuery = new SqlFieldsQuery('INSERT INTO Person (id, firstName, lastName, salary) values (?, ?, ?, ?)').
-            setArgTypes(ObjectType.PRIMITIVE_TYPE.INTEGER);
-        (await cache.query(insertQuery.setArgs(1, 'John', 'Doe', 1000))).getAll();
-        (await cache.query(insertQuery.setArgs(2, 'Jane', 'Roe', 2000))).getAll();
-
-        // obtain sql fields cursor
-        const sqlFieldsCursor = await cache.query(
-            new SqlFieldsQuery("SELECT concat(firstName, ' ', lastName), salary from Person").
-                setPageSize(1));
-
-        // iterate over elements returned by the query
-        do {
-            console.log(await sqlFieldsCursor.getValue());
-        } while (sqlFieldsCursor.hasMore());
-
-        // drop the table
-        (await cache.query(new SqlFieldsQuery("DROP TABLE Person"))).getAll();
-    }
-    catch (err) {
-        console.log(err.message);
-    }
-    finally {
-        igniteClient.disconnect();
-    }
-}
-
-performSqlFieldsQuery();
-```
-
-### Enable Debug ###
-
-To switch on/off the client's debug output (including errors logging), call *setDebug()* method of the *IgniteClient* instance. Debug output is disabled by default.
-
-```javascript
-const IgniteClient = require('apache-ignite-client');
-
-const igniteClient = new IgniteClient();
-igniteClient.setDebug(true);
-```
+For more information, see [Apache Ignite Node.JS Thin Client documentation](https://apacheignite.readme.io/docs/nodejs-thin-client).
diff --git a/modules/platforms/nodejs/examples/README.md b/modules/platforms/nodejs/examples/README.md
deleted file mode 100644
index 94b1433..0000000
--- a/modules/platforms/nodejs/examples/README.md
+++ /dev/null
@@ -1,128 +0,0 @@
-# Examples #
-
-NodeJS Client for Apache Ignite contains fully workable examples to demonstrate the main behavior of the client.
-
-## Description ##
-
-### Sql Example ###
-
-Source: [SqlExample.js](./SqlExample.js)
-
-This example shows primary APIs to use with Ignite as with an SQL database:
-- connects to a node
-- creates a cache, if it doesn't exist
-- creates tables (CREATE TABLE)
-- creates indices (CREATE INDEX)
-- writes data of primitive types into the tables (INSERT INTO table)
-- reads data from the tables (SELECT ...)
-- deletes tables (DROP TABLE)
-- destroys the cache
-
-### Cache Put Get Example ###
-
-Source: [CachePutGetExample.js](./CachePutGetExample.js)
-
-This example demonstrates basic Cache, Key-Value Queries and Scan Query operations:
-- connects to a node
-- creates a cache, if it doesn't exist
-  - specifies key type as Integer
-- executes different cache operations with Complex Objects and Binary Objects
-  - put several objects in parallel
-  - putAll
-  - get
-  - getAll
-  - ScanQuery
-- destroys the cache
-
-### Sql Query Entries Example ###
-
-Source: [SqlQueryEntriesExample.js](./SqlQueryEntriesExample.js)
-
-This example demonstrates basic Cache, Key-Value Queries and SQL Query operations:
-- connects to a node
-- creates a cache from CacheConfiguration, if it doesn't exist
-- writes data of primitive and Complex Object types into the cache using Key-Value put operation
-- reads data from the cache using SQL Query
-- destroys the cache
-
-### Auth Tls Example ###
-
-Source: [AuthTlsExample.js](./AuthTlsExample.js)
-
-This example requires [additional setup](#additional-setup-for-authtlsexample).
-
-This example demonstrates how to establish a secure connection to an Ignite node and use username/password authentication, as well as basic Key-Value Queries operations for primitive types:
-- connects to a node using TLS and providing username/password
-- creates a cache, if it doesn't exist
-  - specifies key and value type of the cache
-- put data of primitive types into the cache
-- get data from the cache
-- destroys the cache
-
-
-### Failover Example ###
-
-Source: [FailoverExample.js](./FailoverExample.js)
-
-This example requires [additional setup](#additional-setup-for-failoverexample).
-
-This example demonstrates the failover behavior of the client
-- configures the client to connect to a set of nodes
-- connects to a node
-- if connection is broken, the client automatically tries to reconnect to another node
-- if no specified nodes are available, stops the client
-
-
-## Installation ##
-
-(temporary, while the NPM module is not released on [npmjs](https://www.npmjs.com))
-
-Examples are installed along with the client.
-Follow the [instructions in the main readme](../README.md#installation).
-
-## Setup and Running ##
-
-1. Run Apache Ignite server - locally or remotely.
-
-2. If needed, modify `ENDPOINT` constant in an example source file - Ignite node endpoint. The default value is `127.0.0.1:10800`.
-
-3. Run an example by calling `node <example_file_name>.js`. Eg. `node CachePutGetExample.js`
-
-## Additional Setup for AuthTlsExample ##
-
-1. Obtain certificates required for TLS:
-  - either use pre-generated certificates provided in the [examples/certs](./certs) folder. Password for the files: `123456`. Note, these certificates work for an Ignite server installed locally only.
-  - or obtain other existing certificates applicable for a concrete Ignite server.
-  - or generate new certificates applicable for a concrete Ignite server.
-
-  - The following files are needed:
-    - keystore.jks, truststore.jks - for the server side
-    - client.key, client.crt, ca.crt - for the client side
-
-2. Place client.key, client.crt and ca.crt files somewhere locally, eg. into the [examples/certs](./certs) folder.
-
-3. If needed, modify `TLS_KEY_FILE_NAME`, `TLS_CERT_FILE_NAME` and `TLS_CA_FILE_NAME` constants in the example source file. The default values point to the files in the [examples/certs](./certs) folder.
-
-4. Setup Apache Ignite server to accept TLS - see appropriate Ignite documentation. Provide the obtained keystore.jks and truststore.jks certificates during the setup.
-
-5. Switch on and setup authentication in Apache Ignite server - see appropriate Ignite documentation.
-
-6. If needed, modify `USER_NAME` and `PASSWORD` constants in the example source file. The default values are the default Ignite username/password.
-
-7. Executes [Setup and Running](#setup-and-running) steps.
-
-## Additional Setup for FailoverExample ##
-
-1. Run three Ignite nodes. See appropriate Ignite documentation for more details.
-
-2. If needed, modify `ENDPOINT1`, `ENDPOINT2`, `ENDPOINT2` constants in an example source file - Ignite node endpoints.
-Default values are `localhost:10800`, `localhost:10801`, `localhost:10802` respectively.
-
-2. Run an example by calling `node FailoverExample.js`. 
-
-3. Shut down the node the client connected to (you can find it out from the client logs in the console).
-
-4. From the logs, you will see that the client automatically reconnects to another node which is available.
-
-5. Shut down all the nodes. You will see the client being stopped after failing to connect to each of the nodes.
-
diff --git a/modules/platforms/nodejs/lib/BinaryObject.js b/modules/platforms/nodejs/lib/BinaryObject.js
index 2cc6be6..9be60da 100644
--- a/modules/platforms/nodejs/lib/BinaryObject.js
+++ b/modules/platforms/nodejs/lib/BinaryObject.js
@@ -25,7 +25,6 @@
 const BinaryType = require('./internal/BinaryType');
 const BinaryField = require('./internal/BinaryType').BinaryField;
 const BinaryTypeBuilder = require('./internal/BinaryType').BinaryTypeBuilder;
-const BinaryWriter = require('./internal/BinaryWriter');
 const ArgumentChecker = require('./internal/ArgumentChecker');
 const Logger = require('./internal/Logger');
 
@@ -78,7 +77,9 @@
         this._typeBuilder = BinaryTypeBuilder.fromTypeName(typeName);
         this._modified = false;
         this._schemaOffset = null;
+        this._hasSchema = false;
         this._compactFooter = false;
+        this._hasRawData = false;
     }
 
     /**
@@ -283,33 +284,41 @@
     /**
      * @ignore
      */
-    static async _fromBuffer(buffer) {
+    static async _fromBuffer(communicator, buffer) {
         const result = new BinaryObject(new ComplexObjectType({})._typeName);
         result._buffer = buffer;
         result._startPos = buffer.position;
-        await result._read();
+        await result._read(communicator);
         return result;
     }
 
     /**
      * @ignore
      */
-    async _write(buffer) {
+    async _write(communicator, buffer) {
         if (this._buffer && !this._modified) {
             buffer.writeBuffer(this._buffer.buffer, this._startPos, this._startPos + this._length);
         }
         else {
-            await this._typeBuilder.finalize();
+            await this._typeBuilder.finalize(communicator);
             this._startPos = buffer.position;
             buffer.position = this._startPos + HEADER_LENGTH;
-            // write fields
-            for (let field of this._fields.values()) {
-                await field._writeValue(buffer, this._typeBuilder.getField(field.id).typeCode);
+            this._hasSchema = (this._fields.size > 0);
+            if (this._hasSchema) {
+                let field;
+                // write fields
+                for (field of this._fields.values()) {
+                    await field._writeValue(communicator, buffer, this._typeBuilder.getField(field.id).typeCode);
+                }
+                this._schemaOffset = buffer.position - this._startPos;
+                this._offsetType = field.getOffsetType(this._startPos);
+                // write schema
+                for (let field of this._fields.values()) {
+                    field._writeOffset(buffer, this._startPos, this._offsetType);
+                }
             }
-            this._schemaOffset = buffer.position - this._startPos;
-            // write schema
-            for (let field of this._fields.values()) {
-                field._writeOffset(buffer, this._startPos);
+            else {
+                this._schemaOffset = 0;
             }
             this._length = buffer.position - this._startPos;
             this._buffer = buffer;
@@ -334,7 +343,17 @@
         // version
         this._buffer.writeByte(VERSION);
         // flags
-        this._buffer.writeShort(FLAG_USER_TYPE | FLAG_HAS_SCHEMA | FLAG_COMPACT_FOOTER);
+        let flags = FLAG_USER_TYPE;
+        if (this._hasSchema) {
+            flags = flags | FLAG_HAS_SCHEMA | FLAG_COMPACT_FOOTER;
+        }
+        if (this._offsetType === BinaryUtils.TYPE_CODE.BYTE) {
+            flags = flags | FLAG_OFFSET_ONE_BYTE;
+        }
+        else if (this._offsetType === BinaryUtils.TYPE_CODE.SHORT) {
+            flags = flags | FLAG_OFFSET_TWO_BYTES;
+        }
+        this._buffer.writeShort(flags);
         // type id
         this._buffer.writeInteger(this._typeBuilder.getTypeId());
         // hash code
@@ -343,7 +362,7 @@
         // length
         this._buffer.writeInteger(this._length);
         // schema id
-        this._buffer.writeInteger(this._typeBuilder.getSchemaId());
+        this._buffer.writeInteger(this._hasSchema ? this._typeBuilder.getSchemaId() : 0);
         // schema offset
         this._buffer.writeInteger(this._schemaOffset);
     }
@@ -351,39 +370,45 @@
     /**
      * @ignore
      */
-    async _read() {
-        await this._readHeader();
-        this._buffer.position = this._startPos + this._schemaOffset;
-        const fieldOffsets = new Array();
-        const fieldIds = this._typeBuilder._schema.fieldIds;
-        let index = 0;
-        let fieldId;
-        while (this._buffer.position < this._startPos + this._length) {
-            if (!this._compactFooter) {
-                fieldId = this._buffer.readInteger();
-                this._typeBuilder._schema.addField(fieldId);
+    async _read(communicator) {
+        await this._readHeader(communicator);
+        if (this._hasSchema) {
+            this._buffer.position = this._startPos + this._schemaOffset;
+            const fieldOffsets = new Array();
+            const fieldIds = this._typeBuilder._schema.fieldIds;
+            let index = 0;
+            let fieldId;
+            let schemaEndOffset = this._startPos + this._length;
+            if (this._hasRawData) {
+                schemaEndOffset -= BinaryUtils.getSize(BinaryUtils.TYPE_CODE.INTEGER);
             }
-            else {
-                if (index >= fieldIds.length) {
-                    throw Errors.IgniteClientError.serializationError(
-                        false, 'wrong number of fields in schema');
+            while (this._buffer.position < schemaEndOffset) {
+                if (!this._compactFooter) {
+                    fieldId = this._buffer.readInteger();
+                    this._typeBuilder._schema.addField(fieldId);
                 }
-                fieldId = fieldIds[index];
-                index++;
+                else {
+                    if (index >= fieldIds.length) {
+                        throw Errors.IgniteClientError.serializationError(
+                            false, 'wrong number of fields in schema');
+                    }
+                    fieldId = fieldIds[index];
+                    index++;
+                }
+                fieldOffsets.push([fieldId, this._buffer.readNumber(this._offsetType, false)]);
             }
-            fieldOffsets.push([fieldId, this._buffer.readNumber(this._offsetType)]);
-        }
-        fieldOffsets.sort((val1, val2) => val1[1] - val2[1]);
-        let offset;
-        let nextOffset;
-        let field;
-        for (let i = 0; i < fieldOffsets.length; i++) {
-            fieldId = fieldOffsets[i][0];
-            offset = fieldOffsets[i][1];
-            nextOffset = i + 1 < fieldOffsets.length ? fieldOffsets[i + 1][1] : this._schemaOffset;
-            field = BinaryObjectField._fromBuffer(
-                this._buffer, this._startPos + offset, nextOffset - offset, fieldId);
-            this._fields.set(field.id, field);
+            fieldOffsets.sort((val1, val2) => val1[1] - val2[1]);
+            let offset;
+            let nextOffset;
+            let field;
+            for (let i = 0; i < fieldOffsets.length; i++) {
+                fieldId = fieldOffsets[i][0];
+                offset = fieldOffsets[i][1];
+                nextOffset = i + 1 < fieldOffsets.length ? fieldOffsets[i + 1][1] : this._schemaOffset;
+                field = BinaryObjectField._fromBuffer(
+                    communicator,this._buffer, this._startPos + offset, nextOffset - offset, fieldId);
+                this._fields.set(field.id, field);
+            }
         }
         this._buffer.position = this._startPos + this._length;
     }
@@ -391,7 +416,7 @@
     /**
      * @ignore
      */
-    async _readHeader() {
+    async _readHeader(communicator) {
         // type code
         this._buffer.readByte();
         // version
@@ -411,23 +436,15 @@
         const schemaId = this._buffer.readInteger();
         // schema offset
         this._schemaOffset = this._buffer.readInteger();
-        const hasSchema = BinaryObject._isFlagSet(flags, FLAG_HAS_SCHEMA);
+        this._hasSchema = BinaryObject._isFlagSet(flags, FLAG_HAS_SCHEMA);
         this._compactFooter = BinaryObject._isFlagSet(flags, FLAG_COMPACT_FOOTER);
+        this._hasRawData = BinaryObject._isFlagSet(flags, FLAG_HAS_RAW_DATA);
         this._offsetType = BinaryObject._isFlagSet(flags, FLAG_OFFSET_ONE_BYTE) ?
             BinaryUtils.TYPE_CODE.BYTE :
             BinaryObject._isFlagSet(flags, FLAG_OFFSET_TWO_BYTES) ?
                 BinaryUtils.TYPE_CODE.SHORT :
                 BinaryUtils.TYPE_CODE.INTEGER;
-
-        if (BinaryObject._isFlagSet(FLAG_HAS_RAW_DATA)) {
-            throw Errors.IgniteClientError.serializationError(
-                false, 'complex objects with raw data are not supported');
-        }
-        if (this._compactFooter && !hasSchema) {
-            throw Errors.IgniteClientError.serializationError(
-                false, 'schema is absent for object with compact footer');
-        }
-        this._typeBuilder = await BinaryTypeBuilder.fromTypeId(typeId, schemaId, hasSchema);
+        this._typeBuilder = await BinaryTypeBuilder.fromTypeId(communicator, typeId, this._compactFooter ? schemaId : null);
     }
 }
 
@@ -460,38 +477,53 @@
     async getValue(type = null) {
         if (this._value === undefined || this._buffer && this._type !== type) {
             this._buffer.position = this._offset;
-            const BinaryReader = require('./internal/BinaryReader');
-            this._value = await BinaryReader.readObject(this._buffer, type);
+            this._value = await this._communicator.readObject(this._buffer, type);
             this._type = type;
         }
         return this._value;
     }
 
-    static _fromBuffer(buffer, offset, length, id) {
+    getOffsetType(headerStartPos) {
+        let offset = this._offset - headerStartPos;
+        if (offset < 0x100) {
+            return BinaryUtils.TYPE_CODE.BYTE;
+        }
+        else if (offset < 0x10000) {
+            return BinaryUtils.TYPE_CODE.SHORT;
+        }
+        return BinaryUtils.TYPE_CODE.INTEGER;        
+    }
+
+    static _fromBuffer(communicator, buffer, offset, length, id) {
         const result = new BinaryObjectField(null);
         result._id = id;
+        result._communicator = communicator;
         result._buffer = buffer;
         result._offset = offset;
         result._length = length;
         return result;
     }
 
-    async _writeValue(buffer, expectedTypeCode) {
+    async _writeValue(communicator, buffer, expectedTypeCode) {
         const offset = buffer.position;
-        if (this._buffer) {
+        if (this._buffer && this._communicator === communicator) {
             buffer.writeBuffer(this._buffer.buffer, this._offset, this._offset + this._length);
         }
         else {
+            if (this._value === undefined) {
+                await this.getValue();
+            }
             BinaryUtils.checkCompatibility(this._value, expectedTypeCode);
-            await BinaryWriter.writeObject(buffer, this._value, this._type);
+            await communicator.writeObject(buffer, this._value, this._type);
         }
+        this._communicator = communicator;
         this._buffer = buffer;
         this._length = buffer.position - offset;
         this._offset = offset;
     }
 
-    _writeOffset(buffer, headerStartPos) {
-        buffer.writeInteger(this._offset - headerStartPos);
+    _writeOffset(buffer, headerStartPos, offsetType) {
+        buffer.writeNumber(this._offset - headerStartPos, offsetType, false);
     }
 }
 
diff --git a/modules/platforms/nodejs/lib/CacheClient.js b/modules/platforms/nodejs/lib/CacheClient.js
index b76471f..f59910b 100644
--- a/modules/platforms/nodejs/lib/CacheClient.js
+++ b/modules/platforms/nodejs/lib/CacheClient.js
@@ -18,8 +18,6 @@
 'use strict';
 
 const BinaryUtils = require('./internal/BinaryUtils');
-const BinaryReader = require('./internal/BinaryReader');
-const BinaryWriter = require('./internal/BinaryWriter');
 const ArgumentChecker = require('./internal/ArgumentChecker');
 const SqlQuery = require('./Query').SqlQuery;
 const SqlFieldsQuery = require('./Query').SqlFieldsQuery;
@@ -156,7 +154,7 @@
         ArgumentChecker.notEmpty(keys, 'keys');
         ArgumentChecker.hasType(keys, 'keys', false, Array);
         let result = null;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_GET_ALL,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -167,8 +165,8 @@
                 result = new Array(resultCount);
                 for (let i = 0; i < resultCount; i++) {
                     result[i] = new CacheEntry(
-                        await BinaryReader.readObject(payload, this._getKeyType()),
-                        await BinaryReader.readObject(payload, this._getValueType()));
+                        await this._communicator.readObject(payload, this._getKeyType()),
+                        await this._communicator.readObject(payload, this._getValueType()));
                 }
             });
         return result;
@@ -206,7 +204,7 @@
     async putAll(entries) {
         ArgumentChecker.notEmpty(entries, 'entries');
         ArgumentChecker.hasType(entries, 'entries', true, CacheEntry);
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_PUT_ALL,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -374,12 +372,12 @@
         ArgumentChecker.notNull(value, 'value');
         ArgumentChecker.notNull(newValue, 'newValue');
         let result;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_REPLACE_IF_EQUALS,
             async (payload) => {
                 this._writeCacheInfo(payload);
                 await this._writeKeyValue(payload, key, value);
-                await BinaryWriter.writeObject(payload, newValue, this._getValueType());
+                await this._communicator.writeObject(payload, newValue, this._getValueType());
             },
             async (payload) => {
                 result = payload.readBoolean();
@@ -395,7 +393,7 @@
      * @throws {IgniteClientError} if error.
      */
     async clear() {
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_CLEAR,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -481,7 +479,7 @@
      * @throws {IgniteClientError} if error.
      */
     async removeAll() {
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_REMOVE_ALL,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -502,7 +500,7 @@
     async getSize(...peekModes) {
         ArgumentChecker.hasValueFrom(peekModes, 'peekModes', true, CacheClient.PEEK_MODE);
         let result;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_GET_SIZE,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -537,14 +535,14 @@
         ArgumentChecker.hasType(query, 'query', false, SqlQuery, SqlFieldsQuery, ScanQuery);
 
         let value = null;
-        await this._socket.send(
+        await this._communicator.send(
             query._operation,
             async (payload) => {
                 this._writeCacheInfo(payload);
-                await query._write(payload);
+                await query._write(this._communicator, payload);
             },
             async (payload) => {
-                value = await query._getCursor(this._socket, payload, this._keyType, this._valueType);
+                value = await query._getCursor(this._communicator, payload, this._keyType, this._valueType);
             });
         return value;
     }
@@ -554,13 +552,13 @@
     /**
      * @ignore
      */
-    constructor(name, config, socket) {
+    constructor(name, config, communicator) {
         this._name = name;
         this._cacheId = CacheClient._calculateId(this._name);
         this._config = config;
         this._keyType = null;
         this._valueType = null;
-        this._socket = socket;
+        this._communicator = communicator;
     }
 
     /**
@@ -582,8 +580,8 @@
      * @ignore
      */
     async _writeKeyValue(payload, key, value) {
-        await BinaryWriter.writeObject(payload, key, this._getKeyType());
-        await BinaryWriter.writeObject(payload, value, this._getValueType());
+        await this._communicator.writeObject(payload, key, this._getKeyType());
+        await this._communicator.writeObject(payload, value, this._getValueType());
     }
 
     /**
@@ -592,7 +590,7 @@
     async _writeKeys(payload, keys) {
         payload.writeInteger(keys.length);
         for (let key of keys) {
-            await BinaryWriter.writeObject(payload, key, this._getKeyType());
+            await this._communicator.writeObject(payload, key, this._getKeyType());
         }
     }
 
@@ -616,7 +614,7 @@
     async _writeKeyValueOp(operation, key, value, payloadReader = null) {
         ArgumentChecker.notNull(key, 'key');
         ArgumentChecker.notNull(value, 'value');
-        await this._socket.send(
+        await this._communicator.send(
             operation,
             async (payload) => {
                 this._writeCacheInfo(payload);
@@ -633,7 +631,7 @@
         await this._writeKeyValueOp(
             operation, key, value,
             async (payload) => {
-                result = await BinaryReader.readObject(payload, this._getValueType());
+                result = await this._communicator.readObject(payload, this._getValueType());
             });
         return result;
     }
@@ -656,11 +654,11 @@
      */
     async _writeKeyOp(operation, key, payloadReader = null) {
         ArgumentChecker.notNull(key, 'key');
-        await this._socket.send(
+        await this._communicator.send(
             operation,
             async (payload) => {
                 this._writeCacheInfo(payload);
-                await BinaryWriter.writeObject(payload, key, this._getKeyType());
+                await this._communicator.writeObject(payload, key, this._getKeyType());
             },
             payloadReader);
     }
@@ -673,7 +671,7 @@
         await this._writeKeyOp(
             operation, key,
             async (payload) => {
-                value = await BinaryReader.readObject(payload, this._getValueType());
+                value = await this._communicator.readObject(payload, this._getValueType());
             });
         return value;
     }
@@ -697,7 +695,7 @@
     async _writeKeysOp(operation, keys, payloadReader = null) {
         ArgumentChecker.notEmpty(keys, 'keys');
         ArgumentChecker.hasType(keys, 'keys', false, Array);
-        await this._socket.send(
+        await this._communicator.send(
             operation,
             async (payload) => {
                 this._writeCacheInfo(payload);
diff --git a/modules/platforms/nodejs/lib/CacheConfiguration.js b/modules/platforms/nodejs/lib/CacheConfiguration.js
index a4e4574..ccf20b9 100644
--- a/modules/platforms/nodejs/lib/CacheConfiguration.js
+++ b/modules/platforms/nodejs/lib/CacheConfiguration.js
@@ -20,8 +20,7 @@
 const ComplexObjectType = require('./ObjectType').ComplexObjectType;
 const ObjectArrayType = require('./ObjectType').ObjectArrayType;
 const BinaryUtils = require('./internal/BinaryUtils');
-const BinaryReader = require('./internal/BinaryReader');
-const BinaryWriter = require('./internal/BinaryWriter');
+const BinaryCommunicator = require('./internal/BinaryCommunicator');
 const ArgumentChecker = require('./internal/ArgumentChecker');
 const Errors = require('./Errors');
 
@@ -94,17 +93,17 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._typeName);
-        await BinaryWriter.writeString(buffer, this._affinityKeyFieldName);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._typeName);
+        BinaryCommunicator.writeString(buffer, this._affinityKeyFieldName);
     }
 
     /**
      * @ignore
      */
-    async _read(buffer) {
-        this._typeName = await BinaryReader.readObject(buffer);
-        this._affinityKeyFieldName = await BinaryReader.readObject(buffer);
+    async _read(communicator, buffer) {
+        this._typeName = BinaryCommunicator.readString(buffer);
+        this._affinityKeyFieldName = BinaryCommunicator.readString(buffer);
     }
 }
 
@@ -306,27 +305,27 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._keyTypeName);
-        await BinaryWriter.writeString(buffer, this._valueTypeName);
-        await BinaryWriter.writeString(buffer, this._tableName);
-        await BinaryWriter.writeString(buffer, this._keyFieldName);
-        await BinaryWriter.writeString(buffer, this._valueFieldName);
-        await this._writeSubEntities(buffer, this._fields);
-        await this._writeAliases(buffer);
-        await this._writeSubEntities(buffer, this._indexes);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._keyTypeName);
+        BinaryCommunicator.writeString(buffer, this._valueTypeName);
+        BinaryCommunicator.writeString(buffer, this._tableName);
+        BinaryCommunicator.writeString(buffer, this._keyFieldName);
+        BinaryCommunicator.writeString(buffer, this._valueFieldName);
+        await this._writeSubEntities(communicator, buffer, this._fields);
+        await this._writeAliases(communicator, buffer);
+        await this._writeSubEntities(communicator, buffer, this._indexes);
     }
 
     /**
      * @ignore
      */
-    async _writeAliases(buffer) {
+    async _writeAliases(communicator, buffer) {
         const length = this._aliases ? this._aliases.size : 0;
         buffer.writeInteger(length);
         if (length > 0) {
             for (let [key, value] of this._aliases.entries()) {
-                await BinaryWriter.writeString(buffer, key);
-                await BinaryWriter.writeString(buffer, value);
+                BinaryCommunicator.writeString(buffer, key);
+                BinaryCommunicator.writeString(buffer, value);
             }
         }
     }
@@ -334,12 +333,12 @@
     /**
      * @ignore
      */
-    async _writeSubEntities(buffer, entities) {
+    async _writeSubEntities(communicator, buffer, entities) {
         const length = entities ? entities.length : 0;
         buffer.writeInteger(length);
         if (length > 0) {
             for (let entity of entities) {
-                await entity._write(buffer);
+                await entity._write(communicator, buffer);
             }
         }
     }
@@ -347,28 +346,28 @@
     /**
      * @ignore
      */
-    async _read(buffer) {
-        this._keyTypeName = await BinaryReader.readObject(buffer);
-        this._valueTypeName = await BinaryReader.readObject(buffer);
-        this._tableName = await BinaryReader.readObject(buffer);
-        this._keyFieldName = await BinaryReader.readObject(buffer);
-        this._valueFieldName = await BinaryReader.readObject(buffer);
-        this._fields = await this._readSubEntities(buffer, QueryField);
-        await this._readAliases(buffer);
-        this._indexes = await this._readSubEntities(buffer, QueryIndex);
+    async _read(communicator, buffer) {
+        this._keyTypeName = await communicator.readObject(buffer);
+        this._valueTypeName = await communicator.readObject(buffer);
+        this._tableName = await communicator.readObject(buffer);
+        this._keyFieldName = await communicator.readObject(buffer);
+        this._valueFieldName = await communicator.readObject(buffer);
+        this._fields = await this._readSubEntities(communicator, buffer, QueryField);
+        await this._readAliases(communicator, buffer);
+        this._indexes = await this._readSubEntities(communicator, buffer, QueryIndex);
     }
 
     /**
      * @ignore
      */
-    async _readSubEntities(buffer, objectConstructor) {
+    async _readSubEntities(communicator, buffer, objectConstructor) {
         const length = buffer.readInteger(buffer);
         const result = new Array(length);
         if (length > 0) {
             let res;
             for (let i = 0; i < length; i++) {
                 res = new objectConstructor();
-                await res._read(buffer);
+                await res._read(communicator, buffer);
                 result[i] = res;
             }
         }
@@ -378,13 +377,13 @@
     /**
      * @ignore
      */
-    async _readAliases(buffer) {
+    async _readAliases(communicator, buffer) {
         const length = buffer.readInteger(buffer);
         this._aliases = new Map();
         if (length > 0) {
             let res;
             for (let i = 0; i < length; i++) {
-                this._aliases.set(await BinaryReader.readObject(buffer), await BinaryReader.readObject(buffer));
+                this._aliases.set(await communicator.readObject(buffer), await communicator.readObject(buffer));
             }
         }
     }
@@ -416,6 +415,7 @@
         this._precision = -1;
         this._scale = -1;
         this._valueType = null;
+        this._communicator = null;
         this._buffer = null;
         this._index = null;
     }
@@ -538,7 +538,7 @@
             if (this._buffer) {
                 const position = this._buffer.position;
                 this._buffer.position = this._index;
-                const result = await BinaryReader.readObject(this._buffer, valueType);
+                const result = await this._communicator.readObject(this._buffer, valueType);
                 this._buffer.position = position;
                 return result;
             }
@@ -600,12 +600,12 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._name);
-        await BinaryWriter.writeString(buffer, this._typeName);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._name);
+        BinaryCommunicator.writeString(buffer, this._typeName);
         buffer.writeBoolean(this._isKeyField);
         buffer.writeBoolean(this._isNotNull);
-        await BinaryWriter.writeObject(buffer, this._defaultValue ? this._defaultValue : null, this._valueType);
+        await communicator.writeObject(buffer, this._defaultValue ? this._defaultValue : null, this._valueType);
         buffer.writeInteger(this._precision);
         buffer.writeInteger(this._scale);
     }
@@ -613,15 +613,16 @@
     /**
      * @ignore
      */
-    async _read(buffer) {
-        this._name = await BinaryReader.readObject(buffer);
-        this._typeName = await BinaryReader.readObject(buffer);
+    async _read(communicator, buffer) {
+        this._name = await communicator.readObject(buffer);
+        this._typeName = await communicator.readObject(buffer);
         this._isKeyField = buffer.readBoolean();
         this._isNotNull = buffer.readBoolean();
         this._defaultValue = undefined;
+        this._communicator = communicator;
         this._buffer = buffer;
         this._index = buffer.position;
-        await BinaryReader.readObject(buffer);
+        await communicator.readObject(buffer);
         this._precision = buffer.readInteger();
         this._scale = buffer.readInteger();
     }
@@ -732,7 +733,7 @@
      *
      * @return {number}
      */
-     getInlineSize() {
+    getInlineSize() {
         return this._inlineSize;
     }
 
@@ -762,8 +763,8 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._name);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._name);
         buffer.writeByte(this._type);
         buffer.writeInteger(this._inlineSize);
         // write fields
@@ -771,7 +772,7 @@
         buffer.writeInteger(length);
         if (length > 0) {
             for (let [key, value] of this._fields.entries()) {
-                await BinaryWriter.writeString(buffer, key);
+                BinaryCommunicator.writeString(buffer, key);
                 buffer.writeBoolean(value);
             }
         }
@@ -780,8 +781,8 @@
     /**
      * @ignore
      */
-    async _read(buffer) {
-        this._name = await BinaryReader.readObject(buffer);
+    async _read(communicator, buffer) {
+        this._name = await communicator.readObject(buffer);
         this._type = buffer.readByte();
         this._inlineSize = buffer.readInteger();
         // read fields
@@ -790,7 +791,7 @@
         if (length > 0) {
             let res;
             for (let i = 0; i < length; i++) {
-                this._fields.set(await BinaryReader.readObject(buffer), buffer.readBoolean());
+                this._fields.set(await communicator.readObject(buffer), buffer.readBoolean());
             }
         }
     }
@@ -1610,7 +1611,7 @@
     /**
      * @ignore
      */
-    async _write(buffer, name) {
+    async _write(communicator, buffer, name) {
         this._properties.set(PROP_NAME, name);
 
         const startPos = buffer.position;
@@ -1619,7 +1620,7 @@
             BinaryUtils.getSize(BinaryUtils.TYPE_CODE.SHORT);
 
         for (let [propertyCode, property] of this._properties) {
-            await this._writeProperty(buffer, propertyCode, property);
+            await this._writeProperty(communicator, buffer, propertyCode, property);
         }
 
         const length = buffer.position - startPos;
@@ -1632,23 +1633,23 @@
     /**
      * @ignore
      */
-    async _writeProperty(buffer, propertyCode, property) {
+    async _writeProperty(communicator, buffer, propertyCode, property) {
         buffer.writeShort(propertyCode);
         const propertyType = PROP_TYPES[propertyCode];
         switch (BinaryUtils.getTypeCode(propertyType)) {
             case BinaryUtils.TYPE_CODE.INTEGER:
             case BinaryUtils.TYPE_CODE.LONG:
             case BinaryUtils.TYPE_CODE.BOOLEAN:
-                await BinaryWriter.writeObject(buffer, property, propertyType, false);
+                await communicator.writeObject(buffer, property, propertyType, false);
                 return;
             case BinaryUtils.TYPE_CODE.STRING:
-                await BinaryWriter.writeObject(buffer, property, propertyType);
+                await communicator.writeObject(buffer, property, propertyType);
                 return;
             case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
                 const length = property ? property.length : 0;
                 buffer.writeInteger(length);
                 for (let prop of property) {
-                    await prop._write(buffer);
+                    await prop._write(communicator, buffer);
                 }
                 return;
             default:
@@ -1659,54 +1660,54 @@
     /**
      * @ignore
      */
-    async _read(buffer) {
+    async _read(communicator, buffer) {
         // length
         buffer.readInteger();
-        await this._readProperty(buffer, PROP_ATOMICITY_MODE);
-        await this._readProperty(buffer, PROP_BACKUPS);
-        await this._readProperty(buffer, PROP_CACHE_MODE);
-        await this._readProperty(buffer, PROP_COPY_ON_READ);
-        await this._readProperty(buffer, PROP_DATA_REGION_NAME);
-        await this._readProperty(buffer, PROP_EAGER_TTL);
-        await this._readProperty(buffer, PROP_STATISTICS_ENABLED);
-        await this._readProperty(buffer, PROP_GROUP_NAME);
-        await this._readProperty(buffer, PROP_DEFAULT_LOCK_TIMEOUT);
-        await this._readProperty(buffer, PROP_MAX_CONCURRENT_ASYNC_OPS);
-        await this._readProperty(buffer, PROP_MAX_QUERY_ITERATORS);
-        await this._readProperty(buffer, PROP_NAME);
-        await this._readProperty(buffer, PROP_IS_ONHEAP_CACHE_ENABLED);
-        await this._readProperty(buffer, PROP_PARTITION_LOSS_POLICY);
-        await this._readProperty(buffer, PROP_QUERY_DETAIL_METRICS_SIZE);
-        await this._readProperty(buffer, PROP_QUERY_PARALLELISM);
-        await this._readProperty(buffer, PROP_READ_FROM_BACKUP);
-        await this._readProperty(buffer, PROP_REBALANCE_BATCH_SIZE);
-        await this._readProperty(buffer, PROP_REBALANCE_BATCHES_PREFETCH_COUNT);
-        await this._readProperty(buffer, PROP_REBALANCE_DELAY);
-        await this._readProperty(buffer, PROP_REBALANCE_MODE);
-        await this._readProperty(buffer, PROP_REBALANCE_ORDER);
-        await this._readProperty(buffer, PROP_REBALANCE_THROTTLE);
-        await this._readProperty(buffer, PROP_REBALANCE_TIMEOUT);
-        await this._readProperty(buffer, PROP_SQL_ESCAPE_ALL);
-        await this._readProperty(buffer, PROP_SQL_INDEX_INLINE_MAX_SIZE);
-        await this._readProperty(buffer, PROP_SQL_SCHEMA);
-        await this._readProperty(buffer, PROP_WRITE_SYNCHRONIZATION_MODE);
-        await this._readProperty(buffer, PROP_CACHE_KEY_CONFIGURATION);
-        await this._readProperty(buffer, PROP_QUERY_ENTITY);
+        await this._readProperty(communicator, buffer, PROP_ATOMICITY_MODE);
+        await this._readProperty(communicator, buffer, PROP_BACKUPS);
+        await this._readProperty(communicator, buffer, PROP_CACHE_MODE);
+        await this._readProperty(communicator, buffer, PROP_COPY_ON_READ);
+        await this._readProperty(communicator, buffer, PROP_DATA_REGION_NAME);
+        await this._readProperty(communicator, buffer, PROP_EAGER_TTL);
+        await this._readProperty(communicator, buffer, PROP_STATISTICS_ENABLED);
+        await this._readProperty(communicator, buffer, PROP_GROUP_NAME);
+        await this._readProperty(communicator, buffer, PROP_DEFAULT_LOCK_TIMEOUT);
+        await this._readProperty(communicator, buffer, PROP_MAX_CONCURRENT_ASYNC_OPS);
+        await this._readProperty(communicator, buffer, PROP_MAX_QUERY_ITERATORS);
+        await this._readProperty(communicator, buffer, PROP_NAME);
+        await this._readProperty(communicator, buffer, PROP_IS_ONHEAP_CACHE_ENABLED);
+        await this._readProperty(communicator, buffer, PROP_PARTITION_LOSS_POLICY);
+        await this._readProperty(communicator, buffer, PROP_QUERY_DETAIL_METRICS_SIZE);
+        await this._readProperty(communicator, buffer, PROP_QUERY_PARALLELISM);
+        await this._readProperty(communicator, buffer, PROP_READ_FROM_BACKUP);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_BATCH_SIZE);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_BATCHES_PREFETCH_COUNT);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_DELAY);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_MODE);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_ORDER);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_THROTTLE);
+        await this._readProperty(communicator, buffer, PROP_REBALANCE_TIMEOUT);
+        await this._readProperty(communicator, buffer, PROP_SQL_ESCAPE_ALL);
+        await this._readProperty(communicator, buffer, PROP_SQL_INDEX_INLINE_MAX_SIZE);
+        await this._readProperty(communicator, buffer, PROP_SQL_SCHEMA);
+        await this._readProperty(communicator, buffer, PROP_WRITE_SYNCHRONIZATION_MODE);
+        await this._readProperty(communicator, buffer, PROP_CACHE_KEY_CONFIGURATION);
+        await this._readProperty(communicator, buffer, PROP_QUERY_ENTITY);
     }
 
     /**
      * @ignore
      */
-    async _readProperty(buffer, propertyCode) {
+    async _readProperty(communicator, buffer, propertyCode) {
         const propertyType = PROP_TYPES[propertyCode];
         switch (BinaryUtils.getTypeCode(propertyType)) {
             case BinaryUtils.TYPE_CODE.INTEGER:
             case BinaryUtils.TYPE_CODE.LONG:
             case BinaryUtils.TYPE_CODE.BOOLEAN:
-                this._properties.set(propertyCode, await BinaryReader._readTypedObject(buffer, propertyType));
+                this._properties.set(propertyCode, await communicator._readTypedObject(buffer, propertyType));
                 return;
             case BinaryUtils.TYPE_CODE.STRING:
-                this._properties.set(propertyCode, await BinaryReader.readObject(buffer, propertyType));
+                this._properties.set(propertyCode, await communicator.readObject(buffer, propertyType));
                 return;
             case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
                 const length = buffer.readInteger();
@@ -1714,7 +1715,7 @@
                     const properties = new Array(length);
                     for (let i = 0; i < length; i++) {
                         const property = new propertyType._elementType._objectConstructor();
-                        await property._read(buffer);
+                        await property._read(communicator, buffer);
                         properties[i] = property;
                     }
                     this._properties.set(propertyCode, properties);
diff --git a/modules/platforms/nodejs/lib/Cursor.js b/modules/platforms/nodejs/lib/Cursor.js
index 85176e3..39eea21 100644
--- a/modules/platforms/nodejs/lib/Cursor.js
+++ b/modules/platforms/nodejs/lib/Cursor.js
@@ -20,8 +20,6 @@
 const Errors = require('./Errors');
 const BinaryUtils = require('./internal/BinaryUtils');
 const BinaryObject = require('./BinaryObject');
-const BinaryReader = require('./internal/BinaryReader');
-const BinaryWriter = require('./internal/BinaryWriter');
 
 /**
  * Class representing a cursor to obtain results of SQL and Scan query operations.
@@ -101,7 +99,7 @@
     async close() {
         // Close cursor only if the server has more pages: the server closes cursor automatically on last page
         if (this._id && this._hasNext) {
-            await this._socket.send(
+            await this._communicator.send(
                 BinaryUtils.OPERATION.RESOURCE_CLOSE,
                 async (payload) => {
                     await this._write(payload);
@@ -114,8 +112,8 @@
     /**
      * @ignore
      */
-    constructor(socket, operation, buffer, keyType = null, valueType = null) {
-        this._socket = socket;
+    constructor(communicator, operation, buffer, keyType = null, valueType = null) {
+        this._communicator = communicator;
         this._operation = operation;
         this._buffer = buffer;
         this._keyType = keyType;
@@ -133,7 +131,7 @@
         this._hasNext = false;
         this._values = null;
         this._buffer = null;
-        await this._socket.send(
+        await this._communicator.send(
             this._operation,
             async (payload) => {
                 await this._write(payload);
@@ -175,8 +173,8 @@
     async _readRow(buffer) {
         const CacheEntry = require('./CacheClient').CacheEntry;
         return new CacheEntry(
-            await BinaryReader.readObject(buffer, this._keyType),
-            await BinaryReader.readObject(buffer, this._valueType));
+            await this._communicator.readObject(buffer, this._keyType),
+            await this._communicator.readObject(buffer, this._valueType));
     }
 
     /**
@@ -273,8 +271,8 @@
     /**
      * @ignore
      */
-    constructor(socket, buffer) {
-        super(socket, BinaryUtils.OPERATION.QUERY_SQL_FIELDS_CURSOR_GET_PAGE, buffer);
+    constructor(communicator, buffer) {
+        super(communicator, BinaryUtils.OPERATION.QUERY_SQL_FIELDS_CURSOR_GET_PAGE, buffer);
         this._fieldNames = [];
     }
 
@@ -286,7 +284,7 @@
         this._fieldCount = buffer.readInteger();
         if (includeFieldNames) {
             for (let i = 0; i < this._fieldCount; i++) {
-                this._fieldNames[i] = await BinaryReader.readObject(buffer);
+                this._fieldNames[i] = await this._communicator.readObject(buffer);
             }
         }
     }
@@ -299,7 +297,7 @@
         let fieldType;
         for (let i = 0; i < this._fieldCount; i++) {
             fieldType = this._fieldTypes && i < this._fieldTypes.length ? this._fieldTypes[i] : null;
-            values[i] = await BinaryReader.readObject(buffer);
+            values[i] = await this._communicator.readObject(buffer, fieldType);
         }
         return values;
     }
diff --git a/modules/platforms/nodejs/lib/EnumItem.js b/modules/platforms/nodejs/lib/EnumItem.js
index e4fb165..1d1725e 100644
--- a/modules/platforms/nodejs/lib/EnumItem.js
+++ b/modules/platforms/nodejs/lib/EnumItem.js
@@ -156,14 +156,14 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
+    async _write(communicator, buffer) {
         buffer.writeInteger(this._typeId);
         if (this._ordinal !== null) {
             buffer.writeInteger(this._ordinal);
             return;
         }
         else if (this._name !== null || this._value !== null) {
-            const type = await this._getType(this._typeId);
+            const type = await this._getType(communicator, this._typeId);
             if (type._isEnum && type._enumValues) {
                 for (let i = 0; i < type._enumValues.length; i++) {
                     if (this._name === type._enumValues[i][0] ||
@@ -181,10 +181,10 @@
     /**
      * @ignore
      */
-    async _read(buffer) {
+    async _read(communicator, buffer) {
         this._typeId = buffer.readInteger();
         this._ordinal = buffer.readInteger();
-        const type = await this._getType(this._typeId);
+        const type = await this._getType(communicator, this._typeId);
         if (!type._isEnum || !type._enumValues || type._enumValues.length <= this._ordinal) {
             throw new Errors.IgniteClientError('EnumItem can not be deserialized: type mismatch');
         }
@@ -195,9 +195,8 @@
     /**
      * @ignore
      */
-    async _getType(typeId) {
-        const BinaryTypeStorage = require('./internal/BinaryTypeStorage');
-        return await BinaryTypeStorage.getEntity().getType(typeId);
+    async _getType(communicator, typeId) {
+        return await communicator.typeStorage.getType(typeId);
     }
 }
 
diff --git a/modules/platforms/nodejs/lib/IgniteClient.js b/modules/platforms/nodejs/lib/IgniteClient.js
index ba3361f..544c37f 100644
--- a/modules/platforms/nodejs/lib/IgniteClient.js
+++ b/modules/platforms/nodejs/lib/IgniteClient.js
@@ -21,9 +21,7 @@
 const IgniteClientConfiguration = require('./IgniteClientConfiguration');
 const CacheConfiguration = require('./CacheConfiguration');
 const BinaryUtils = require('./internal/BinaryUtils');
-const BinaryWriter = require('./internal/BinaryWriter');
-const BinaryReader = require('./internal/BinaryReader');
-const BinaryTypeStorage = require('./internal/BinaryTypeStorage');
+const BinaryCommunicator = require('./internal/BinaryCommunicator');
 const ArgumentChecker = require('./internal/ArgumentChecker');
 const Logger = require('./internal/Logger');
 
@@ -70,7 +68,7 @@
     constructor(onStateChanged = null) {
         const ClientFailoverSocket = require('./internal/ClientFailoverSocket');
         this._socket = new ClientFailoverSocket(onStateChanged);
-        BinaryTypeStorage.createEntity(this._socket);
+        this._communicator = new BinaryCommunicator(this._socket);
     }
 
     static get STATE() {
@@ -133,7 +131,7 @@
         ArgumentChecker.notEmpty(name, 'name');
         ArgumentChecker.hasType(cacheConfig, 'cacheConfig', false, CacheConfiguration);
 
-        await this._socket.send(
+        await this._communicator.send(
             cacheConfig ?
                 BinaryUtils.OPERATION.CACHE_CREATE_WITH_CONFIGURATION :
                 BinaryUtils.OPERATION.CACHE_CREATE_WITH_NAME,
@@ -161,7 +159,7 @@
     async getOrCreateCache(name, cacheConfig = null) {
         ArgumentChecker.notEmpty(name, 'name');
         ArgumentChecker.hasType(cacheConfig, 'cacheConfig', false, CacheConfiguration);
-        await this._socket.send(
+        await this._communicator.send(
             cacheConfig ?
                 BinaryUtils.OPERATION.CACHE_GET_OR_CREATE_WITH_CONFIGURATION :
                 BinaryUtils.OPERATION.CACHE_GET_OR_CREATE_WITH_NAME,
@@ -199,7 +197,7 @@
      */
     async destroyCache(name) {
         ArgumentChecker.notEmpty(name, 'name');
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_DESTROY,
             async (payload) => {
                 payload.writeInteger(CacheClient._calculateId(name));
@@ -222,7 +220,7 @@
     async getCacheConfiguration(name) {
         ArgumentChecker.notEmpty(name, 'name');
         let config;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_GET_CONFIGURATION,
             async (payload) => {
                 payload.writeInteger(CacheClient._calculateId(name));
@@ -230,7 +228,7 @@
             },
             async (payload) => {
                 config = new CacheConfiguration();
-                await config._read(payload);
+                await config._read(this._communicator, payload);
             });
         return config;
     }
@@ -248,11 +246,11 @@
      */
     async cacheNames() {
         let names;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.CACHE_GET_NAMES,
             null,
             async (payload) => {
-                names = await BinaryReader.readStringArray(payload);
+                names = await this._communicator.readStringArray(payload);
             });
         return names;
     }
@@ -273,7 +271,7 @@
      * @ignore
      */
     _getCache(name, cacheConfig = null) {
-        return new CacheClient(name, cacheConfig, this._socket);
+        return new CacheClient(name, cacheConfig, this._communicator);
     }
 
     /**
@@ -281,10 +279,10 @@
      */
     async _writeCacheNameOrConfig(buffer, name, cacheConfig) {
         if (cacheConfig) {
-            await cacheConfig._write(buffer, name);
+            await cacheConfig._write(this._communicator, buffer, name);
         }
         else {
-            await BinaryWriter.writeString(buffer, name);
+            BinaryCommunicator.writeString(buffer, name);
         }
     }
 }
diff --git a/modules/platforms/nodejs/lib/Query.js b/modules/platforms/nodejs/lib/Query.js
index 5c230df..029ec3d 100644
--- a/modules/platforms/nodejs/lib/Query.js
+++ b/modules/platforms/nodejs/lib/Query.js
@@ -20,7 +20,7 @@
 const Cursor = require('./Cursor').Cursor;
 const SqlFieldsCursor = require('./Cursor').SqlFieldsCursor;
 const ArgumentChecker = require('./internal/ArgumentChecker');
-const BinaryWriter = require('./internal/BinaryWriter');
+const BinaryCommunicator = require('./internal/BinaryCommunicator');
 const BinaryUtils = require('./internal/BinaryUtils');
 
 const PAGE_SIZE_DEFAULT = 1024;
@@ -220,10 +220,10 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._type);
-        await BinaryWriter.writeString(buffer, this._sql);
-        await this._writeArgs(buffer);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._type);
+        BinaryCommunicator.writeString(buffer, this._sql);
+        await this._writeArgs(communicator, buffer);
         buffer.writeBoolean(this._distributedJoins);
         buffer.writeBoolean(this._local);
         buffer.writeBoolean(this._replicatedOnly);
@@ -234,14 +234,14 @@
     /**
      * @ignore
      */
-    async _writeArgs(buffer) {
+    async _writeArgs(communicator, buffer) {
         const argsLength = this._args ? this._args.length : 0;
         buffer.writeInteger(argsLength);
         if (argsLength > 0) {
             let argType;
             for (let i = 0; i < argsLength; i++) {
                 argType = this._argTypes && i < this._argTypes.length ? this._argTypes[i] : null;
-                await BinaryWriter.writeObject(buffer, this._args[i], argType);
+                await communicator.writeObject(buffer, this._args[i], argType);
             }
         }
     }
@@ -249,8 +249,8 @@
     /**
      * @ignore
      */
-    async _getCursor(socket, payload, keyType = null, valueType = null) {
-        const cursor = new Cursor(socket, BinaryUtils.OPERATION.QUERY_SQL_CURSOR_GET_PAGE, payload, keyType, valueType);
+    async _getCursor(communicator, payload, keyType = null, valueType = null) {
+        const cursor = new Cursor(communicator, BinaryUtils.OPERATION.QUERY_SQL_CURSOR_GET_PAGE, payload, keyType, valueType);
         cursor._readId(payload);
         return cursor;
     }
@@ -410,12 +410,12 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
-        await BinaryWriter.writeString(buffer, this._schema);
+    async _write(communicator, buffer) {
+        BinaryCommunicator.writeString(buffer, this._schema);
         buffer.writeInteger(this._pageSize);
         buffer.writeInteger(this._maxRows);
-        await BinaryWriter.writeString(buffer, this._sql);
-        await this._writeArgs(buffer)
+        BinaryCommunicator.writeString(buffer, this._sql);
+        await this._writeArgs(communicator, buffer)
         buffer.writeByte(this._statementType);
         buffer.writeBoolean(this._distributedJoins);
         buffer.writeBoolean(this._local);
@@ -430,8 +430,8 @@
     /**
      * @ignore
      */
-    async _getCursor(socket, payload, keyType = null, valueType = null) {
-        const cursor = new SqlFieldsCursor(socket, payload);
+    async _getCursor(communicator, payload, keyType = null, valueType = null) {
+        const cursor = new SqlFieldsCursor(communicator, payload);
         await cursor._readFieldNames(payload, this._includeFieldNames);
         return cursor;
     }
@@ -485,9 +485,9 @@
     /**
      * @ignore
      */
-    async _write(buffer) {
+    async _write(communicator, buffer) {
         // filter
-        await BinaryWriter.writeObject(buffer, null);
+        await communicator.writeObject(buffer, null);
         buffer.writeInteger(this._pageSize);
         buffer.writeInteger(this._partitionNumber);
         buffer.writeBoolean(this._local);
@@ -496,8 +496,8 @@
     /**
      * @ignore
      */
-    async _getCursor(socket, payload, keyType = null, valueType = null) {
-        const cursor = new Cursor(socket, BinaryUtils.OPERATION.QUERY_SCAN_CURSOR_GET_PAGE, payload, keyType, valueType);
+    async _getCursor(communicator, payload, keyType = null, valueType = null) {
+        const cursor = new Cursor(communicator, BinaryUtils.OPERATION.QUERY_SCAN_CURSOR_GET_PAGE, payload, keyType, valueType);
         cursor._readId(payload);
         return cursor;
     }
diff --git a/modules/platforms/nodejs/lib/internal/BinaryCommunicator.js b/modules/platforms/nodejs/lib/internal/BinaryCommunicator.js
new file mode 100644
index 0000000..9418d36
--- /dev/null
+++ b/modules/platforms/nodejs/lib/internal/BinaryCommunicator.js
@@ -0,0 +1,409 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+'use strict';
+
+const Decimal = require('decimal.js');
+const CollectionObjectType = require('../ObjectType').CollectionObjectType;
+const ComplexObjectType = require('../ObjectType').ComplexObjectType;
+const Errors = require('../Errors');
+const Timestamp = require('../Timestamp');
+const EnumItem = require('../EnumItem');
+const BinaryUtils = require('./BinaryUtils');
+const BinaryTypeStorage = require('./BinaryTypeStorage');
+
+class BinaryCommunicator {
+
+    constructor(socket) {
+        this._socket = socket;
+        this._typeStorage = new BinaryTypeStorage(this);
+    }
+
+    static readString(buffer) {
+        const typeCode = buffer.readByte();
+        BinaryUtils.checkTypesComatibility(BinaryUtils.TYPE_CODE.STRING, typeCode);
+        if (typeCode === BinaryUtils.TYPE_CODE.NULL) {
+            return null;
+        }
+        return buffer.readString();
+    }
+
+    static writeString(buffer, value) {
+        if (value === null) {
+            buffer.writeByte(BinaryUtils.TYPE_CODE.NULL);
+        }
+        else {
+            buffer.writeByte(BinaryUtils.TYPE_CODE.STRING);
+            buffer.writeString(value);
+        }
+    }
+    
+    async send(opCode, payloadWriter, payloadReader = null) {
+        await this._socket.send(opCode, payloadWriter, payloadReader);
+    }
+
+    get typeStorage() {
+        return this._typeStorage;
+    }
+
+    async readObject(buffer, expectedType = null) {
+        const typeCode = buffer.readByte();
+        BinaryUtils.checkTypesComatibility(expectedType, typeCode);
+        return await this._readTypedObject(buffer, typeCode, expectedType);
+    }
+
+    async readStringArray(buffer) {
+        return await this._readTypedObject(buffer, BinaryUtils.TYPE_CODE.STRING_ARRAY);
+    }
+
+    async writeObject(buffer, object, objectType = null, writeObjectType = true) {
+        BinaryUtils.checkCompatibility(object, objectType);
+        if (object === null) {
+            buffer.writeByte(BinaryUtils.TYPE_CODE.NULL);
+            return;
+        }
+
+        objectType =  objectType ? objectType : BinaryUtils.calcObjectType(object);
+        const objectTypeCode = BinaryUtils.getTypeCode(objectType);
+
+        if (writeObjectType) {
+            buffer.writeByte(objectTypeCode);
+        }
+        switch (objectTypeCode) {
+            case BinaryUtils.TYPE_CODE.BYTE:
+            case BinaryUtils.TYPE_CODE.SHORT:
+            case BinaryUtils.TYPE_CODE.INTEGER:
+            case BinaryUtils.TYPE_CODE.FLOAT:
+            case BinaryUtils.TYPE_CODE.DOUBLE:
+                buffer.writeNumber(object, objectTypeCode);
+                break;
+            case BinaryUtils.TYPE_CODE.LONG:
+                buffer.writeLong(object);
+                break;
+            case BinaryUtils.TYPE_CODE.CHAR:
+                buffer.writeChar(object);
+                break;
+            case BinaryUtils.TYPE_CODE.BOOLEAN:
+                buffer.writeBoolean(object);
+                break;
+            case BinaryUtils.TYPE_CODE.STRING:
+                buffer.writeString(object);
+                break;
+            case BinaryUtils.TYPE_CODE.UUID:
+                this._writeUUID(buffer, object);
+                break;
+            case BinaryUtils.TYPE_CODE.DATE:
+                buffer.writeDate(object);
+                break;
+            case BinaryUtils.TYPE_CODE.ENUM:
+                await this._writeEnum(buffer, object);
+                break;
+            case BinaryUtils.TYPE_CODE.DECIMAL:
+                this._writeDecimal(buffer, object);
+                break;
+            case BinaryUtils.TYPE_CODE.TIMESTAMP:
+                this._writeTimestamp(buffer, object);
+                break;
+            case BinaryUtils.TYPE_CODE.TIME:
+                this._writeTime(buffer, object);
+                break;
+            case BinaryUtils.TYPE_CODE.BYTE_ARRAY:
+            case BinaryUtils.TYPE_CODE.SHORT_ARRAY:
+            case BinaryUtils.TYPE_CODE.INTEGER_ARRAY:
+            case BinaryUtils.TYPE_CODE.LONG_ARRAY:
+            case BinaryUtils.TYPE_CODE.FLOAT_ARRAY:
+            case BinaryUtils.TYPE_CODE.DOUBLE_ARRAY:
+            case BinaryUtils.TYPE_CODE.CHAR_ARRAY:
+            case BinaryUtils.TYPE_CODE.BOOLEAN_ARRAY:
+            case BinaryUtils.TYPE_CODE.STRING_ARRAY:
+            case BinaryUtils.TYPE_CODE.UUID_ARRAY:
+            case BinaryUtils.TYPE_CODE.DATE_ARRAY:
+            case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
+            case BinaryUtils.TYPE_CODE.ENUM_ARRAY:
+            case BinaryUtils.TYPE_CODE.DECIMAL_ARRAY:
+            case BinaryUtils.TYPE_CODE.TIMESTAMP_ARRAY:
+            case BinaryUtils.TYPE_CODE.TIME_ARRAY:
+                await this._writeArray(buffer, object, objectType, objectTypeCode);
+                break;
+            case BinaryUtils.TYPE_CODE.COLLECTION:
+                await this._writeCollection(buffer, object, objectType);
+                break;
+            case BinaryUtils.TYPE_CODE.MAP:
+                await this._writeMap(buffer, object, objectType);
+                break;
+            case BinaryUtils.TYPE_CODE.BINARY_OBJECT:
+                await this._writeBinaryObject(buffer, object, objectType);
+                break;
+            case BinaryUtils.TYPE_CODE.COMPLEX_OBJECT:
+                await this._writeComplexObject(buffer, object, objectType);
+                break;
+            default:
+                throw Errors.IgniteClientError.unsupportedTypeError(objectType);
+        }
+    }
+
+    async _readTypedObject(buffer, objectTypeCode, expectedType = null) {
+        switch (objectTypeCode) {
+            case BinaryUtils.TYPE_CODE.BYTE:
+            case BinaryUtils.TYPE_CODE.SHORT:
+            case BinaryUtils.TYPE_CODE.INTEGER:
+            case BinaryUtils.TYPE_CODE.FLOAT:
+            case BinaryUtils.TYPE_CODE.DOUBLE:
+                return buffer.readNumber(objectTypeCode);
+            case BinaryUtils.TYPE_CODE.LONG:
+                return buffer.readLong().toNumber();
+            case BinaryUtils.TYPE_CODE.CHAR:
+                return buffer.readChar();
+            case BinaryUtils.TYPE_CODE.BOOLEAN:
+                return buffer.readBoolean();
+            case BinaryUtils.TYPE_CODE.STRING:
+                return buffer.readString();
+            case BinaryUtils.TYPE_CODE.UUID:
+                return this._readUUID(buffer);
+            case BinaryUtils.TYPE_CODE.DATE:
+                return buffer.readDate();
+            case BinaryUtils.TYPE_CODE.ENUM:
+            case BinaryUtils.TYPE_CODE.BINARY_ENUM:
+                return await this._readEnum(buffer);
+            case BinaryUtils.TYPE_CODE.DECIMAL:
+                return this._readDecimal(buffer);
+            case BinaryUtils.TYPE_CODE.TIMESTAMP:
+                return this._readTimestamp(buffer);
+            case BinaryUtils.TYPE_CODE.TIME:
+                return buffer.readDate();
+            case BinaryUtils.TYPE_CODE.BYTE_ARRAY:
+            case BinaryUtils.TYPE_CODE.SHORT_ARRAY:
+            case BinaryUtils.TYPE_CODE.INTEGER_ARRAY:
+            case BinaryUtils.TYPE_CODE.LONG_ARRAY:
+            case BinaryUtils.TYPE_CODE.FLOAT_ARRAY:
+            case BinaryUtils.TYPE_CODE.DOUBLE_ARRAY:
+            case BinaryUtils.TYPE_CODE.CHAR_ARRAY:
+            case BinaryUtils.TYPE_CODE.BOOLEAN_ARRAY:
+            case BinaryUtils.TYPE_CODE.STRING_ARRAY:
+            case BinaryUtils.TYPE_CODE.UUID_ARRAY:
+            case BinaryUtils.TYPE_CODE.DATE_ARRAY:
+            case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
+            case BinaryUtils.TYPE_CODE.ENUM_ARRAY:
+            case BinaryUtils.TYPE_CODE.DECIMAL_ARRAY:
+            case BinaryUtils.TYPE_CODE.TIMESTAMP_ARRAY:
+            case BinaryUtils.TYPE_CODE.TIME_ARRAY:
+                return await this._readArray(buffer, objectTypeCode, expectedType);
+            case BinaryUtils.TYPE_CODE.COLLECTION:
+                return await this._readCollection(buffer, expectedType);
+            case BinaryUtils.TYPE_CODE.MAP:
+                return await this._readMap(buffer, expectedType);
+            case BinaryUtils.TYPE_CODE.BINARY_OBJECT:
+                return await this._readBinaryObject(buffer, expectedType);
+            case BinaryUtils.TYPE_CODE.NULL:
+                return null;
+            case BinaryUtils.TYPE_CODE.COMPLEX_OBJECT:
+                return await this._readComplexObject(buffer, expectedType);
+            default:
+                throw Errors.IgniteClientError.unsupportedTypeError(objectTypeCode);
+        }
+    }
+
+    _readUUID(buffer) {
+        return [...buffer.readBuffer(BinaryUtils.getSize(BinaryUtils.TYPE_CODE.UUID))];
+    }
+
+    async _readEnum(buffer) {
+        const enumItem = new EnumItem(0);
+        await enumItem._read(this, buffer);
+        return enumItem;
+    }
+
+    _readDecimal(buffer) {
+        const scale = buffer.readInteger();
+        const dataLength = buffer.readInteger();
+        const data = buffer.readBuffer(dataLength);
+        const isNegative = (data[0] & 0x80) !== 0;
+        if (isNegative) {
+            data[0] &= 0x7F;
+        }
+        let result = new Decimal('0x' + data.toString('hex'));
+        if (isNegative) {
+            result = result.negated();
+        }
+        return result.mul(Decimal.pow(10, -scale));
+    }
+
+    _readTimestamp(buffer) {
+        return new Timestamp(buffer.readLong().toNumber(), buffer.readInteger());
+    }
+
+    async _readArray(buffer, arrayTypeCode, arrayType) {
+        if (arrayTypeCode === BinaryUtils.TYPE_CODE.OBJECT_ARRAY) {
+            buffer.readInteger();
+        }
+        const length = buffer.readInteger();
+        const elementType = BinaryUtils.getArrayElementType(arrayType ? arrayType : arrayTypeCode);
+        const keepElementType = elementType === null ? true : BinaryUtils.keepArrayElementType(arrayTypeCode);
+        const result = new Array(length);
+        for (let i = 0; i < length; i++) {
+            result[i] = keepElementType ?
+                await this.readObject(buffer, elementType) :
+                await this._readTypedObject(buffer, elementType);
+        }
+        return result;
+    }
+
+    async _readMap(buffer, expectedMapType) {
+        const result = new Map();
+        const size = buffer.readInteger();
+        const subType = buffer.readByte();
+        let key, value;
+        for (let i = 0; i < size; i++) {
+            key = await this.readObject(buffer, expectedMapType ? expectedMapType._keyType : null);
+            value = await this.readObject(buffer, expectedMapType ? expectedMapType._valueType : null);
+            result.set(key, value);
+        }
+        return result;
+    }
+
+    async _readCollection(buffer, expectedColType) {
+        const size = buffer.readInteger();
+        const subType = buffer.readByte();
+        const isSet = CollectionObjectType._isSet(subType);
+        const result = isSet ? new Set() : new Array(size);
+        let element;
+        for (let i = 0; i < size; i++) {
+            element = await this.readObject(buffer, expectedColType ? expectedColType._elementType : null);
+            if (isSet) {
+                result.add(element);
+            }
+            else {
+                result[i] = element;
+            }
+        }
+        return result;
+    }
+
+    async _readBinaryObject(buffer, expectedType) {
+        const size = buffer.readInteger();
+        const startPos = buffer.position;
+        buffer.position = startPos + size;
+        const offset = buffer.readInteger();
+        const endPos = buffer.position;
+        buffer.position = startPos + offset;
+        const result = await this.readObject(buffer, expectedType);
+        buffer.position = endPos;
+        return result;
+    }
+
+    async _readComplexObject(buffer, expectedType) {
+        buffer.position = buffer.position - 1;
+        const BinaryObject = require('../BinaryObject');
+        const binaryObject = await BinaryObject._fromBuffer(this, buffer);
+        return expectedType ?
+            await binaryObject.toObject(expectedType) : binaryObject;
+    }
+
+    _writeUUID(buffer, value) {
+        buffer.writeBuffer(Buffer.from(value));
+    }
+
+    async _writeEnum(buffer, enumValue) {
+        await enumValue._write(this, buffer);
+    }
+
+    _writeDecimal(buffer, decimal) {
+        let strValue = decimal.toExponential();
+        let expIndex = strValue.indexOf('e');
+        if (expIndex < 0) {
+            expIndex = strValue.indexOf('E');
+        }
+        let scale = 0;
+        if (expIndex >= 0) {
+            scale = parseInt(strValue.substring(expIndex + 1));
+            strValue = strValue.substring(0, expIndex);
+        }
+        const isNegative = strValue.startsWith('-');
+        if (isNegative) {
+            strValue = strValue.substring(1);
+        }
+        const dotIndex = strValue.indexOf('.');
+        if (dotIndex >= 0) {
+            scale -= strValue.length - dotIndex - 1;
+            strValue = strValue.substring(0, dotIndex) + strValue.substring(dotIndex + 1);
+        }
+        scale = -scale;
+        let hexValue = new Decimal(strValue).toHexadecimal().substring(2);
+        hexValue = ((hexValue.length % 2 !== 0) ? '000' : '00') + hexValue;
+        const valueBuffer = Buffer.from(hexValue, 'hex');
+        if (isNegative) {
+            valueBuffer[0] |= 0x80;
+        }
+        buffer.writeInteger(scale);
+        buffer.writeInteger(valueBuffer.length);
+        buffer.writeBuffer(valueBuffer);
+    }
+
+    _writeTimestamp(buffer, timestamp) {
+        buffer.writeDate(timestamp);
+        buffer.writeInteger(timestamp.getNanos());
+    }
+
+    _writeTime(buffer, time) {
+        const midnight = new Date(time);
+        midnight.setHours(0, 0, 0, 0);
+        buffer.writeLong(time.getTime() - midnight.getTime());
+    }
+
+    async _writeArray(buffer, array, arrayType, arrayTypeCode) {
+        const BinaryType = require('./BinaryType');
+        const elementType = BinaryUtils.getArrayElementType(arrayType);
+        const keepElementType = BinaryUtils.keepArrayElementType(arrayTypeCode);
+        if (arrayTypeCode === BinaryUtils.TYPE_CODE.OBJECT_ARRAY) {
+            buffer.writeInteger(elementType instanceof ComplexObjectType ?
+                BinaryType._calculateId(elementType._typeName) : -1);
+        }
+        buffer.writeInteger(array.length);
+        for (let elem of array) {
+            await this.writeObject(buffer, elem, elementType, keepElementType);
+        }
+    }
+
+    async _writeCollection(buffer, collection, collectionType) {
+        buffer.writeInteger(collection instanceof Set ? collection.size : collection.length);
+        buffer.writeByte(collectionType._subType);
+        for (let element of collection) {
+            await this.writeObject(buffer, element, collectionType._elementType);
+        }
+    }
+
+    async _writeMap(buffer, map, mapType) {
+        buffer.writeInteger(map.size);
+        buffer.writeByte(mapType._subType);
+        for (let [key, value] of map.entries()) {
+            await this.writeObject(buffer, key, mapType._keyType);
+            await this.writeObject(buffer, value, mapType._valueType);
+        }
+    }
+
+    async _writeBinaryObject(buffer, binaryObject) {
+        buffer.position = buffer.position - 1;
+        await binaryObject._write(this, buffer);
+    }
+
+    async _writeComplexObject(buffer, object, objectType) {
+        const BinaryObject = require('../BinaryObject');
+        await this._writeBinaryObject(buffer, await BinaryObject.fromObject(object, objectType));
+    }
+}
+
+module.exports = BinaryCommunicator;
diff --git a/modules/platforms/nodejs/lib/internal/BinaryReader.js b/modules/platforms/nodejs/lib/internal/BinaryReader.js
deleted file mode 100644
index 8c25c39..0000000
--- a/modules/platforms/nodejs/lib/internal/BinaryReader.js
+++ /dev/null
@@ -1,197 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-const Decimal = require('decimal.js');
-const BinaryObject = require('../BinaryObject');
-const CollectionObjectType = require('../ObjectType').CollectionObjectType;
-const Errors = require('../Errors');
-const Timestamp = require('../Timestamp');
-const EnumItem = require('../EnumItem');
-const BinaryUtils = require('./BinaryUtils');
-
-class BinaryReader {
-
-    static async readObject(buffer, expectedType = null) {
-        const typeCode = buffer.readByte();
-        BinaryUtils.checkTypesComatibility(expectedType, typeCode);
-        return await BinaryReader._readTypedObject(buffer, typeCode, expectedType);
-    }
-
-    static async readStringArray(buffer) {
-        return await BinaryReader._readTypedObject(buffer, BinaryUtils.TYPE_CODE.STRING_ARRAY);
-    }
-
-    static async _readTypedObject(buffer, objectTypeCode, expectedType = null) {
-        switch (objectTypeCode) {
-            case BinaryUtils.TYPE_CODE.BYTE:
-            case BinaryUtils.TYPE_CODE.SHORT:
-            case BinaryUtils.TYPE_CODE.INTEGER:
-            case BinaryUtils.TYPE_CODE.FLOAT:
-            case BinaryUtils.TYPE_CODE.DOUBLE:
-                return buffer.readNumber(objectTypeCode);
-            case BinaryUtils.TYPE_CODE.LONG:
-                return buffer.readLong().toNumber();
-            case BinaryUtils.TYPE_CODE.CHAR:
-                return buffer.readChar();
-            case BinaryUtils.TYPE_CODE.BOOLEAN:
-                return buffer.readBoolean();
-            case BinaryUtils.TYPE_CODE.STRING:
-                return buffer.readString();
-            case BinaryUtils.TYPE_CODE.UUID:
-                return BinaryReader._readUUID(buffer);
-            case BinaryUtils.TYPE_CODE.DATE:
-                return buffer.readDate();
-            case BinaryUtils.TYPE_CODE.ENUM:
-            case BinaryUtils.TYPE_CODE.BINARY_ENUM:
-                return await BinaryReader._readEnum(buffer);
-            case BinaryUtils.TYPE_CODE.DECIMAL:
-                return BinaryReader._readDecimal(buffer);
-            case BinaryUtils.TYPE_CODE.TIMESTAMP:
-                return BinaryReader._readTimestamp(buffer);
-            case BinaryUtils.TYPE_CODE.TIME:
-                return buffer.readDate();
-            case BinaryUtils.TYPE_CODE.BYTE_ARRAY:
-            case BinaryUtils.TYPE_CODE.SHORT_ARRAY:
-            case BinaryUtils.TYPE_CODE.INTEGER_ARRAY:
-            case BinaryUtils.TYPE_CODE.LONG_ARRAY:
-            case BinaryUtils.TYPE_CODE.FLOAT_ARRAY:
-            case BinaryUtils.TYPE_CODE.DOUBLE_ARRAY:
-            case BinaryUtils.TYPE_CODE.CHAR_ARRAY:
-            case BinaryUtils.TYPE_CODE.BOOLEAN_ARRAY:
-            case BinaryUtils.TYPE_CODE.STRING_ARRAY:
-            case BinaryUtils.TYPE_CODE.UUID_ARRAY:
-            case BinaryUtils.TYPE_CODE.DATE_ARRAY:
-            case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
-            case BinaryUtils.TYPE_CODE.ENUM_ARRAY:
-            case BinaryUtils.TYPE_CODE.DECIMAL_ARRAY:
-            case BinaryUtils.TYPE_CODE.TIMESTAMP_ARRAY:
-            case BinaryUtils.TYPE_CODE.TIME_ARRAY:
-                return await BinaryReader._readArray(buffer, objectTypeCode, expectedType);
-            case BinaryUtils.TYPE_CODE.COLLECTION:
-                return await BinaryReader._readCollection(buffer, expectedType);
-            case BinaryUtils.TYPE_CODE.MAP:
-                return await BinaryReader._readMap(buffer, expectedType);
-            case BinaryUtils.TYPE_CODE.BINARY_OBJECT:
-                return await BinaryReader._readBinaryObject(buffer, expectedType);
-            case BinaryUtils.TYPE_CODE.NULL:
-                return null;
-            case BinaryUtils.TYPE_CODE.COMPLEX_OBJECT:
-                return await BinaryReader._readComplexObject(buffer, expectedType);
-            default:
-                throw Errors.IgniteClientError.unsupportedTypeError(objectTypeCode);
-        }
-    }
-
-    static _readUUID(buffer) {
-        return [...buffer.readBuffer(BinaryUtils.getSize(BinaryUtils.TYPE_CODE.UUID))];
-    }
-
-    static async _readEnum(buffer) {
-        const enumItem = new EnumItem(0);
-        await enumItem._read(buffer);
-        return enumItem;
-    }
-
-    static _readDecimal(buffer) {
-        const scale = buffer.readInteger();
-        const dataLength = buffer.readInteger();
-        const data = buffer.readBuffer(dataLength);
-        const isNegative = (data[0] & 0x80) !== 0;
-        if (isNegative) {
-            data[0] &= 0x7F;
-        }
-        let result = new Decimal('0x' + data.toString('hex'));
-        if (isNegative) {
-            result = result.negated();
-        }
-        return result.mul(Decimal.pow(10, -scale));
-    }
-
-    static _readTimestamp(buffer) {
-        return new Timestamp(buffer.readLong().toNumber(), buffer.readInteger());
-    }
-
-    static async _readArray(buffer, arrayTypeCode, arrayType) {
-        if (arrayTypeCode === BinaryUtils.TYPE_CODE.OBJECT_ARRAY) {
-            buffer.readInteger();
-        }
-        const length = buffer.readInteger();
-        const elementType = BinaryUtils.getArrayElementType(arrayType ? arrayType : arrayTypeCode);
-        const keepElementType = elementType === null ? true : BinaryUtils.keepArrayElementType(arrayTypeCode);
-        const result = new Array(length);
-        for (let i = 0; i < length; i++) {
-            result[i] = keepElementType ?
-                await BinaryReader.readObject(buffer, elementType) :
-                await BinaryReader._readTypedObject(buffer, elementType);
-        }
-        return result;
-    }
-
-    static async _readMap(buffer, expectedMapType) {
-        const result = new Map();
-        const size = buffer.readInteger();
-        const subType = buffer.readByte();
-        let key, value;
-        for (let i = 0; i < size; i++) {
-            key = await BinaryReader.readObject(buffer, expectedMapType ? expectedMapType._keyType : null);
-            value = await BinaryReader.readObject(buffer, expectedMapType ? expectedMapType._valueType : null);
-            result.set(key, value);
-        }
-        return result;
-    }
-
-    static async _readCollection(buffer, expectedColType) {
-        const size = buffer.readInteger();
-        const subType = buffer.readByte();
-        const isSet = CollectionObjectType._isSet(subType);
-        const result = isSet ? new Set() : new Array(size);
-        let element;
-        for (let i = 0; i < size; i++) {
-            element = await BinaryReader.readObject(buffer, expectedColType ? expectedColType._elementType : null);
-            if (isSet) {
-                result.add(element);
-            }
-            else {
-                result[i] = element;
-            }
-        }
-        return result;
-    }
-
-    static async _readBinaryObject(buffer, expectedType) {
-        const size = buffer.readInteger();
-        const startPos = buffer.position;
-        buffer.position = startPos + size;
-        const offset = buffer.readInteger();
-        const endPos = buffer.position;
-        buffer.position = startPos + offset;
-        const result = await BinaryReader.readObject(buffer, expectedType);
-        buffer.position = endPos;
-        return result;
-    }
-
-    static async _readComplexObject(buffer, expectedType) {
-        buffer.position = buffer.position - 1;
-        const binaryObject = await BinaryObject._fromBuffer(buffer);
-        return expectedType ?
-            await binaryObject.toObject(expectedType) : binaryObject;
-    }
-}
-
-module.exports = BinaryReader;
diff --git a/modules/platforms/nodejs/lib/internal/BinaryType.js b/modules/platforms/nodejs/lib/internal/BinaryType.js
index b9e239d..a111ed0 100644
--- a/modules/platforms/nodejs/lib/internal/BinaryType.js
+++ b/modules/platforms/nodejs/lib/internal/BinaryType.js
@@ -18,10 +18,11 @@
 'use strict';
 
 const Util = require('util');
+const Long = require('long');
 const ComplexObjectType = require('../ObjectType').ComplexObjectType;
 const BinaryTypeStorage = require('./BinaryTypeStorage');
 const BinaryUtils = require('./BinaryUtils');
-const BinaryWriter = require('./BinaryWriter');
+const BinaryCommunicator = require('./BinaryCommunicator');
 const Errors = require('../Errors');
 
 class BinaryType {
@@ -104,6 +105,15 @@
         return result;
     }
 
+    isValid() {
+        for (let field of this._fields.values()) {
+            if (!field.isValid()) {
+                return false;
+            }
+        }
+        return this._name !== null;
+    }
+
     static _calculateId(name) {
         return BinaryUtils.hashCodeLowerCase(name);
     }
@@ -112,9 +122,9 @@
         // type id
         buffer.writeInteger(this._id);
         // type name
-        await BinaryWriter.writeString(buffer, this._name);
+        BinaryCommunicator.writeString(buffer, this._name);
         // affinity key field name
-        await BinaryWriter.writeString(buffer, null);
+        BinaryCommunicator.writeString(buffer, null);
         // fields count
         buffer.writeInteger(this._fields.size);
         // fields
@@ -136,7 +146,7 @@
             buffer.writeInteger(length);
             if (length > 0) {
                 for (let [key, value] of this._enumValues) {
-                    await BinaryWriter.writeString(buffer, key);
+                    BinaryCommunicator.writeString(buffer, key);
                     buffer.writeInteger(value);
                 }
             }
@@ -147,10 +157,9 @@
         // type id
         this._id = buffer.readInteger();
         // type name
-        const BinaryReader = require('./BinaryReader');
-        this._name = await BinaryReader.readObject(buffer);
+        this._name = BinaryCommunicator.readString(buffer);
         // affinity key field name
-        await BinaryReader.readObject(buffer);
+        BinaryCommunicator.readString(buffer);
         // fields count
         const fieldsCount = buffer.readInteger();
         // fields
@@ -173,13 +182,12 @@
     }
 
     async _readEnum(buffer) {
-        const BinaryReader = require('./BinaryReader');
         this._isEnum = buffer.readBoolean();
         if (this._isEnum) {
             const valuesCount = buffer.readInteger();
             this._enumValues = new Array(valuesCount);
             for (let i = 0; i < valuesCount; i++) {
-                this._enumValues[i] = [await BinaryReader.readObject(buffer), buffer.readInteger()];
+                this._enumValues[i] = [BinaryCommunicator.readString(buffer), buffer.readInteger()];
             }
         }
     }
@@ -247,19 +255,16 @@
     }
 
     static _updateSchemaId(schemaId, fieldId) {
-        schemaId = schemaId ^ (fieldId & 0xFF);
-        schemaId = schemaId * FNV1_PRIME;
-        schemaId |= 0;
-        schemaId = schemaId ^ ((fieldId >> 8) & 0xFF);
-        schemaId = schemaId * FNV1_PRIME;
-        schemaId |= 0;
-        schemaId = schemaId ^ ((fieldId >> 16) & 0xFF);
-        schemaId = schemaId * FNV1_PRIME;
-        schemaId |= 0;
-        schemaId = schemaId ^ ((fieldId >> 24) & 0xFF);
-        schemaId = schemaId * FNV1_PRIME;
-        schemaId |= 0;
+        schemaId = BinarySchema._updateSchemaIdPart(schemaId, fieldId & 0xFF);
+        schemaId = BinarySchema._updateSchemaIdPart(schemaId, (fieldId >> 8) & 0xFF);
+        schemaId = BinarySchema._updateSchemaIdPart(schemaId, (fieldId >> 16) & 0xFF);
+        schemaId = BinarySchema._updateSchemaIdPart(schemaId, (fieldId >> 24) & 0xFF);
+        return schemaId;
+    }
 
+    static _updateSchemaIdPart(schemaId, fieldIdPart) {
+        schemaId = schemaId ^ fieldIdPart;
+        schemaId = Long.fromValue(schemaId).multiply(FNV1_PRIME).getLowBits();
         return schemaId;
     }
 
@@ -306,13 +311,17 @@
         return this._typeCode;
     }
 
+    isValid() {
+        return this._name !== null;
+    }
+
     static _calculateId(name) {
         return BinaryUtils.hashCodeLowerCase(name);
     }
 
     async _write(buffer) {
         // field name
-        await BinaryWriter.writeString(buffer, this._name);
+        BinaryCommunicator.writeString(buffer, this._name);
         // type code
         buffer.writeInteger(this._typeCode);
         // field id
@@ -320,9 +329,8 @@
     }
 
     async _read(buffer) {
-        const BinaryReader = require('./BinaryReader');
         // field name
-        this._name = await BinaryReader.readObject(buffer);
+        this._name = BinaryCommunicator.readString(buffer);
         // type code
         this._typeCode = buffer.readInteger();
         // field id
@@ -338,12 +346,12 @@
         return result;
     }
 
-    static async fromTypeId(typeId, schemaId, hasSchema) {
+    static async fromTypeId(communicator, typeId, schemaId) {
         let result = new BinaryTypeBuilder();
-        if (hasSchema) {
-            let type = await BinaryTypeStorage.getEntity().getType(typeId, schemaId);
-            if (type) {
-                result._type = type;
+        let type = await communicator.typeStorage.getType(typeId, schemaId);
+        if (type) {
+            result._type = type;
+            if (schemaId !== null) {
                 result._schema = type.getSchema(schemaId);
                 if (!result._schema) {
                     throw Errors.IgniteClientError.serializationError(
@@ -351,8 +359,11 @@
                             schemaId, type.name));
                 }
                 result._fromStorage = true;
-                return result;
             }
+            else {
+                result._schema = new BinarySchema();
+            }
+            return result;
         }
         result._init(null);
         result._type._id = typeId;
@@ -372,7 +383,7 @@
 
     static fromComplexObjectType(complexObjectType, jsObject) {
         let result = new BinaryTypeBuilder();
-        const typeInfo = BinaryTypeStorage.getEntity().getByComplexObjectType(complexObjectType);
+        const typeInfo = BinaryTypeStorage.getByComplexObjectType(complexObjectType);
         if (typeInfo) {
             result._type = typeInfo[0];
             result._schema = typeInfo[1];
@@ -380,7 +391,7 @@
         }
         else {
             result._fromComplexObjectType(complexObjectType, jsObject);
-            BinaryTypeStorage.getEntity().setByComplexObjectType(complexObjectType, result._type, result._schema);
+            BinaryTypeStorage.setByComplexObjectType(complexObjectType, result._type, result._schema);
         }
         return result;        
     }
@@ -424,9 +435,9 @@
         }
     }
 
-    async finalize() {
+    async finalize(communicator) {
         this._schema.finalize();
-        await BinaryTypeStorage.getEntity().addType(this._type, this._schema);
+        await communicator.typeStorage.addType(this._type, this._schema);
     }
 
     constructor() {
diff --git a/modules/platforms/nodejs/lib/internal/BinaryTypeStorage.js b/modules/platforms/nodejs/lib/internal/BinaryTypeStorage.js
index d79156b..2248eb5 100644
--- a/modules/platforms/nodejs/lib/internal/BinaryTypeStorage.js
+++ b/modules/platforms/nodejs/lib/internal/BinaryTypeStorage.js
@@ -19,18 +19,30 @@
 
 const Errors = require('../Errors');
 const BinaryUtils = require('./BinaryUtils');
+const Util = require('util');
 
 class BinaryTypeStorage {
 
-    static getEntity() {
-        if (!BinaryTypeStorage._entity) {
-            throw Errors.IgniteClientError.internalError();
-        }
-        return BinaryTypeStorage._entity;
+    constructor(communicator) {
+        this._communicator = communicator;
+        this._types = new Map();
     }
 
-    static createEntity(socket) {
-        BinaryTypeStorage._entity = new BinaryTypeStorage(socket);
+    static getByComplexObjectType(complexObjectType) {
+        return BinaryTypeStorage.complexObjectTypes.get(complexObjectType);
+    }
+
+    static setByComplexObjectType(complexObjectType, type, schema) {
+        if (!BinaryTypeStorage.complexObjectTypes.has(complexObjectType)) {
+            BinaryTypeStorage.complexObjectTypes.set(complexObjectType, [type, schema]);
+        }
+    }
+
+    static get complexObjectTypes() {
+        if (!BinaryTypeStorage._complexObjectTypes) {
+            BinaryTypeStorage._complexObjectTypes = new Map();
+        }
+        return BinaryTypeStorage._complexObjectTypes;
     }
 
     async addType(binaryType, binarySchema) {
@@ -61,29 +73,13 @@
         return storageType;
     }
 
-    getByComplexObjectType(complexObjectType) {
-        return this._complexObjectTypes.get(complexObjectType);
-    }
-
-    setByComplexObjectType(complexObjectType, type, schema) {
-        if (!this._complexObjectTypes.has(complexObjectType)) {
-            this._complexObjectTypes.set(complexObjectType, [type, schema]);
-        }
-    }
-
     /** Private methods */
 
-    constructor(socket) {
-        this._socket = socket;
-        this._types = new Map();
-        this._complexObjectTypes = new Map();
-    }
-
     async _getBinaryType(typeId) {
         const BinaryType = require('./BinaryType');
         let binaryType = new BinaryType(null);
         binaryType._id = typeId;
-        await this._socket.send(
+        await this._communicator.send(
             BinaryUtils.OPERATION.GET_BINARY_TYPE,
             async (payload) => {
                 payload.writeInteger(typeId);
@@ -101,7 +97,11 @@
     }
 
     async _putBinaryType(binaryType) {
-        await this._socket.send(
+        if (!binaryType.isValid()) {
+            throw Errors.IgniteClientError.serializationError(
+                true, Util.format('type "%d" can not be registered', binaryType.id));
+        }
+        await this._communicator.send(
             BinaryUtils.OPERATION.PUT_BINARY_TYPE,
             async (payload) => {
                 await binaryType._write(payload);
diff --git a/modules/platforms/nodejs/lib/internal/BinaryUtils.js b/modules/platforms/nodejs/lib/internal/BinaryUtils.js
index ab2d40c..2619df7 100644
--- a/modules/platforms/nodejs/lib/internal/BinaryUtils.js
+++ b/modules/platforms/nodejs/lib/internal/BinaryUtils.js
@@ -306,7 +306,7 @@
         const BinaryObject = require('../BinaryObject');
         const objectType = typeof object;
         if (object === null) {
-            return BinaryUtils.TYPE_CODE.NULL;
+            throw Errors.IgniteClientError.unsupportedTypeError(BinaryUtils.TYPE_CODE.NULL);
         }
         else if (objectType === 'number') {
             return BinaryUtils.TYPE_CODE.DOUBLE;
diff --git a/modules/platforms/nodejs/lib/internal/BinaryWriter.js b/modules/platforms/nodejs/lib/internal/BinaryWriter.js
deleted file mode 100644
index 3686bb4..0000000
--- a/modules/platforms/nodejs/lib/internal/BinaryWriter.js
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-'use strict';
-
-const Decimal = require('decimal.js');
-const Errors = require('../Errors');
-const ComplexObjectType = require('../ObjectType').ComplexObjectType;
-const BinaryUtils = require('./BinaryUtils');
-
-class BinaryWriter {
-
-    static async writeString(buffer, value) {
-        await BinaryWriter.writeObject(buffer, value, BinaryUtils.TYPE_CODE.STRING);
-    }
-
-    static async writeObject(buffer, object, objectType = null, writeObjectType = true) {
-        BinaryUtils.checkCompatibility(object, objectType);
-        if (object === null) {
-            buffer.writeByte(BinaryUtils.TYPE_CODE.NULL);
-            return;
-        }
-
-        objectType =  objectType ? objectType : BinaryUtils.calcObjectType(object);
-        const objectTypeCode = BinaryUtils.getTypeCode(objectType);
-
-        if (writeObjectType) {
-            buffer.writeByte(objectTypeCode);
-        }
-        switch (objectTypeCode) {
-            case BinaryUtils.TYPE_CODE.BYTE:
-            case BinaryUtils.TYPE_CODE.SHORT:
-            case BinaryUtils.TYPE_CODE.INTEGER:
-            case BinaryUtils.TYPE_CODE.FLOAT:
-            case BinaryUtils.TYPE_CODE.DOUBLE:
-                buffer.writeNumber(object, objectTypeCode);
-                break;
-            case BinaryUtils.TYPE_CODE.LONG:
-                buffer.writeLong(object);
-                break;
-            case BinaryUtils.TYPE_CODE.CHAR:
-                buffer.writeChar(object);
-                break;
-            case BinaryUtils.TYPE_CODE.BOOLEAN:
-                buffer.writeBoolean(object);
-                break;
-            case BinaryUtils.TYPE_CODE.STRING:
-                buffer.writeString(object);
-                break;
-            case BinaryUtils.TYPE_CODE.UUID:
-                BinaryWriter._writeUUID(buffer, object);
-                break;
-            case BinaryUtils.TYPE_CODE.DATE:
-                buffer.writeDate(object);
-                break;
-            case BinaryUtils.TYPE_CODE.ENUM:
-                await BinaryWriter._writeEnum(buffer, object);
-                break;
-            case BinaryUtils.TYPE_CODE.DECIMAL:
-                BinaryWriter._writeDecimal(buffer, object);
-                break;
-            case BinaryUtils.TYPE_CODE.TIMESTAMP:
-                BinaryWriter._writeTimestamp(buffer, object);
-                break;
-            case BinaryUtils.TYPE_CODE.TIME:
-                BinaryWriter._writeTime(buffer, object);
-                break;
-            case BinaryUtils.TYPE_CODE.BYTE_ARRAY:
-            case BinaryUtils.TYPE_CODE.SHORT_ARRAY:
-            case BinaryUtils.TYPE_CODE.INTEGER_ARRAY:
-            case BinaryUtils.TYPE_CODE.LONG_ARRAY:
-            case BinaryUtils.TYPE_CODE.FLOAT_ARRAY:
-            case BinaryUtils.TYPE_CODE.DOUBLE_ARRAY:
-            case BinaryUtils.TYPE_CODE.CHAR_ARRAY:
-            case BinaryUtils.TYPE_CODE.BOOLEAN_ARRAY:
-            case BinaryUtils.TYPE_CODE.STRING_ARRAY:
-            case BinaryUtils.TYPE_CODE.UUID_ARRAY:
-            case BinaryUtils.TYPE_CODE.DATE_ARRAY:
-            case BinaryUtils.TYPE_CODE.OBJECT_ARRAY:
-            case BinaryUtils.TYPE_CODE.ENUM_ARRAY:
-            case BinaryUtils.TYPE_CODE.DECIMAL_ARRAY:
-            case BinaryUtils.TYPE_CODE.TIMESTAMP_ARRAY:
-            case BinaryUtils.TYPE_CODE.TIME_ARRAY:
-                await BinaryWriter._writeArray(buffer, object, objectType, objectTypeCode);
-                break;
-            case BinaryUtils.TYPE_CODE.COLLECTION:
-                await BinaryWriter._writeCollection(buffer, object, objectType);
-                break;
-            case BinaryUtils.TYPE_CODE.MAP:
-                await BinaryWriter._writeMap(buffer, object, objectType);
-                break;
-            case BinaryUtils.TYPE_CODE.BINARY_OBJECT:
-                await BinaryWriter._writeBinaryObject(buffer, object, objectType);
-                break;
-            case BinaryUtils.TYPE_CODE.COMPLEX_OBJECT:
-                await BinaryWriter._writeComplexObject(buffer, object, objectType);
-                break;
-            default:
-                throw Errors.IgniteClientError.unsupportedTypeError(objectType);
-        }
-    }
-
-    static _writeUUID(buffer, value) {
-        buffer.writeBuffer(Buffer.from(value));
-    }
-
-    static async _writeEnum(buffer, enumValue) {
-        await enumValue._write(buffer);
-    }
-
-    static _writeDecimal(buffer, decimal) {
-        let strValue = decimal.toExponential();
-        let expIndex = strValue.indexOf('e');
-        if (expIndex < 0) {
-            expIndex = strValue.indexOf('E');
-        }
-        let scale = 0;
-        if (expIndex >= 0) {
-            scale = parseInt(strValue.substring(expIndex + 1));
-            strValue = strValue.substring(0, expIndex);
-        }
-        const isNegative = strValue.startsWith('-');
-        if (isNegative) {
-            strValue = strValue.substring(1);
-        }
-        const dotIndex = strValue.indexOf('.');
-        if (dotIndex >= 0) {
-            scale -= strValue.length - dotIndex - 1;
-            strValue = strValue.substring(0, dotIndex) + strValue.substring(dotIndex + 1);
-        }
-        scale = -scale;
-        let hexValue = new Decimal(strValue).toHexadecimal().substring(2);
-        hexValue = ((hexValue.length % 2 !== 0) ? '000' : '00') + hexValue;
-        const valueBuffer = Buffer.from(hexValue, 'hex');
-        if (isNegative) {
-            valueBuffer[0] |= 0x80;
-        }
-        buffer.writeInteger(scale);
-        buffer.writeInteger(valueBuffer.length);
-        buffer.writeBuffer(valueBuffer);
-    }
-
-    static _writeTimestamp(buffer, timestamp) {
-        buffer.writeDate(timestamp);
-        buffer.writeInteger(timestamp.getNanos());
-    }
-
-    static _writeTime(buffer, time) {
-        const midnight = new Date(time);
-        midnight.setHours(0, 0, 0, 0);
-        buffer.writeLong(time.getTime() - midnight.getTime());
-    }
-
-    static async _writeArray(buffer, array, arrayType, arrayTypeCode) {
-        const BinaryType = require('./BinaryType');
-        const elementType = BinaryUtils.getArrayElementType(arrayType);
-        const keepElementType = BinaryUtils.keepArrayElementType(arrayTypeCode);
-        if (arrayTypeCode === BinaryUtils.TYPE_CODE.OBJECT_ARRAY) {
-            buffer.writeInteger(elementType instanceof ComplexObjectType ?
-                BinaryType._calculateId(elementType._typeName) : -1);
-        }
-        buffer.writeInteger(array.length);
-        for (let elem of array) {
-            await BinaryWriter.writeObject(buffer, elem, elementType, keepElementType);
-        }
-    }
-
-    static async _writeCollection(buffer, collection, collectionType) {
-        buffer.writeInteger(collection instanceof Set ? collection.size : collection.length);
-        buffer.writeByte(collectionType._subType);
-        for (let element of collection) {
-            await BinaryWriter.writeObject(buffer, element, collectionType._elementType);
-        }
-    }
-
-    static async _writeMap(buffer, map, mapType) {
-        buffer.writeInteger(map.size);
-        buffer.writeByte(mapType._subType);
-        for (let [key, value] of map.entries()) {
-            await BinaryWriter.writeObject(buffer, key, mapType._keyType);
-            await BinaryWriter.writeObject(buffer, value, mapType._valueType);
-        }
-    }
-
-    static async _writeBinaryObject(buffer, binaryObject) {
-        buffer.position = buffer.position - 1;
-        await binaryObject._write(buffer);
-    }
-
-    static async _writeComplexObject(buffer, object, objectType) {
-        const BinaryObject = require('../BinaryObject');
-        await BinaryWriter._writeBinaryObject(buffer, await BinaryObject.fromObject(object, objectType));
-    }
-}
-
-module.exports = BinaryWriter;
diff --git a/modules/platforms/nodejs/lib/internal/ClientSocket.js b/modules/platforms/nodejs/lib/internal/ClientSocket.js
index 1f12040..73e11f0 100644
--- a/modules/platforms/nodejs/lib/internal/ClientSocket.js
+++ b/modules/platforms/nodejs/lib/internal/ClientSocket.js
@@ -26,8 +26,7 @@
 const IgniteClientConfiguration = require('../IgniteClientConfiguration');
 const MessageBuffer = require('./MessageBuffer');
 const BinaryUtils = require('./BinaryUtils');
-const BinaryReader = require('./BinaryReader');
-const BinaryWriter = require('./BinaryWriter');
+const BinaryCommunicator = require('./BinaryCommunicator');
 const ArgumentChecker = require('./ArgumentChecker');
 const Logger = require('./Logger');
 
@@ -78,12 +77,16 @@
 
 const PROTOCOL_VERSION_1_0_0 = new ProtocolVersion(1, 0, 0);
 const PROTOCOL_VERSION_1_1_0 = new ProtocolVersion(1, 1, 0);
+const PROTOCOL_VERSION_1_2_0 = new ProtocolVersion(1, 2, 0);
 
 const SUPPORTED_VERSIONS = [
     // PROTOCOL_VERSION_1_0_0, // Support for QueryField precision/scale fields breaks 1.0.0 compatibility
-    PROTOCOL_VERSION_1_1_0
+    PROTOCOL_VERSION_1_1_0,
+    PROTOCOL_VERSION_1_2_0
 ];
 
+const CURRENT_VERSION = PROTOCOL_VERSION_1_2_0;
+
 const STATE = Object.freeze({
     INITIAL : 0,
     HANDSHAKE : 1,
@@ -107,12 +110,14 @@
         this._onSocketDisconnect = onSocketDisconnect;
         this._error = null;
         this._wasConnected = false;
+        this._buffer = null;
+        this._offset = 0;
     }
 
     async connect() {
         return new Promise((resolve, reject) => {
             this._connectSocket(
-                this._getHandshake(PROTOCOL_VERSION_1_1_0, resolve, reject));
+                this._getHandshake(CURRENT_VERSION, resolve, reject));
         });
     }
 
@@ -195,28 +200,43 @@
         if (this._state === STATE.DISCONNECTED) {
             return;
         }
-        let offset = 0;
-        while (offset < message.length) {
-            let buffer = MessageBuffer.from(message, offset);
+        if (this._buffer) {
+            this._buffer.concat(message);
+            this._buffer.position = this._offset;
+        }
+        else {
+            this._buffer = MessageBuffer.from(message, 0);
+        }
+        while (this._buffer && this._offset < this._buffer.length) {
             // Response length
-            const length = buffer.readInteger();
-            offset += length + BinaryUtils.getSize(BinaryUtils.TYPE_CODE.INTEGER);
+            const length = this._buffer.readInteger() + BinaryUtils.getSize(BinaryUtils.TYPE_CODE.INTEGER);
+            if (this._buffer.length < this._offset + length) {
+              break;
+            }
+            this._offset += length;
+
             let requestId, isSuccess;
             const isHandshake = this._state === STATE.HANDSHAKE;
 
             if (isHandshake) {
                 // Handshake status
-                isSuccess = (buffer.readByte() === HANDSHAKE_SUCCESS_STATUS_CODE)
+                isSuccess = (this._buffer.readByte() === HANDSHAKE_SUCCESS_STATUS_CODE);
                 requestId = this._handshakeRequestId.toString();
             }
             else {
                 // Request id
-                requestId = buffer.readLong().toString();
+                requestId = this._buffer.readLong().toString();
                 // Status code
-                isSuccess = (buffer.readInteger() === REQUEST_SUCCESS_STATUS_CODE);
+                isSuccess = (this._buffer.readInteger() === REQUEST_SUCCESS_STATUS_CODE);
             }
 
-            this._logMessage(requestId, false, buffer.data);
+            this._logMessage(requestId, false, this._buffer.data);
+
+            const buffer = this._buffer;
+            if (this._offset === this._buffer.length) {
+                this._buffer = null;
+                this._offset = 0;
+            }
 
             if (this._requests.has(requestId)) {
                 const request = this._requests.get(requestId);
@@ -240,7 +260,7 @@
             const serverVersion = new ProtocolVersion();
             serverVersion.read(buffer);
             // Error message
-            const errMessage = await BinaryReader.readObject(buffer);
+            const errMessage = BinaryCommunicator.readString(buffer);
 
             if (!this._protocolVersion.equals(serverVersion)) {
                 if (!this._isSupportedVersion(serverVersion) ||
@@ -271,7 +291,7 @@
     async _finalizeResponse(buffer, request, isSuccess) {
         if (!isSuccess) {
             // Error message
-            const errMessage = await BinaryReader.readObject(buffer);
+            const errMessage = BinaryCommunicator.readString(buffer);
             request.reject(new Errors.OperationError(errMessage));
         }
         else {
@@ -295,8 +315,8 @@
         // Client code
         payload.writeByte(2);
         if (this._config._userName) {
-            await BinaryWriter.writeString(payload, this._config._userName);
-            await BinaryWriter.writeString(payload, this._config._password);
+            BinaryCommunicator.writeString(payload, this._config._userName);
+            BinaryCommunicator.writeString(payload, this._config._password);
         }
     }
 
@@ -431,4 +451,4 @@
     }
 }
 
-module.exports = ClientSocket;
\ No newline at end of file
+module.exports = ClientSocket;
diff --git a/modules/platforms/nodejs/lib/internal/MessageBuffer.js b/modules/platforms/nodejs/lib/internal/MessageBuffer.js
index f1407bf..b3be7e9 100644
--- a/modules/platforms/nodejs/lib/internal/MessageBuffer.js
+++ b/modules/platforms/nodejs/lib/internal/MessageBuffer.js
@@ -42,6 +42,12 @@
         return buf;
     }
 
+    concat(source) {
+        this._buffer = Buffer.concat([this._buffer, source]);
+        this._length = this._buffer.length;
+        this._capacity = this._length;
+    }
+
     get position() {
         return this._position;
     }
@@ -99,19 +105,34 @@
         this.writeNumber(value, BinaryUtils.TYPE_CODE.DOUBLE);
     }
 
-    writeNumber(value, type) {
+    writeNumber(value, type, signed = true) {
         const size = BinaryUtils.getSize(type);
         this._ensureCapacity(size);
         try {
             switch (type) {
                 case BinaryUtils.TYPE_CODE.BYTE:
-                    this._buffer.writeInt8(value, this._position);
+                    if (signed) {
+                        this._buffer.writeInt8(value, this._position);
+                    }
+                    else {
+                        this._buffer.writeUInt8(value, this._position);   
+                    }
                     break;
                 case BinaryUtils.TYPE_CODE.SHORT:
-                    this._buffer.writeInt16LE(value, this._position);
+                    if (signed) {
+                        this._buffer.writeInt16LE(value, this._position);
+                    }
+                    else {
+                        this._buffer.writeUInt16LE(value, this._position);   
+                    }
                     break;
                 case BinaryUtils.TYPE_CODE.INTEGER:
-                    this._buffer.writeInt32LE(value, this._position);
+                    if (signed) {
+                        this._buffer.writeInt32LE(value, this._position);
+                    }
+                    else {
+                        this._buffer.writeUInt32LE(value, this._position);   
+                    }
                     break;
                 case BinaryUtils.TYPE_CODE.FLOAT:
                     this._buffer.writeFloatLE(value, this._position);
@@ -178,19 +199,19 @@
         return this.readNumber(BinaryUtils.TYPE_CODE.DOUBLE);
     }
 
-    readNumber(type) {
+    readNumber(type, signed = true) {
         const size = BinaryUtils.getSize(type);
         this._ensureSize(size);
         let value;
         switch (type) {
             case BinaryUtils.TYPE_CODE.BYTE:
-                value = this._buffer.readInt8(this._position);
+                value = signed ? this._buffer.readInt8(this._position) : this._buffer.readUInt8(this._position);
                 break;
             case BinaryUtils.TYPE_CODE.SHORT:
-                value = this._buffer.readInt16LE(this._position);
+                value = signed ? this._buffer.readInt16LE(this._position) : this._buffer.readUInt16LE(this._position);
                 break;
             case BinaryUtils.TYPE_CODE.INTEGER:
-                value = this._buffer.readInt32LE(this._position);
+                value = signed ? this._buffer.readInt32LE(this._position) : this._buffer.readUInt32LE(this._position);
                 break;
             case BinaryUtils.TYPE_CODE.FLOAT:
                 value = this._buffer.readFloatLE(this._position);
diff --git a/modules/platforms/nodejs/spec/README.md b/modules/platforms/nodejs/spec/README.md
deleted file mode 100644
index 4b947c4..0000000
--- a/modules/platforms/nodejs/spec/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Tests #
-
-NodeJS Client for Apache Ignite contains [Jasmine](https://www.npmjs.com/package/jasmine) tests to check the behavior of the client. the tests include:
-- functional tests which cover all API methods of the client
-- examples executors which run all examples except AuthTlsExample
-- AuthTlsExample executor
-
-## Tests Installation ##
-
-(temporary, while the NPM module is not released on [npmjs](https://www.npmjs.com))
-
-Tests are installed along with the client.
-Follow the [instructions in the main readme](../README.md#installation).
-
-## Tests Running ##
-
-1. Run Apache Ignite server locally or remotely with default configuration.
-2. Set the environment variable:
-    - **APACHE_IGNITE_CLIENT_ENDPOINTS** - comma separated list of Ignite node endpoints.
-    - **APACHE_IGNITE_CLIENT_DEBUG** - (optional) if *true*, tests will display additional output (default: *false*).
-3. Alternatively, instead of the environment variables setting, you can directly specify the values of the corresponding variables in [local_ignite_path/modules/platforms/nodejspec/config.js](./config.js) file.
-4. Run the tests:
-
-### Run Functional Tests ###
-
-Call `npm test` command from `local_ignite_path/modules/platforms/nodejs` folder.
-
-### Run Examples Executors ###
-
-Call `npm run test:examples` command from `local_ignite_path/modules/platforms/nodejs` folder.
-
-### Run AuthTlsExample Executor ###
-
-It requires running Apache Ignite server with non-default configuration (authentication and TLS switched on).
-
-If the server runs locally:
-- setup the server to accept TLS. During the setup use `keystore.jks` and `truststore.jks` certificates from `local_ignite_path/modules/platforms/nodejs/examples/certs/` folder. Password for the files: `123456`
-- switch on the authentication on the server. Use the default username/password.
-
-If the server runs remotely, and/or other certificates are required, and/or non-default username/password is required - see this [instruction](../examples/README.md#additional-setup-for-authtlsexample).
-
-Call `npm run test:auth_example` command from `local_ignite_path/modules/platforms/nodejs` folder.
diff --git a/modules/platforms/nodejs/spec/cache/ComplexObject.spec.js b/modules/platforms/nodejs/spec/cache/ComplexObject.spec.js
index 2119ab5..9cc8115 100644
--- a/modules/platforms/nodejs/spec/cache/ComplexObject.spec.js
+++ b/modules/platforms/nodejs/spec/cache/ComplexObject.spec.js
@@ -28,6 +28,9 @@
 const BinaryObject = IgniteClient.BinaryObject;
 
 const CACHE_NAME = '__test_cache';
+const ONE_BYTE_MAX_OFFSET = 0x100 - 1;
+const TWO_BYTES_MAX_OFFSET = 0x10000 - 1;
+const COMPLEX_OBJECT_HEADER_LENGTH = 24;
 
 class Class1 {
     constructor() {
@@ -323,6 +326,79 @@
             catch(error => done.fail(error));
     });
 
+    it('put get complex objects with one byte offset', (done) => {
+        Promise.resolve().
+            then(async () => {
+                const key = new Date();
+                const valueType = new ComplexObjectType(new Class2(), 'Class2WithStrings');
+                const value = new Class2();
+                value.field_2_1 = 'x';
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+                const oneByteMaxLen = getMaxFieldLength(true);
+                value.field_2_1 = 'x'.repeat(oneByteMaxLen);
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+            }).
+            then(done).
+            catch(error => done.fail(error));
+    });
+
+    it('put get complex objects with two bytes offset', (done) => {
+        Promise.resolve().
+            then(async () => {
+                const key = new Date();
+                const valueType = new ComplexObjectType(new Class2(), 'Class2WithStrings');
+                const value = new Class2();
+                const oneByteMaxLen = getMaxFieldLength(true);
+                value.field_2_1 = 'x'.repeat(oneByteMaxLen + 1);
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+                const twoBytesMaxLen = getMaxFieldLength(false);
+                value.field_2_1 = 'x'.repeat(twoBytesMaxLen);
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+            }).
+            then(done).
+            catch(error => done.fail(error));
+    });
+
+    it('put get complex objects with four bytes offset', (done) => {
+        Promise.resolve().
+            then(async () => {
+                const key = new Date();
+                const valueType = new ComplexObjectType(new Class2(), 'Class2WithStrings');
+                const value = new Class2();
+                const twoBytesMaxLen = getMaxFieldLength(false);
+                value.field_2_1 = 'x'.repeat(twoBytesMaxLen + 1);
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+                value.field_2_1 = 'x'.repeat(twoBytesMaxLen * 2);
+                value.field_2_2 = 'y';
+                await putGetComplexObjects(key, value, null, valueType, value);
+            }).
+            then(done).
+            catch(error => done.fail(error));
+    });
+
+    it('put get complex objects without schema', (done) => {
+        Promise.resolve().
+            then(async () => {
+                const key = new Date();
+                const valueType = new ComplexObjectType({});
+                const value = {};
+                await putGetComplexObjects(key, value, null, valueType, value);
+            }).
+            then(done).
+            catch(error => done.fail(error));
+    });
+
+    function getMaxFieldLength(oneByte) {
+        const maxOffset = oneByte ? ONE_BYTE_MAX_OFFSET : TWO_BYTES_MAX_OFFSET;
+        // max offset - field type code - field type (string) length - complex object header length
+        return maxOffset - 1 - 4 - COMPLEX_OBJECT_HEADER_LENGTH;
+    }
+
     async function testSuiteCleanup(done) {
         await TestingHelper.destroyCache(CACHE_NAME, done);
     }
diff --git a/modules/platforms/nodejs/spec/examples/AuthExample.spec.js b/modules/platforms/nodejs/spec/examples/AuthExample.spec.js
index 3fb9205..667a396 100644
--- a/modules/platforms/nodejs/spec/examples/AuthExample.spec.js
+++ b/modules/platforms/nodejs/spec/examples/AuthExample.spec.js
@@ -20,6 +20,11 @@
 const TestingHelper = require('../TestingHelper');
 
 describe('execute auth example >', () => {
+    beforeAll((done) => {
+        jasmine.DEFAULT_TIMEOUT_INTERVAL = TestingHelper.TIMEOUT;
+        done();
+    });
+
     it('AuthTlsExample', (done) => {
         TestingHelper.executeExample('examples/AuthTlsExample.js').
             then(done).
diff --git a/modules/platforms/nodejs/spec/examples/Examples.spec.js b/modules/platforms/nodejs/spec/examples/Examples.spec.js
index c8dce3c..2ba5f08 100644
--- a/modules/platforms/nodejs/spec/examples/Examples.spec.js
+++ b/modules/platforms/nodejs/spec/examples/Examples.spec.js
@@ -20,6 +20,11 @@
 const TestingHelper = require('../TestingHelper');
 
 describe('execute examples >', () => {
+    beforeAll((done) => {
+        jasmine.DEFAULT_TIMEOUT_INTERVAL = TestingHelper.TIMEOUT;
+        done();
+    });
+
     it('CachePutGetExample', (done) => {
         TestingHelper.executeExample('examples/CachePutGetExample.js').
             then(done).
diff --git a/modules/rest-http/pom.xml b/modules/rest-http/pom.xml
index 8cc31da..84dc8a4 100644
--- a/modules/rest-http/pom.xml
+++ b/modules/rest-http/pom.xml
@@ -130,6 +130,28 @@
             <groupId>log4j</groupId>
             <artifactId>log4j</artifactId>
         </dependency>
+
+        <dependency>
+            <groupId>org.apache.ignite</groupId>
+            <artifactId>ignite-core</artifactId>
+            <version>${project.version}</version>
+            <type>test-jar</type>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-beans</artifactId>
+            <version>${spring-5.0.version}</version>
+            <scope>test</scope>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+            <version>${spring-5.0.version}</version>
+            <scope>test</scope>
+        </dependency>
     </dependencies>
 
     <build>
diff --git a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java
index 99a8844..12dc0dd 100644
--- a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java
+++ b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestHandler.java
@@ -29,6 +29,7 @@
 import java.sql.Date;
 import java.sql.Time;
 import java.sql.Timestamp;
+import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -42,6 +43,7 @@
 
 import org.apache.ignite.IgniteCheckedException;
 import org.apache.ignite.IgniteLogger;
+import org.apache.ignite.IgniteSystemProperties;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.internal.processors.cache.CacheConfigurationOverride;
 import org.apache.ignite.internal.processors.rest.GridRestCommand;
@@ -58,6 +60,7 @@
 import org.apache.ignite.internal.processors.rest.request.RestUserActionRequest;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
 import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgniteUuid;
 import org.apache.ignite.plugin.security.SecurityCredentials;
@@ -68,6 +71,7 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import org.jetbrains.annotations.Nullable;
 
+import static org.apache.ignite.IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY;
 import static org.apache.ignite.internal.client.GridClientCacheFlag.KEEP_BINARIES_MASK;
 import static org.apache.ignite.internal.processors.rest.GridRestCommand.CACHE_CONTAINS_KEYS;
 import static org.apache.ignite.internal.processors.rest.GridRestCommand.CACHE_GET_ALL;
@@ -86,13 +90,13 @@
     private static final String CHARSET = StandardCharsets.UTF_8.name();
 
     /** */
-    private static final String  USER_PARAM = "user";
+    private static final String USER_PARAM = "user";
 
     /** */
-    private static final String  PWD_PARAM = "password";
+    private static final String PWD_PARAM = "password";
 
     /** */
-    private static final String  CACHE_NAME_PARAM = "cacheName";
+    private static final String CACHE_NAME_PARAM = "cacheName";
 
     /** */
     private static final String BACKUPS_PARAM = "backups";
@@ -113,7 +117,7 @@
     private static final String IGNITE_PASSWORD = "ignite.password";
 
     /** */
-    private static final String  TEMPLATE_NAME_PARAM = "templateName";
+    private static final String TEMPLATE_NAME_PARAM = "templateName";
 
     /** */
     private static final NullOutputStream NULL_OUTPUT_STREAM = new NullOutputStream();
@@ -136,6 +140,9 @@
     /** Mapper from Java object to JSON. */
     private final ObjectMapper jsonMapper;
 
+    /** */
+    private final boolean getAllAsArray = IgniteSystemProperties.getBoolean(IGNITE_REST_GETALL_AS_ARRAY);
+
     /**
      * Creates new HTTP requests handler.
      *
@@ -391,6 +398,15 @@
             if (cmdRes == null)
                 throw new IllegalStateException("Received null result from handler: " + hnd);
 
+            if (getAllAsArray && cmd == GridRestCommand.CACHE_GET_ALL) {
+                List<Object> resKeyValue = new ArrayList<>();
+
+                for (Map.Entry<Object, Object> me : ((Map<Object, Object>)cmdRes.getResponse()).entrySet())
+                    resKeyValue.add(new IgniteBiTuple<>(me.getKey(), me.getValue()));
+
+                cmdRes.setResponse(resKeyValue);
+            }
+
             byte[] sesTok = cmdRes.sessionTokenBytes();
 
             if (sesTok != null)
diff --git a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestProtocol.java b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestProtocol.java
index 11c0eb6..1818d7c 100644
--- a/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestProtocol.java
+++ b/modules/rest-http/src/main/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridJettyRestProtocol.java
@@ -63,13 +63,11 @@
      */
     static {
         if (!IgniteSystemProperties.getBoolean(IGNITE_JETTY_LOG_NO_OVERRIDE)) {
-            Properties p = new Properties();
-
-            p.setProperty("org.eclipse.jetty.LEVEL", "WARN");
-            p.setProperty("org.eclipse.jetty.util.log.LEVEL", "OFF");
-            p.setProperty("org.eclipse.jetty.util.component.LEVEL", "OFF");
-
-            StdErrLog.setProperties(p);
+            // See also https://www.eclipse.org/jetty/documentation/9.4.x/configuring-logging.html
+            // It seems that using system properties should be fine.
+            System.setProperty("org.eclipse.jetty.LEVEL", "WARN");
+            System.setProperty("org.eclipse.jetty.util.log.LEVEL", "OFF");
+            System.setProperty("org.eclipse.jetty.util.component.LEVEL", "OFF");
 
             try {
                 Class<?> logCls = Class.forName("org.apache.log4j.Logger");
diff --git a/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridRestSuite.java b/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridRestSuite.java
new file mode 100644
index 0000000..c69ce7f
--- /dev/null
+++ b/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/GridRestSuite.java
@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ignite.internal.processors.rest.protocols.http.jetty;
+
+import junit.framework.TestSuite;
+
+/**
+ * Integration tests for Grid REST functionality; Jetty is under the hood.
+ */
+public class GridRestSuite extends TestSuite {
+    /**
+     * @return Suite that contains all tests for REST.
+     */
+    public static TestSuite suite() {
+        TestSuite suite = new TestSuite("Apache Ignite REST Api suite");
+
+        suite.addTest(new TestSuite(RestSetupSimpleTest.class));
+
+        return suite;
+    }
+}
diff --git a/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/RestSetupSimpleTest.java b/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/RestSetupSimpleTest.java
new file mode 100644
index 0000000..7c076f7
--- /dev/null
+++ b/modules/rest-http/src/test/java/org/apache/ignite/internal/processors/rest/protocols/http/jetty/RestSetupSimpleTest.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.internal.processors.rest.protocols.http.jetty;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import java.io.InputStreamReader;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Map;
+import org.apache.ignite.configuration.ConnectorConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+
+/**
+ * Integration test for Grid REST functionality; Jetty is under the hood.
+ */
+public class RestSetupSimpleTest extends GridCommonAbstractTest {
+    /** Jetty port. */
+    private static final int JETTY_PORT = 8080;
+
+    /** {@inheritDoc} */
+    @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
+        IgniteConfiguration configuration = super.getConfiguration(igniteInstanceName);
+
+        configuration.setConnectorConfiguration(new ConnectorConfiguration());
+
+        return configuration;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void beforeTestsStarted() throws Exception {
+        startGrid(0);
+    }
+
+    /** {@inheritDoc} */
+    @Override protected void afterTestsStopped() throws Exception {
+        stopAllGrids(true);
+    }
+
+    /**
+     * Runs version command using GridJettyRestProtocol.
+     */
+    public void testVersionCommand() throws Exception {
+        URLConnection conn = new URL("http://localhost:" + JETTY_PORT + "/ignite?cmd=version").openConnection();
+
+        conn.connect();
+
+        try (InputStreamReader streamReader = new InputStreamReader(conn.getInputStream())) {
+            ObjectMapper objMapper = new ObjectMapper();
+            Map<String, Object> myMap = objMapper.readValue(streamReader,
+                new TypeReference<Map<String, Object>>() {
+                });
+
+            log.info("Version command response is: " + myMap);
+
+            assertTrue(myMap.containsKey("response"));
+            assertEquals(0, myMap.get("successStatus"));
+        }
+    }
+}
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/IgniteSQLRelation.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/IgniteSQLRelation.scala
index 485ddf6..1b4f277 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/IgniteSQLRelation.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/IgniteSQLRelation.scala
@@ -119,7 +119,7 @@
 
         StructType(columns.map { case (name, dataType) ⇒
             StructField(
-                name = name,
+                name = table.getAliases.getOrDefault(name, name),
                 dataType = IgniteRDD.dataType(dataType, name),
                 nullable = !isKeyColumn(table, name),
                 metadata = Metadata.empty)
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/SimpleExpressions.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/SimpleExpressions.scala
index 10d021a..4e54ffc 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/SimpleExpressions.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/SimpleExpressions.scala
@@ -48,41 +48,43 @@
     /** @inheritdoc */
     override def toString(expr: Expression, childToString: Expression ⇒ String, useQualifier: Boolean,
         useAlias: Boolean): Option[String] = expr match {
-        case l: Literal ⇒ l.dataType match {
-            case StringType ⇒
-                Some("'" + l.value.toString + "'")
+        case l: Literal ⇒
+            if (l.value == null)
+                Some("null")
+            else {
+                l.dataType match {
+                    case StringType ⇒
+                        Some("'" + l.value.toString + "'")
 
-            case TimestampType ⇒
-                l.value match {
-                    //Internal representation of TimestampType is Long.
-                    //So we converting from internal spark representation to CAST call.
-                    case date: Long ⇒
-                        Some(s"CAST('${timestampFormat.get.format(DateTimeUtils.toJavaTimestamp(date))}' AS TIMESTAMP)")
+                    case TimestampType ⇒
+                        l.value match {
+                            //Internal representation of TimestampType is Long.
+                            //So we converting from internal spark representation to CAST call.
+                            case date: Long ⇒
+                                Some(s"CAST('${timestampFormat.get.format(DateTimeUtils.toJavaTimestamp(date))}' " +
+                                    s"AS TIMESTAMP)")
+
+                            case _ ⇒
+                                Some(l.value.toString)
+                        }
+
+                    case DateType ⇒
+                        l.value match {
+                            //Internal representation of DateType is Int.
+                            //So we converting from internal spark representation to CAST call.
+                            case days: Integer ⇒
+                                val date = new java.util.Date(DateTimeUtils.daysToMillis(days))
+
+                                Some(s"CAST('${dateFormat.get.format(date)}' AS DATE)")
+
+                            case _ ⇒
+                                Some(l.value.toString)
+                        }
 
                     case _ ⇒
                         Some(l.value.toString)
                 }
-
-            case DateType ⇒
-                l.value match {
-                    //Internal representation of DateType is Int.
-                    //So we converting from internal spark representation to CAST call.
-                    case days: Integer ⇒
-                        val date = new java.util.Date(DateTimeUtils.daysToMillis(days))
-
-                        Some(s"CAST('${dateFormat.get.format(date)}' AS DATE)")
-
-                    case _ ⇒
-                        Some(l.value.toString)
-                }
-
-            case _ ⇒
-                if (l.value == null)
-                    Some("null")
-                else
-                    Some(l.value.toString)
-        }
-
+            }
         case ar: AttributeReference ⇒
             val name =
                 if (useQualifier)
@@ -90,9 +92,11 @@
                 else
                     ar.name
 
-            if (ar.metadata.contains(ALIAS) && !isAliasEqualColumnName(ar.metadata.getString(ALIAS), ar.name) && useAlias)
+            if (ar.metadata.contains(ALIAS) &&
+                !isAliasEqualColumnName(ar.metadata.getString(ALIAS), ar.name) &&
+                useAlias) {
                 Some(aliasToString(name, ar.metadata.getString(ALIAS)))
-            else
+            } else
                 Some(name)
 
         case Alias(child, name) ⇒
@@ -142,7 +146,8 @@
             Set[DataType](BooleanType, StringType)(to)
 
         case ByteType ⇒
-            Set(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, DecimalType(_, _), StringType)(to)
+            Set(ByteType, ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, DecimalType(_, _),
+                StringType)(to)
 
         case ShortType ⇒
             Set(ShortType, IntegerType, LongType, FloatType, DoubleType, StringType, DecimalType(_, _))(to)
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/JoinSQLAccumulator.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/JoinSQLAccumulator.scala
index 7ae5e70..baf5a8b 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/JoinSQLAccumulator.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/JoinSQLAccumulator.scala
@@ -44,7 +44,7 @@
     orderBy: Option[Seq[SortOrder]] = None
 ) extends BinaryNode with SelectAccumulator {
     /** @inheritdoc */
-    override def compileQuery(prettyPrint: Boolean = false): String = {
+    override def compileQuery(prettyPrint: Boolean = false, nestedQuery: Boolean = false): String = {
         val delim = if (prettyPrint) "\n" else " "
         val tab = if (prettyPrint) "  " else ""
 
@@ -68,9 +68,13 @@
             sql += s"${delim}ORDER BY " +
                 s"${fixQualifier(orderBy.get).map(exprToString(_, useQualifier = true)).mkString(s",$delim$tab")}"
 
-        if (limit.isDefined)
+        if (limit.isDefined) {
             sql += s" LIMIT ${exprToString(fixQualifier0(limit.get), useQualifier = true)}"
 
+            if (nestedQuery)
+                sql = s"SELECT * FROM ($sql)"
+        }
+
         sql
     }
 
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/QueryAccumulator.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/QueryAccumulator.scala
index 133d355..9570a66 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/QueryAccumulator.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/QueryAccumulator.scala
@@ -18,7 +18,7 @@
 package org.apache.ignite.spark.impl.optimization.accumulator
 
 import org.apache.ignite.spark.impl.optimization.IgniteQueryContext
-import org.apache.spark.sql.catalyst.expressions.{NamedExpression, SortOrder}
+import org.apache.spark.sql.catalyst.expressions.{Expression, NamedExpression, SortOrder}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
 /**
@@ -53,10 +53,20 @@
     def withOrderBy(orderBy: Seq[SortOrder]): QueryAccumulator
 
     /**
+      * @return Copy of this accumulator with `limit` expression.
+      */
+    def withLimit(limit: Expression): QueryAccumulator
+
+    /**
+      * @return Copy of this accumulator with `localLimit` expression.
+      */
+    def withLocalLimit(localLimit: Expression): QueryAccumulator
+
+    /**
       * @param prettyPrint If true human readable query will be generated.
       * @return SQL query.
       */
-    def compileQuery(prettyPrint: Boolean = false): String
+    def compileQuery(prettyPrint: Boolean = false, nestedQuery: Boolean = false): String
 
     /**
       * @return Qualifier that should be use to select data from this accumulator.
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/SingleTableSQLAccumulator.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/SingleTableSQLAccumulator.scala
index 47035b9..735740f 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/SingleTableSQLAccumulator.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/SingleTableSQLAccumulator.scala
@@ -42,7 +42,7 @@
     orderBy: Option[Seq[SortOrder]] = None
 ) extends SelectAccumulator {
     /** @inheritdoc */
-    override def compileQuery(prettyPrint: Boolean = false): String = {
+    override def compileQuery(prettyPrint: Boolean = false, nestedQuery: Boolean = false): String = {
         val delim = if (prettyPrint) "\n" else " "
         val tab = if (prettyPrint) "  " else ""
 
@@ -61,9 +61,13 @@
         if (orderBy.exists(_.nonEmpty))
             sql += s"${delim}ORDER BY ${orderBy.get.map(exprToString(_)).mkString(s",$delim$tab")}"
 
-        if (limit.isDefined)
+        if (limit.isDefined) {
             sql += s" LIMIT ${limit.map(exprToString(_)).get}"
 
+            if (nestedQuery)
+                sql = s"SELECT * FROM ($sql)"
+        }
+
         sql
     }
 
diff --git a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/UnionSQLAccumulator.scala b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/UnionSQLAccumulator.scala
index 723e17a..5f870e3 100644
--- a/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/UnionSQLAccumulator.scala
+++ b/modules/spark/src/main/scala/org/apache/ignite/spark/impl/optimization/accumulator/UnionSQLAccumulator.scala
@@ -18,7 +18,7 @@
 package org.apache.ignite.spark.impl.optimization.accumulator
 
 import org.apache.ignite.spark.impl.optimization.{IgniteQueryContext, exprToString, toAttributeReference}
-import org.apache.spark.sql.catalyst.expressions.{Attribute, NamedExpression, SortOrder}
+import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression, NamedExpression, SortOrder}
 
 /**
   * Accumulator to store info about UNION query.
@@ -27,21 +27,32 @@
     igniteQueryContext: IgniteQueryContext,
     children: Seq[QueryAccumulator],
     outputExpressions: Seq[NamedExpression],
+    limit: Option[Expression] = None,
+    localLimit: Option[Expression] = None,
     orderBy: Option[Seq[SortOrder]] = None
 ) extends QueryAccumulator {
     /** @inheritdoc */
-    override def compileQuery(prettyPrint: Boolean = false): String = {
+    override def compileQuery(prettyPrint: Boolean = false, nestedQuery: Boolean = false): String = {
         val delim = if (prettyPrint) "\n" else " "
         val tab = if (prettyPrint) "  " else ""
 
-        val query = children.map(_.compileQuery(prettyPrint)).mkString(s"${delim}UNION$delim")
+        var query = children.map(_.compileQuery(prettyPrint, nestedQuery = true)).mkString(s"${delim}UNION$delim")
 
-        orderBy match {
+        query = orderBy match {
             case Some(sortOrders) ⇒
                 query + s"${delim}ORDER BY ${sortOrders.map(exprToString(_)).mkString(s",$delim$tab")}"
 
             case None ⇒ query
         }
+
+        if (limit.isDefined) {
+            query += s" LIMIT ${exprToString(limit.get)}"
+
+            if (nestedQuery)
+                query = s"SELECT * FROM ($query)"
+        }
+
+        query
     }
 
     /** @inheritdoc */
@@ -60,4 +71,10 @@
 
     /** @inheritdoc */
     override lazy val qualifier: String = igniteQueryContext.uniqueTableAlias
+
+    /** @inheritdoc */
+    override def withLimit(limit: Expression): QueryAccumulator = copy(limit = Some(limit))
+
+    /** @inheritdoc */
+    override def withLocalLimit(localLimit: Expression): QueryAccumulator =  copy(localLimit = Some(localLimit))
 }
diff --git a/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteOptimization.scala b/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteOptimization.scala
index 4a0f791..2d97792 100644
--- a/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteOptimization.scala
+++ b/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteOptimization.scala
@@ -126,7 +126,7 @@
                         if (acc.groupBy.isDefined) {
                             val tableAlias = acc.igniteQueryContext.uniqueTableAlias
 
-                            accumulator.SingleTableSQLAccumulator(
+                            SingleTableSQLAccumulator(
                                 igniteQueryContext = acc.igniteQueryContext,
                                 table = None,
                                 tableExpression = Some((acc, tableAlias)),
@@ -141,7 +141,7 @@
                     case acc: QueryAccumulator ⇒
                         val tableAlias = acc.igniteQueryContext.uniqueTableAlias
 
-                        accumulator.SingleTableSQLAccumulator(
+                        SingleTableSQLAccumulator(
                             igniteQueryContext = acc.igniteQueryContext,
                             table = None,
                             tableExpression = Some((acc, tableAlias)),
@@ -156,6 +156,9 @@
                     case acc: SelectAccumulator ⇒
                         acc.withLocalLimit(limit.limitExpr)
 
+                    case acc: QueryAccumulator ⇒
+                        acc.withLocalLimit(limit.limitExpr)
+
                     case _ ⇒
                         throw new IgniteException("stepSkipped == true but child is not SelectAccumulator")
                 }
@@ -165,6 +168,9 @@
                     case acc: SelectAccumulator ⇒
                         acc.withLimit(limit.limitExpr)
 
+                    case acc: QueryAccumulator ⇒
+                        acc.withLimit(limit.limitExpr)
+
                     case _ ⇒
                         throw new IgniteException("stepSkipped == true but child is not SelectAccumulator")
                 }
diff --git a/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteSparkSession.scala b/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteSparkSession.scala
index 1cc63ed..9bf6017 100644
--- a/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteSparkSession.scala
+++ b/modules/spark/src/main/scala/org/apache/spark/sql/ignite/IgniteSparkSession.scala
@@ -43,9 +43,16 @@
 /**
   * Implementation of Spark Session for Ignite.
   */
-class IgniteSparkSession private(ic: IgniteContext, proxy: SparkSession) extends SparkSession(proxy.sparkContext) {
+class IgniteSparkSession private(
+    ic: IgniteContext,
+    proxy: SparkSession,
+    existingSharedState: Option[SharedState],
+    parentSessionState: Option[SessionState]) extends SparkSession(proxy.sparkContext) {
     self ⇒
 
+    private def this(ic: IgniteContext, proxy: SparkSession) =
+        this(ic, proxy, None, None)
+
     private def this(proxy: SparkSession) =
         this(new IgniteContext(proxy.sparkContext, IgnitionEx.DFLT_CFG), proxy)
 
@@ -63,16 +70,20 @@
 
     /** @inheritdoc */
     @transient override lazy val sharedState: SharedState =
-        new IgniteSharedState(ic, sparkContext)
+        existingSharedState.getOrElse(new IgniteSharedState(ic, sparkContext))
 
     /** @inheritdoc */
     @transient override lazy val sessionState: SessionState = {
-        val sessionState = new SessionStateBuilder(self, None).build()
+        parentSessionState
+            .map(_.clone(this))
+            .getOrElse {
+                val sessionState = new SessionStateBuilder(self, None).build()
 
-        sessionState.experimentalMethods.extraOptimizations =
-            sessionState.experimentalMethods.extraOptimizations :+ IgniteOptimization
+                sessionState.experimentalMethods.extraOptimizations =
+                    sessionState.experimentalMethods.extraOptimizations :+ IgniteOptimization
 
-        sessionState
+                sessionState
+          }
     }
 
     /** @inheritdoc */
@@ -172,7 +183,13 @@
     }
 
     /** @inheritdoc */
-    override private[sql] def cloneSession() = new IgniteSparkSession(ic, proxy.cloneSession())
+    override private[sql] def cloneSession(): IgniteSparkSession = {
+        val session = new IgniteSparkSession(ic, proxy.cloneSession(), Some(sharedState), Some(sessionState))
+
+        session.sessionState // Force copy of SessionState.
+
+        session
+    }
 
     /** @inheritdoc */
     @transient override private[sql] val extensions =
diff --git a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSchemaSpec.scala b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSchemaSpec.scala
index c5df901..b071008 100644
--- a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSchemaSpec.scala
+++ b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSchemaSpec.scala
@@ -17,6 +17,8 @@
 
 package org.apache.ignite.spark
 
+import org.apache.ignite.cache.query.annotations.QuerySqlField
+import org.apache.ignite.configuration.CacheConfiguration
 import org.apache.ignite.spark.AbstractDataFrameSpec._
 import org.apache.spark.sql.DataFrame
 import org.apache.spark.sql.types._
@@ -24,6 +26,8 @@
 import org.scalatest.junit.JUnitRunner
 import org.apache.ignite.spark.IgniteDataFrameSettings._
 
+import scala.annotation.meta.field
+
 /**
   * Tests to check loading schema for Ignite data sources.
   */
@@ -33,6 +37,8 @@
 
     var employeeDataFrame: DataFrame = _
 
+    var personWithAliasesDataFrame: DataFrame = _
+
     describe("Loading DataFrame schema for Ignite tables") {
         it("should successfully load DataFrame schema for a Ignite SQL Table") {
             personDataFrame.schema.fields.map(f ⇒ (f.name, f.dataType, f.nullable)) should equal (
@@ -52,9 +58,17 @@
         it("should successfully load DataFrame data for a Ignite table configured throw java annotation") {
             employeeDataFrame.schema.fields.map(f ⇒ (f.name, f.dataType, f.nullable)) should equal (
                 Array(
-                    ("id", LongType, true),
-                    ("name", StringType, true),
-                    ("salary", FloatType, true))
+                    ("ID", LongType, true),
+                    ("NAME", StringType, true),
+                    ("SALARY", FloatType, true))
+            )
+        }
+
+        it("should use QueryEntity column aliases") {
+            personWithAliasesDataFrame.schema.fields.map(f ⇒ (f.name, f.dataType, f.nullable)) should equal (
+                Array(
+                    ("ID", LongType, true),
+                    ("PERSON_NAME", StringType, true))
             )
         }
     }
@@ -62,6 +76,16 @@
     override protected def beforeAll(): Unit = {
         super.beforeAll()
 
+        client.getOrCreateCache(new CacheConfiguration[Long, JPersonWithAlias]()
+            .setName("P3")
+            .setIndexedTypes(classOf[Long], classOf[JPersonWithAlias]))
+
+        personWithAliasesDataFrame = spark.read
+            .format(FORMAT_IGNITE)
+            .option(OPTION_CONFIG_FILE, TEST_CONFIG_FILE)
+            .option(OPTION_TABLE, classOf[JPersonWithAlias].getSimpleName)
+            .load()
+
         createPersonTable(client, DEFAULT_CACHE)
 
         createEmployeeCache(client, EMPLOYEE_CACHE_NAME)
@@ -82,4 +106,8 @@
 
         employeeDataFrame.createOrReplaceTempView("employee")
     }
+
+    case class JPersonWithAlias(
+        @(QuerySqlField @field) id: Long,
+        @(QuerySqlField @field)(name = "person_name", index = true) name: String)
 }
diff --git a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSuite.scala b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSuite.scala
index e1bb7ff..728cde6 100644
--- a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSuite.scala
+++ b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteDataFrameSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.spark
 
+import org.apache.spark.sql.ignite.IgniteSparkSessionSpec
 import org.scalatest.Suites
 
 /**
@@ -36,5 +37,6 @@
     new IgniteOptimizationSystemFuncSpec,
     new IgniteOptimizationJoinSpec,
     new IgniteOptimizationDateFuncSpec,
-    new IgniteOptimizationDisableEnableSpec
+    new IgniteOptimizationDisableEnableSpec,
+    new IgniteSparkSessionSpec
 )
diff --git a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteOptimizationSpec.scala b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteOptimizationSpec.scala
index ff367af..c2b5973 100644
--- a/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteOptimizationSpec.scala
+++ b/modules/spark/src/test/scala/org/apache/ignite/spark/IgniteOptimizationSpec.scala
@@ -17,12 +17,20 @@
 
 package org.apache.ignite.spark
 
+import org.apache.ignite.cache.query.annotations.QuerySqlField
+import org.apache.ignite.configuration.CacheConfiguration
 import org.apache.spark.sql.ignite.IgniteSparkSession
 import org.junit.runner.RunWith
 import org.scalatest.junit.JUnitRunner
 import org.apache.ignite.internal.IgnitionEx
 import org.apache.ignite.internal.util.IgniteUtils.resolveIgnitePath
 import org.apache.ignite.spark.AbstractDataFrameSpec.{DEFAULT_CACHE, TEST_CONFIG_FILE, checkOptimizationResult, enclose}
+import org.apache.ignite.spark.IgniteDataFrameSettings.{FORMAT_IGNITE, OPTION_TABLE}
+import org.apache.spark.sql.functions.lit
+import org.apache.spark.sql.types.DataTypes.StringType
+import org.apache.spark.sql.{Dataset, Row}
+
+import scala.annotation.meta.field
 
 /**
   */
@@ -232,6 +240,25 @@
 
             checkQueryData(df, data)
         }
+
+        it("Should optimize union") {
+            val union = readTable("JPerson").union(readTable("JPerson2"))
+
+            val data = (
+                (1, "JPerson-1"),
+                (2, "JPerson-2"))
+
+            checkQueryData(union, data)
+        }
+
+        it("Should optimize null column") {
+            val p = readTable("JPerson").withColumn("nullColumn", lit(null).cast(StringType))
+
+            val data = Tuple1(
+                (1, "JPerson-1", null))
+
+            checkQueryData(p, data)
+        }
     }
 
     describe("Not Optimized Queries") {
@@ -278,6 +305,13 @@
         }
     }
 
+    def readTable(tblName: String): Dataset[Row] =
+        igniteSession.read
+            .format(FORMAT_IGNITE)
+            .option(OPTION_TABLE, tblName)
+            .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE, TEST_CONFIG_FILE)
+            .load
+
     override protected def beforeAll(): Unit = {
         super.beforeAll()
 
@@ -285,6 +319,20 @@
 
         createCityTable(client, DEFAULT_CACHE)
 
+        val p = client.getOrCreateCache(new CacheConfiguration[Long, JPerson]()
+            .setName("P")
+            .setSqlSchema("SQL_PUBLIC")
+            .setIndexedTypes(classOf[Long], classOf[JPerson]))
+
+        p.put(1L, new JPerson(1L, "JPerson-1"))
+
+        val p2 = client.getOrCreateCache(new CacheConfiguration[Long, JPerson2]()
+            .setName("P2")
+            .setSqlSchema("SQL_PUBLIC")
+            .setIndexedTypes(classOf[Long], classOf[JPerson2]))
+
+        p2.put(1L, new JPerson2(2L, "JPerson-2"))
+
         val configProvider = enclose(null) (x ⇒ () ⇒ {
             val cfg = IgnitionEx.loadConfiguration(TEST_CONFIG_FILE).get1()
 
@@ -302,4 +350,12 @@
 
         igniteSession.udf.register("test_reverse", (str: String) ⇒ str.reverse)
     }
+
+    case class JPerson(
+        @(QuerySqlField @field) id: Long,
+        @(QuerySqlField @field)(index = true) name: String)
+
+    case class JPerson2(
+        @(QuerySqlField @field) id: Long,
+        @(QuerySqlField @field)(index = true) name: String)
 }
diff --git a/modules/spark/src/test/scala/org/apache/spark/sql/ignite/IgniteSparkSessionSpec.scala b/modules/spark/src/test/scala/org/apache/spark/sql/ignite/IgniteSparkSessionSpec.scala
new file mode 100644
index 0000000..fa9d2ee
--- /dev/null
+++ b/modules/spark/src/test/scala/org/apache/spark/sql/ignite/IgniteSparkSessionSpec.scala
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.ignite
+
+import org.apache.ignite.internal.IgnitionEx
+import org.apache.ignite.internal.util.IgniteUtils.resolveIgnitePath
+import org.apache.ignite.spark.AbstractDataFrameSpec
+import org.apache.ignite.spark.AbstractDataFrameSpec.{DEFAULT_CACHE, TEST_CONFIG_FILE, enclose}
+import org.junit.runner.RunWith
+import org.scalatest.junit.JUnitRunner
+
+/**
+  * Tests to check Spark Session implementation.
+  */
+@RunWith(classOf[JUnitRunner])
+class IgniteSparkSessionSpec extends AbstractDataFrameSpec {
+    var igniteSession: IgniteSparkSession = _
+
+    describe("Ignite Spark Session Implementation") {
+        it("should keep session state after session clone") {
+            val dfProvider = (s: IgniteSparkSession) => {
+                s.read.json(resolveIgnitePath("modules/spark/src/test/resources/cities.json").getAbsolutePath)
+                    .filter("name = 'Denver'")
+            }
+
+            var df = dfProvider(igniteSession).cache()
+
+            val cachedData = igniteSession.sharedState.cacheManager.lookupCachedData(df)
+
+            cachedData shouldBe defined
+
+            val otherSession = igniteSession.cloneSession()
+
+            df = dfProvider(otherSession)
+
+            val otherCachedData = otherSession.sharedState.cacheManager.lookupCachedData(df)
+
+            otherCachedData shouldBe defined
+
+            cachedData shouldEqual otherCachedData
+        }
+    }
+
+    override protected def beforeAll(): Unit = {
+        super.beforeAll()
+
+        createCityTable(client, DEFAULT_CACHE)
+
+        val configProvider = enclose(null)(_ ⇒ () ⇒ {
+            val cfg = IgnitionEx.loadConfiguration(TEST_CONFIG_FILE).get1()
+
+            cfg.setClientMode(true)
+
+            cfg.setIgniteInstanceName("client-2")
+
+            cfg
+        })
+
+        igniteSession = IgniteSparkSession.builder()
+            .config(spark.sparkContext.getConf)
+            .igniteConfigProvider(configProvider)
+            .getOrCreate()
+    }
+}
diff --git a/modules/spring-data-2.0/pom.xml b/modules/spring-data-2.0/pom.xml
index 53cb254..0751389 100644
--- a/modules/spring-data-2.0/pom.xml
+++ b/modules/spring-data-2.0/pom.xml
@@ -77,10 +77,30 @@
                     <groupId>org.springframework</groupId>
                     <artifactId>spring-core</artifactId>
                 </exclusion>
-                  <exclusion>
+                <exclusion>
                     <groupId>org.springframework</groupId>
                     <artifactId>spring-beans</artifactId>
                 </exclusion>
+                <exclusion>
+                    <groupId>org.springframework</groupId>
+                    <artifactId>spring-aop</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.springframework</groupId>
+                    <artifactId>spring-context</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.springframework</groupId>
+                    <artifactId>spring-expression</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.springframework</groupId>
+                    <artifactId>spring-tx</artifactId>
+                </exclusion>
+                <exclusion>
+                    <groupId>org.springframework</groupId>
+                    <artifactId>spring-jdbc</artifactId>
+                </exclusion>
             </exclusions>
         </dependency>
 
@@ -98,6 +118,18 @@
         </dependency>
 
         <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-context</artifactId>
+            <version>${spring-5.0.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>org.springframework</groupId>
+            <artifactId>spring-tx</artifactId>
+            <version>${spring-5.0.version}</version>
+        </dependency>
+
+        <dependency>
             <groupId>org.apache.ignite</groupId>
             <artifactId>ignite-core</artifactId>
             <version>${project.version}</version>
diff --git a/modules/spring/src/main/java/org/apache/ignite/cache/spring/SpringCacheManager.java b/modules/spring/src/main/java/org/apache/ignite/cache/spring/SpringCacheManager.java
index 0b02f4c..7fb9383 100644
--- a/modules/spring/src/main/java/org/apache/ignite/cache/spring/SpringCacheManager.java
+++ b/modules/spring/src/main/java/org/apache/ignite/cache/spring/SpringCacheManager.java
@@ -315,26 +315,27 @@
 
     /** {@inheritDoc} */
     @Override public void onApplicationEvent(ContextRefreshedEvent event) {
-        assert ignite == null;
+        if (ignite == null) {
 
-        if (cfgPath != null && cfg != null) {
-            throw new IllegalArgumentException("Both 'configurationPath' and 'configuration' are " +
-                "provided. Set only one of these properties if you need to start a Ignite node inside of " +
-                "SpringCacheManager. If you already have a node running, omit both of them and set" +
-                "'igniteInstanceName' property.");
-        }
-
-        try {
-            if (cfgPath != null) {
-                ignite = IgniteSpring.start(cfgPath, springCtx);
+            if (cfgPath != null && cfg != null) {
+                throw new IllegalArgumentException("Both 'configurationPath' and 'configuration' are " +
+                    "provided. Set only one of these properties if you need to start a Ignite node inside of " +
+                    "SpringCacheManager. If you already have a node running, omit both of them and set" +
+                    "'igniteInstanceName' property.");
             }
-            else if (cfg != null)
-                ignite = IgniteSpring.start(cfg, springCtx);
-            else
-                ignite = Ignition.ignite(igniteInstanceName);
-        }
-        catch (IgniteCheckedException e) {
-            throw U.convertException(e);
+
+            try {
+                if (cfgPath != null) {
+                    ignite = IgniteSpring.start(cfgPath, springCtx);
+                }
+                else if (cfg != null)
+                    ignite = IgniteSpring.start(cfg, springCtx);
+                else
+                    ignite = Ignition.ignite(igniteInstanceName);
+            }
+            catch (IgniteCheckedException e) {
+                throw U.convertException(e);
+            }
         }
     }
 
diff --git a/modules/spring/src/main/java/org/apache/ignite/transactions/spring/SpringTransactionManager.java b/modules/spring/src/main/java/org/apache/ignite/transactions/spring/SpringTransactionManager.java
index 57c099a..bc9dd16 100644
--- a/modules/spring/src/main/java/org/apache/ignite/transactions/spring/SpringTransactionManager.java
+++ b/modules/spring/src/main/java/org/apache/ignite/transactions/spring/SpringTransactionManager.java
@@ -344,26 +344,26 @@
 
     /** {@inheritDoc} */
     @Override public void onApplicationEvent(ContextRefreshedEvent event) {
-        assert ignite == null;
-
-        if (cfgPath != null && cfg != null) {
-            throw new IllegalArgumentException("Both 'configurationPath' and 'configuration' are " +
+        if (ignite == null) {
+            if (cfgPath != null && cfg != null) {
+                throw new IllegalArgumentException("Both 'configurationPath' and 'configuration' are " +
                     "provided. Set only one of these properties if you need to start a Ignite node inside of " +
                     "SpringCacheManager. If you already have a node running, omit both of them and set" +
                     "'igniteInstanceName' property.");
-        }
-
-        try {
-            if (cfgPath != null) {
-                ignite = IgniteSpring.start(cfgPath, springCtx);
             }
-            else if (cfg != null)
-                ignite = IgniteSpring.start(cfg, springCtx);
-            else
-                ignite = Ignition.ignite(igniteInstanceName);
-        }
-        catch (IgniteCheckedException e) {
-            throw U.convertException(e);
+
+            try {
+                if (cfgPath != null) {
+                    ignite = IgniteSpring.start(cfgPath, springCtx);
+                }
+                else if (cfg != null)
+                    ignite = IgniteSpring.start(cfg, springCtx);
+                else
+                    ignite = Ignition.ignite(igniteInstanceName);
+            }
+            catch (IgniteCheckedException e) {
+                throw U.convertException(e);
+            }
         }
 
         if (transactionConcurrency == null)
diff --git a/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerMultiJvmSelfTest.java b/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerMultiJvmSelfTest.java
index a392bc9..acf09a7 100644
--- a/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerMultiJvmSelfTest.java
+++ b/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerMultiJvmSelfTest.java
@@ -55,6 +55,8 @@
      * @throws Exception If failed.
      */
     public void testSyncCache() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9488");
+
         IgniteEx loc = startGrid(0);
 
         final int threads = 4;
diff --git a/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerSpringBeanSelfTest.java b/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerSpringBeanSelfTest.java
index 8557cc2..8c5fc10 100644
--- a/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerSpringBeanSelfTest.java
+++ b/modules/spring/src/test/java/org/apache/ignite/cache/spring/GridSpringCacheManagerSpringBeanSelfTest.java
@@ -17,8 +17,9 @@
 
 package org.apache.ignite.cache.spring;
 
-import org.springframework.beans.factory.BeanFactory;
+import org.springframework.context.ApplicationContext;
 import org.springframework.context.support.ClassPathXmlApplicationContext;
+import org.springframework.context.support.GenericXmlApplicationContext;
 
 /**
  * Spring cache test.
@@ -27,10 +28,15 @@
 
     /** {@inheritDoc} */
     @Override protected void beforeTest() throws Exception {
-        BeanFactory factory = new ClassPathXmlApplicationContext("org/apache/ignite/cache/spring/spring-caching-ignite-spring-bean.xml");
+        ApplicationContext appCtx = new ClassPathXmlApplicationContext("org/apache/ignite/cache/spring/spring-caching-ignite-spring-bean.xml");
 
-        svc = (GridSpringCacheTestService)factory.getBean("testService");
-        dynamicSvc = (GridSpringDynamicCacheTestService)factory.getBean("dynamicTestService");
+        // To produce multiple calls of ApplicationListener::onApplicationEvent
+        GenericXmlApplicationContext child = new GenericXmlApplicationContext();
+        child.setParent(appCtx);
+        child.refresh();
+
+        svc = (GridSpringCacheTestService)appCtx.getBean("testService");
+        dynamicSvc = (GridSpringDynamicCacheTestService)appCtx.getBean("dynamicTestService");
     }
 
     /** {@inheritDoc} */
diff --git a/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java b/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java
index 17897de..0e590a7 100644
--- a/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java
+++ b/modules/spring/src/test/java/org/apache/ignite/testsuites/IgniteSpringTestSuite.java
@@ -93,9 +93,9 @@
 
         suite.addTestSuite(SpringCacheTest.class);
 
-        //suite.addTestSuite(GridSpringCacheManagerMultiJvmSelfTest.class);
+        suite.addTestSuite(GridSpringCacheManagerMultiJvmSelfTest.class);
 
-        //suite.addTestSuite(GridCommandLineLoaderTest.class);
+        suite.addTestSuite(GridCommandLineLoaderTest.class);
 
         return suite;
     }
diff --git a/modules/spring/src/test/java/org/apache/ignite/transactions/spring/GridSpringTransactionManagerSpringBeanSelfTest.java b/modules/spring/src/test/java/org/apache/ignite/transactions/spring/GridSpringTransactionManagerSpringBeanSelfTest.java
index f98952f..6ed14cf 100644
--- a/modules/spring/src/test/java/org/apache/ignite/transactions/spring/GridSpringTransactionManagerSpringBeanSelfTest.java
+++ b/modules/spring/src/test/java/org/apache/ignite/transactions/spring/GridSpringTransactionManagerSpringBeanSelfTest.java
@@ -41,6 +41,12 @@
     /** {@inheritDoc} */
     @Override protected void beforeTest() throws Exception {
         ApplicationContext appCtx = new GenericXmlApplicationContext("config/spring-transactions-ignite-spring-bean.xml");
+
+        // To produce multiple calls of ApplicationListener::onApplicationEvent
+        GenericXmlApplicationContext child = new GenericXmlApplicationContext();
+        child.setParent(appCtx);
+        child.refresh();
+
         ignite = (Ignite)appCtx.getBean("mySpringBean");
         service = (GridSpringTransactionService)appCtx.getBean("gridSpringTransactionService");
     }
diff --git a/modules/ssh/src/main/java/org/apache/ignite/internal/util/nodestart/StartNodeCallableImpl.java b/modules/ssh/src/main/java/org/apache/ignite/internal/util/nodestart/StartNodeCallableImpl.java
index 1bf23b3..6014510 100644
--- a/modules/ssh/src/main/java/org/apache/ignite/internal/util/nodestart/StartNodeCallableImpl.java
+++ b/modules/ssh/src/main/java/org/apache/ignite/internal/util/nodestart/StartNodeCallableImpl.java
@@ -254,6 +254,14 @@
                     igniteHome = igniteHome.replaceFirst("~", homeDir);
                 }
 
+                String prepareStartCmd = new SB()
+                    // Ensure diagnostics in the log even in case if start node breaks silently.
+                    .a("nohup echo \"Preparing to start remote node...\" > ")
+                    .a(scriptOutputDir).a('/').a(scriptOutputFileName).a(" 2>& 1 &")
+                    .toString();
+
+                shell(ses, prepareStartCmd);
+
                 String startNodeCmd = new SB()
                     // Console output is consumed, started nodes must use Ignite file appenders for log.
                     .a("nohup ")
@@ -382,11 +390,11 @@
     }
 
     /**
-     * Gets the value of the specified environment variable.
+     * Executes command using {@code exec} channel.
      *
      * @param ses SSH session.
-     * @param cmd environment variable name.
-     * @return environment variable value.
+     * @param cmd Command.
+     * @return Output result.
      * @throws JSchException In case of SSH error.
      * @throws IOException If failed.
      */
@@ -395,12 +403,12 @@
     }
 
     /**
-     * Gets the value of the specified environment variable.
+     * Executes command using {@code exec} channel with setting encoding.
      *
      * @param ses SSH session.
-     * @param cmd environment variable name.
+     * @param cmd Command.
      * @param encoding Process output encoding, {@code null} for default charset encoding.
-     * @return environment variable value.
+     * @return Output result.
      * @throws JSchException In case of SSH error.
      * @throws IOException If failed.
      */
diff --git a/modules/ssh/src/test/java/org/apache/ignite/internal/IgniteProjectionStartStopRestartSelfTest.java b/modules/ssh/src/test/java/org/apache/ignite/internal/IgniteProjectionStartStopRestartSelfTest.java
index 4314a98..a91488c 100644
--- a/modules/ssh/src/test/java/org/apache/ignite/internal/IgniteProjectionStartStopRestartSelfTest.java
+++ b/modules/ssh/src/test/java/org/apache/ignite/internal/IgniteProjectionStartStopRestartSelfTest.java
@@ -36,13 +36,13 @@
 import org.apache.ignite.cluster.ClusterStartNodeResult;
 import org.apache.ignite.events.Event;
 import org.apache.ignite.internal.util.nodestart.IgniteNodeStartUtils;
-import org.apache.ignite.internal.util.typedef.CI1;
 import org.apache.ignite.internal.util.typedef.F;
 import org.apache.ignite.internal.util.typedef.G;
 import org.apache.ignite.internal.util.typedef.X;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.lang.IgniteClosure;
 import org.apache.ignite.lang.IgnitePredicate;
+import org.apache.ignite.testframework.config.GridTestProperties;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
 import org.jetbrains.annotations.Nullable;
 
@@ -63,20 +63,24 @@
  * Tests for {@code startNodes(..)}, {@code stopNodes(..)}
  * and {@code restartNodes(..)} methods.
  * <p>
- * {@code tests.properties} file must specify username ({@code ssh.username} property)
- * and one (and only one) of password ({@code ssh.password} property) or
- * private key path ({@code ssh.key} property).
+ * Environment (obtained via {@link System#getenv(String)}) or, alternatively, {@code tests.properties} file must
+ * specify either username and password or private key path in the environment properties (@code test.ssh.username},
+ * {@code test.ssh.password}, {@code ssh.key} or in test file entries {@code ssh.username} {@code ssh.password},
+ * {@code ssh.key}respectively.</p>
+ * <p>
+ * Configured target host must run ssh server and accept ssh connections at configured port from user with specified
+ * credentials.</p>
  */
 @SuppressWarnings("ConstantConditions")
 public class IgniteProjectionStartStopRestartSelfTest extends GridCommonAbstractTest {
     /** */
-    private static final String SSH_UNAME = System.getenv("test.ssh.username");
+    private static final String SSH_UNAME = getProperty("test.ssh.username", "ssh.username");
 
     /** */
-    private static final String SSH_PWD = System.getenv("test.ssh.password");
+    private static final String SSH_PWD = getProperty("test.ssh.password", "ssh.password");
 
     /** */
-    private static final String SSH_KEY = System.getenv("ssh.key");
+    private static final String SSH_KEY = getProperty("ssh.key", "ssh.key");
 
     /** */
     private static final String CUSTOM_SCRIPT_WIN = "modules/core/src/test/bin/start-nodes-custom.bat";
@@ -124,7 +128,7 @@
     private volatile CountDownLatch leftLatch;
 
     /** {@inheritDoc} */
-    @Override protected void beforeTest() throws Exception {
+    @Override protected void beforeTest() {
         if (SSH_KEY != null) {
             key = new File(SSH_KEY);
 
@@ -144,25 +148,23 @@
 
         G.setDaemon(false);
 
-        ignite.events().localListen(new IgnitePredicate<Event>() {
-            @Override public boolean apply(Event evt) {
-                info("Received event: " + evt.shortDisplay());
+        ignite.events().localListen((IgnitePredicate<Event>)evt -> {
+            info("Received event: " + evt.shortDisplay());
 
-                if (evt.type() == EVT_NODE_JOINED) {
-                    joinedCnt.incrementAndGet();
+            if (evt.type() == EVT_NODE_JOINED) {
+                joinedCnt.incrementAndGet();
 
-                    if (joinedLatch != null)
-                        joinedLatch.countDown();
-                }
-                else if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) {
-                    leftCnt.incrementAndGet();
-
-                    if (leftLatch != null)
-                        leftLatch.countDown();
-                }
-
-                return true;
+                if (joinedLatch != null)
+                    joinedLatch.countDown();
             }
+            else if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) {
+                leftCnt.incrementAndGet();
+
+                if (leftLatch != null)
+                    leftLatch.countDown();
+            }
+
+            return true;
         }, EVT_NODE_JOINED, EVT_NODE_LEFT, EVT_NODE_FAILED);
     }
 
@@ -209,18 +211,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 1, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 1, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 1;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -239,18 +239,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, DFLT_TIMEOUT, 1);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, DFLT_TIMEOUT, 1);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -269,18 +267,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -291,8 +287,8 @@
         assert ignite.cluster().nodes().size() == 3;
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, false, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+            false, 0, 16);
 
         assert res.isEmpty();
 
@@ -310,18 +306,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -332,8 +326,8 @@
         assert ignite.cluster().nodes().size() == 3;
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 1, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, false, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 1, U.getIgniteHome(), CFG_NO_ATTR, null),
+            false, 0, 16);
 
         assert res.isEmpty();
 
@@ -351,18 +345,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -375,18 +367,16 @@
         joinedLatch = new CountDownLatch(2);
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 5, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, false, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 5, U.getIgniteHome(), CFG_NO_ATTR, null),
+            false, 0, 16);
 
         assert res.size() == 2;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -405,19 +395,17 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                F.asList(map(HOST, SSH_UNAME, pwd, key, 2, U.getIgniteHome(), CFG_NO_ATTR, null),
-                    map(HOST, SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null)),
-                null, false, 0, 16);
+                F.asList(map(pwd, key, 2, U.getIgniteHome()),
+                    map(pwd, key, 3, U.getIgniteHome())),
+                false, 0, 16);
 
         assert res.size() == 5;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -436,18 +424,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -461,18 +447,16 @@
         leftLatch = new CountDownLatch(3);
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, true, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+            true, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -496,18 +480,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 1, U.getIgniteHome(), null, script),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 1, U.getIgniteHome(), null, script),
+                false, 0, 16);
 
         assert res.size() == 1;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -528,18 +510,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, null, 3, U.getIgniteHome(), CFG_NO_ATTR,
-                null), null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, null, 3, U.getIgniteHome(), CFG_NO_ATTR,
+                null), false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -563,18 +543,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 2, U.getIgniteHome(), CFG_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 2, U.getIgniteHome(), CFG_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 2;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -582,18 +560,16 @@
         joinedLatch = new CountDownLatch(1);
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, false, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+            false, 0, 16);
 
         assert res.size() == 1;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -603,11 +579,7 @@
         leftLatch = new CountDownLatch(2);
 
         Collection<UUID> ids = F.transform(ignite.cluster().forAttribute(CUSTOM_CFG_ATTR_KEY, CUSTOM_CFG_ATTR_VAL).nodes(),
-            new IgniteClosure<ClusterNode, UUID>() {
-            @Override public UUID apply(ClusterNode node) {
-                return node.id();
-            }
-        });
+            (IgniteClosure<ClusterNode, UUID>)ClusterNode::id);
 
         ignite.cluster().forAttribute(CUSTOM_CFG_ATTR_KEY, CUSTOM_CFG_ATTR_VAL).nodes();
 
@@ -626,18 +598,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -661,18 +631,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
+        res.forEach(t -> {
                 assert t.getHostName().equals(HOST);
 
                 if (!t.isSuccess())
                     throw new IgniteException(t.getError());
-            }
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -703,18 +671,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -740,18 +706,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 2, U.getIgniteHome(), CFG_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 2, U.getIgniteHome(), CFG_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 2;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -759,18 +723,16 @@
         joinedLatch = new CountDownLatch(1);
 
         res = startNodes(ignite.cluster(),
-            maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-            null, false, 0, 16);
+            maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+            false, 0, 16);
 
         assert res.size() == 1;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -783,11 +745,7 @@
         X.println("Restarting nodes with " + CUSTOM_CFG_ATTR_KEY);
 
         Collection<UUID> ids = F.transform(ignite.cluster().forAttribute(CUSTOM_CFG_ATTR_KEY, CUSTOM_CFG_ATTR_VAL).nodes(),
-            new IgniteClosure<ClusterNode, UUID>() {
-                @Override public UUID apply(ClusterNode node) {
-                    return node.id();
-                }
-            }
+            (IgniteClosure<ClusterNode, UUID>)ClusterNode::id
         );
 
         ignite.cluster().restartNodes(ids);
@@ -806,18 +764,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -843,18 +799,16 @@
 
         Collection<ClusterStartNodeResult> res =
             startNodes(ignite.cluster(),
-                maps(Collections.singleton(HOST), SSH_UNAME, pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
-                null, false, 0, 16);
+                maps(Collections.singleton(HOST), pwd, key, 3, U.getIgniteHome(), CFG_NO_ATTR, null),
+                false, 0, 16);
 
         assert res.size() == 3;
 
-        F.forEach(res, new CI1<ClusterStartNodeResult>() {
-            @Override public void apply(ClusterStartNodeResult t) {
-                assert t.getHostName().equals(HOST);
+        res.forEach(t -> {
+            assert t.getHostName().equals(HOST);
 
-                if (!t.isSuccess())
-                    throw new IgniteException(t.getError());
-            }
+            if (!t.isSuccess())
+                throw new IgniteException(t.getError());
         });
 
         assert joinedLatch.await(WAIT_TIMEOUT, MILLISECONDS);
@@ -875,44 +829,35 @@
     }
 
     /**
-     * @param host Hostname.
-     * @param uname Username.
      * @param passwd Password.
      * @param key Private key file.
      * @param nodes Number of nodes.
      * @param igniteHome Ignite home.
-     * @param cfg Configuration file path.
-     * @param script Startup script path.
      * @return Parameters map.
      */
     private Map<String, Object> map(
-        String host,
-        @Nullable String uname,
         @Nullable String passwd,
         @Nullable File key,
         @Nullable Integer nodes,
-        @Nullable String igniteHome,
-        @Nullable String cfg,
-        @Nullable String script) {
-        assert host != null;
+        @Nullable String igniteHome) {
+        assert IgniteProjectionStartStopRestartSelfTest.HOST != null;
 
         Map<String, Object> params = new HashMap<>();
 
-        params.put(IgniteNodeStartUtils.HOST, host);
-        params.put(UNAME, uname);
+        params.put(IgniteNodeStartUtils.HOST, IgniteProjectionStartStopRestartSelfTest.HOST);
+        params.put(UNAME, IgniteProjectionStartStopRestartSelfTest.SSH_UNAME);
         params.put(PASSWD, passwd);
         params.put(KEY, key);
         params.put(NODES, nodes);
         params.put(IGNITE_HOME, igniteHome);
-        params.put(CFG, cfg);
-        params.put(SCRIPT, script);
+        params.put(CFG, IgniteProjectionStartStopRestartSelfTest.CFG_NO_ATTR);
+        params.put(SCRIPT, null);
 
         return params;
     }
 
     /**
      * @param hosts Hostnames.
-     * @param uname Username.
      * @param passwd Password.
      * @param key Private key file.
      * @param nodes Number of nodes.
@@ -923,7 +868,6 @@
      */
     private Collection<Map<String, Object>> maps(
         Collection<String> hosts,
-        @Nullable String uname,
         @Nullable String passwd,
         @Nullable File key,
         @Nullable Integer nodes,
@@ -938,7 +882,7 @@
             Map<String, Object> params = new HashMap<>();
 
             params.put(IgniteNodeStartUtils.HOST, host);
-            params.put(UNAME, uname);
+            params.put(UNAME, IgniteProjectionStartStopRestartSelfTest.SSH_UNAME);
             params.put(PASSWD, passwd);
             params.put(KEY, key);
             params.put(NODES, nodes);
@@ -953,17 +897,8 @@
     }
 
     /**
-     * @param name Filename.
-     * @return Whether name belongs to log file.
-     */
-    private boolean isSshNodeLogName(String name) {
-        return name.matches("ignite.[0-9a-z-]+.log");
-    }
-
-    /**
      * @param cluster Cluster.
      * @param hosts Hosts.
-     * @param dflts Default.
      * @param restart Restart flag.
      * @param timeout Timeout.
      * @param maxConn Maximum connections.
@@ -971,10 +906,19 @@
      */
     private Collection<ClusterStartNodeResult> startNodes(IgniteCluster cluster,
         Collection<Map<String, Object>> hosts,
-        @Nullable Map<String, Object> dflts,
         boolean restart,
         int timeout,
         int maxConn) {
-        return cluster.startNodesAsync(hosts, dflts, restart, timeout, maxConn).get(WAIT_TIMEOUT);
+        return cluster.startNodesAsync(hosts, null, restart, timeout, maxConn).get(WAIT_TIMEOUT);
     }
-}
\ No newline at end of file
+
+    /** */
+    private static String getProperty(String envName, String gridTestName) {
+        String candidate = System.getenv(envName);
+
+        if (candidate != null)
+            return candidate;
+
+        return GridTestProperties.getProperty(gridTestName);
+    }
+}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterMaintainer.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterMaintainer.java
index e6ca33d..f44a4f8 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterMaintainer.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterMaintainer.java
@@ -95,65 +95,61 @@
 
     /** {@inheritDoc} */
     @Override public void execute(ServiceContext ctx) {
-        while (!ctx.isCancelled()) {
+        while (!ctx.isCancelled() && !hasUserScriptCompletedSuccessfully()) {
             LockSupport.parkNanos(1_000_000);
 
-            boolean completed = clusterMgr.isUserScriptCompleted(clusterId);
-            if (completed)
-                break;
-
-            boolean restartRequired = hasAffinityChanged();
-
-            if (restartRequired)
-                log.debug("Affinity mapping changed, cluster will be restarted [clusterId=" + clusterId + "]");
-
-            if (!restartRequired) {
-                try {
-                    TensorFlowCluster cluster = clusterMgr.getCluster(clusterId);
-                    Map<UUID, List<LongRunningProcessStatus>> statuses = clusterMgr.getSrvProcMgr()
-                        .ping(cluster.getProcesses());
-
-                    for (UUID nodeId : statuses.keySet()) {
-                        for (LongRunningProcessStatus status : statuses.get(nodeId)) {
-                            if (status.getState().equals(LongRunningProcessState.DONE)) {
-                                restartRequired = true;
-                                break;
-                            }
-                        }
-                    }
-
-                }
-                catch (Exception e) {
-                    log.error("Failed to check process statuses", e);
-                    restartRequired = true;
-                }
-
-                if (restartRequired)
-                    log.debug("Fail detected, cluster will be restarted [clusterId=" + clusterId + "]");
-            }
+            boolean restartRequired = hasAffinityChanged()
+                || hasAnyWorkerFailed()
+                || hasChiefFailed()
+                || hasUserScriptFailed();
 
             if (restartRequired) {
-                clusterMgr.stopClusterIfExists(clusterId);
+                log.debug("Cluster will be restarted [clusterId=" + clusterId + "]");
 
-                TensorFlowCluster cluster = clusterMgr.createCluster(
-                    clusterId,
-                    jobArchive,
-                    str -> ignite.message().sendOrdered("us_out_" + clusterId, str, 60 * 1000),
-                    str -> ignite.message().sendOrdered("us_err_" + clusterId, str, 60 * 1000)
-                );
-
-                ignite.message().send(topicName, Optional.of(cluster));
+                restartCluster();
             }
         }
 
-        clusterMgr.stopClusterIfExists(clusterId);
-
-        ignite.message().send(topicName, Optional.empty());
+        stopCluster(true);
 
         log.debug("Cluster maintainer completed [clusterId=" + clusterId + "]");
     }
 
     /**
+     * Restarts TensorFlow cluster.
+     */
+    private void restartCluster() {
+        stopCluster(false);
+        startCluster();
+    }
+
+    /**
+     * Stops TensorFlow cluster.
+     *
+     * @param terminate Terminate TensorFlow cluster and notify all listeners that cluster won't be started again.
+     */
+    private void stopCluster(boolean terminate) {
+        clusterMgr.stopClusterIfExists(clusterId);
+
+        if (terminate)
+            ignite.message().send(topicName, Optional.empty());
+    }
+
+    /**
+     * Starts TensorFlow cluster.
+     */
+    private void startCluster() {
+        TensorFlowCluster cluster = clusterMgr.createCluster(
+            clusterId,
+            jobArchive,
+            str -> ignite.message().sendOrdered("us_out_" + clusterId, str, 60 * 1000),
+            str -> ignite.message().sendOrdered("us_err_" + clusterId, str, 60 * 1000)
+        );
+
+        ignite.message().send(topicName, Optional.of(cluster));
+    }
+
+    /**
      * Checks if affinity mapping has been changed.
      *
      * @return True if mapping has been changed, otherwise false.
@@ -178,4 +174,60 @@
 
         return false;
     }
+
+    /**
+     * Checks is any worker has failed.
+     *
+     * @return True if any worker has failed, otherwise false.
+     */
+    private boolean hasAnyWorkerFailed() {
+        TensorFlowCluster cluster = clusterMgr.getCluster(clusterId);
+
+        Map<UUID, List<LongRunningProcessStatus>> statuses;
+        try {
+            statuses = clusterMgr.getSrvProcMgr().ping(cluster.getProcesses());
+        }
+        catch (Exception e) {
+            log.error("Failed to check process statuses", e);
+
+            return true;
+        }
+
+        for (UUID nodeId : statuses.keySet()) {
+            for (LongRunningProcessStatus status : statuses.get(nodeId)) {
+                if (status.getState().equals(LongRunningProcessState.DONE))
+                    return true;
+            }
+        }
+
+        return false;
+    }
+
+    /**
+     * Checks if chief has failed.
+     *
+     * @return True if chief has failed, otherwise false.
+     */
+    private boolean hasChiefFailed() {
+        return clusterMgr.getChiefException(clusterId) != null;
+    }
+
+    /**
+     * Checks if user script failed.
+     *
+     * @return True if user script has failed, otherwise false.
+     */
+    private boolean hasUserScriptFailed() {
+        return clusterMgr.getUserScriptException(clusterId) != null;
+    }
+
+    /**
+     * Checks if user script has completed successfully.
+     *
+     * @return True if user script has completed successfully, otherwise false.
+     */
+    private boolean hasUserScriptCompletedSuccessfully() {
+        return clusterMgr.isUserScriptCompleted(clusterId)
+            && clusterMgr.getUserScriptException(clusterId) == null;
+    }
 }
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterManager.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterManager.java
index cdbd774..12ed4ea 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterManager.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/TensorFlowClusterManager.java
@@ -230,6 +230,30 @@
     }
 
     /**
+     * Checks if chief completed and returns result.
+     *
+     * @param clusterId Cluster identifier.
+     * @return {@code true} if chief completed, otherwise {@code false}.
+     */
+    public boolean isChiefCompleted(UUID clusterId) {
+        TensorFlowChiefRunner runner = chiefRunners.get(clusterId);
+
+        return runner != null && runner.isCompleted();
+    }
+
+    /**
+     * Returns an exception that happened during execution or {@code null} if there is no exception.
+     *
+     * @param clusterId Cluster identifier.
+     * @return Exception that happened during execution or {@code null} if there is no exception.
+     */
+    public Exception getChiefException(UUID clusterId) {
+        TensorFlowChiefRunner runner = chiefRunners.get(clusterId);
+
+        return runner != null ? runner.getException() : null;
+    }
+
+    /**
      * Starts user script processes using the specified job archive.
      *
      * @param clusterId Cluster identifier.
@@ -279,6 +303,18 @@
     }
 
     /**
+     * Returns an exception that happened during execution or {@code null} if there is no exception.
+     *
+     * @param clusterId Cluster identifier.
+     * @return Exception that happened during execution or {@code null} if there is no exception.
+     */
+    public Exception getUserScriptException(UUID clusterId) {
+        TensorFlowUserScriptRunner runner = userScriptRunners.get(clusterId);
+
+        return runner != null ? runner.getException() : null;
+    }
+
+    /**
      * Returns list of maintained TensorFlow clusters.
      *
      * @return List of maintained TensorFlow clusters.
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerManager.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerManager.java
index fa6194a..ed6c801 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerManager.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerManager.java
@@ -24,15 +24,16 @@
 import org.apache.ignite.Ignition;
 import org.apache.ignite.tensorflow.cluster.spec.TensorFlowClusterSpec;
 import org.apache.ignite.tensorflow.cluster.spec.TensorFlowServerAddressSpec;
+import org.apache.ignite.tensorflow.cluster.util.TensorFlowProcessBuilderSupplier;
 import org.apache.ignite.tensorflow.core.ProcessManager;
 import org.apache.ignite.tensorflow.core.ProcessManagerWrapper;
-import org.apache.ignite.tensorflow.core.pythonrunning.PythonProcess;
-import org.apache.ignite.tensorflow.core.pythonrunning.PythonProcessManager;
+import org.apache.ignite.tensorflow.core.nativerunning.NativeProcess;
+import org.apache.ignite.tensorflow.core.nativerunning.NativeProcessManager;
 
 /**
  * TensorFlow server manager that allows to start, stop and make other actions with TensorFlow servers.
  */
-public class TensorFlowServerManager extends ProcessManagerWrapper<PythonProcess, TensorFlowServer> {
+public class TensorFlowServerManager extends ProcessManagerWrapper<NativeProcess, TensorFlowServer> {
     /** TensorFlow server script formatter. */
     private static final TensorFlowServerScriptFormatter scriptFormatter = new TensorFlowServerScriptFormatter();
 
@@ -42,7 +43,7 @@
      * @param ignite Ignite instance.
      */
     public TensorFlowServerManager(Ignite ignite) {
-        this(new PythonProcessManager(ignite));
+        this(new NativeProcessManager(ignite));
     }
 
     /**
@@ -50,17 +51,21 @@
      *
      * @param delegate Delegate.
      */
-    public TensorFlowServerManager(ProcessManager<PythonProcess> delegate) {
+    public TensorFlowServerManager(ProcessManager<NativeProcess> delegate) {
         super(delegate);
     }
 
     /** {@inheritDoc} */
-    @Override protected PythonProcess transformSpecification(TensorFlowServer spec) {
-        return new PythonProcess(
+    @Override protected NativeProcess transformSpecification(TensorFlowServer spec) {
+        return new NativeProcess(
+            new TensorFlowProcessBuilderSupplier(
+                true,
+                spec.getTaskIdx(),
+                "job:" + spec.getJobName(),
+                "task:" + spec.getTaskIdx()
+            ),
             scriptFormatter.format(spec, true, Ignition.ignite()),
-            getNode(spec),
-            "job:" + spec.getJobName(),
-            "task:" + spec.getTaskIdx()
+            getNode(spec)
         );
     }
 
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerScriptFormatter.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerScriptFormatter.java
index a93d910..7cfa1c6 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerScriptFormatter.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/tfrunning/TensorFlowServerScriptFormatter.java
@@ -70,6 +70,9 @@
             .append(srv.getTaskIdx())
             .append("))")
             .append("\n");
+        builder.append("print('IGNITE_DATASET_HOST = ', os.environ.get('IGNITE_DATASET_HOST'))").append("\n");
+        builder.append("print('IGNITE_DATASET_PORT = ', os.environ.get('IGNITE_DATASET_PORT'))").append("\n");
+        builder.append("print('IGNITE_DATASET_PART = ', os.environ.get('IGNITE_DATASET_PART'))").append("\n");
 
         builder.append("server = tf.train.Server(cluster");
 
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/ClusterPortManager.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/ClusterPortManager.java
index 462752c..681e07f 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/ClusterPortManager.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/ClusterPortManager.java
@@ -17,15 +17,6 @@
 
 package org.apache.ignite.tensorflow.cluster.util;
 
-import java.io.Serializable;
-import java.net.NetworkInterface;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.BitSet;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.locks.Lock;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteLogger;
@@ -35,6 +26,11 @@
 import org.apache.ignite.cluster.ClusterGroupEmptyException;
 import org.apache.ignite.configuration.CacheConfiguration;
 
+import java.io.Serializable;
+import java.net.NetworkInterface;
+import java.util.*;
+import java.util.concurrent.locks.Lock;
+
 /**
  * Cluster port manager that allows to reliably {@link #acquirePort(UUID)} and {@link #releasePort(UUID, int)} on the
  * cluster nodes.
@@ -144,6 +140,8 @@
 
                 if (ports.isEmpty())
                     cache.remove(hostId);
+                else
+                    cache.put(hostId, ports);
             }
         }
         finally {
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowChiefRunner.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowChiefRunner.java
index 96535eb..d8640fa 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowChiefRunner.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowChiefRunner.java
@@ -23,9 +23,9 @@
 import org.apache.ignite.tensorflow.cluster.spec.TensorFlowClusterSpec;
 import org.apache.ignite.tensorflow.cluster.tfrunning.TensorFlowServer;
 import org.apache.ignite.tensorflow.cluster.tfrunning.TensorFlowServerScriptFormatter;
-import org.apache.ignite.tensorflow.core.pythonrunning.PythonProcessBuilderSupplier;
 import org.apache.ignite.tensorflow.core.util.AsyncNativeProcessRunner;
 import org.apache.ignite.tensorflow.core.util.NativeProcessRunner;
+import org.apache.ignite.tensorflow.core.util.PythonProcessBuilderSupplier;
 
 /**
  * Utils class that helps to start and stop chief process.
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowProcessBuilderSupplier.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowProcessBuilderSupplier.java
new file mode 100644
index 0000000..8b95526
--- /dev/null
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowProcessBuilderSupplier.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tensorflow.cluster.util;
+
+import java.util.Map;
+import org.apache.ignite.Ignite;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.cluster.ClusterNode;
+import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor;
+import org.apache.ignite.tensorflow.core.util.PythonProcessBuilderSupplier;
+
+/**
+ * Python process builder supplier that is used to create TensorFlow worker process builder.
+ */
+public class TensorFlowProcessBuilderSupplier extends PythonProcessBuilderSupplier {
+    /** */
+    private static final long serialVersionUID = 6866243505446122897L;
+
+    /** Prefix for worker environment variables. */
+    private static final String ENV_PREFIX = "IGNITE_DATASET_";
+
+    /** Partition of the upstream cache. */
+    private final Integer part;
+
+    /**
+     * Constructs a new instance of Python process builder supplier.
+     *
+     * @param interactive Interactive flag (allows to used standard input to pass Python script).
+     * @param part Partition index.
+     * @param meta Meta information that adds to script as arguments.
+     */
+    public TensorFlowProcessBuilderSupplier(boolean interactive, Integer part, String... meta) {
+        super(interactive, meta);
+        this.part = part;
+    }
+
+    /** {@inheritDoc} */
+    @Override public ProcessBuilder get() {
+        ProcessBuilder pythonProcBuilder = super.get();
+
+        Ignite ignite = Ignition.ignite();
+        ClusterNode locNode = ignite.cluster().localNode();
+
+        Integer port = locNode.attribute(ClientListenerProcessor.CLIENT_LISTENER_PORT);
+
+        Map<String, String> env = pythonProcBuilder.environment();
+        env.put(ENV_PREFIX + "HOST", "localhost");
+
+        if (port != null)
+            env.put(ENV_PREFIX + "PORT", String.valueOf(port));
+
+        if (part != null)
+            env.put(ENV_PREFIX + "PART", String.valueOf(part));
+
+        return pythonProcBuilder;
+    }
+}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowUserScriptRunner.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowUserScriptRunner.java
index 6bb3b0a..17e63bb 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowUserScriptRunner.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/cluster/util/TensorFlowUserScriptRunner.java
@@ -37,7 +37,6 @@
 import org.apache.ignite.tensorflow.cluster.TensorFlowJobArchive;
 import org.apache.ignite.tensorflow.cluster.spec.TensorFlowClusterSpec;
 import org.apache.ignite.tensorflow.cluster.spec.TensorFlowServerAddressSpec;
-import org.apache.ignite.tensorflow.core.pythonrunning.PythonProcessBuilderSupplier;
 import org.apache.ignite.tensorflow.core.util.AsyncNativeProcessRunner;
 import org.apache.ignite.tensorflow.core.util.NativeProcessRunner;
 
@@ -118,7 +117,7 @@
         if (workingDir == null)
             throw new IllegalStateException("Working directory is not created");
 
-        ProcessBuilder procBuilder = new PythonProcessBuilderSupplier(false).get();
+        ProcessBuilder procBuilder = new TensorFlowProcessBuilderSupplier(false, null).get();
 
         procBuilder.directory(workingDir);
         procBuilder.command(jobArchive.getCommands());
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcess.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcess.java
deleted file mode 100644
index 34c5a12..0000000
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcess.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tensorflow.core.pythonrunning;
-
-import java.io.Serializable;
-import java.util.UUID;
-
-/**
- * Python process specification.
- */
-public class PythonProcess implements Serializable {
-    /** */
-    private static final long serialVersionUID = -1623536488451695210L;
-
-    /** Stdin of the process. */
-    private final String stdin;
-
-    /** Node identifier. */
-    private final UUID nodeId;
-
-    /** Meta information that adds to script as arguments.  */
-    private final String[] meta;
-
-    /**
-     * Constructs a new instance of python process.
-     *
-     * @param stdin  Stdin of the process.
-     * @param nodeId Node identifier.
-     * @param meta Meta information that adds to script as arguments.
-     */
-    public PythonProcess(String stdin, UUID nodeId, String... meta) {
-        assert nodeId != null : "Node identifier should not be null";
-
-        this.stdin = stdin;
-        this.nodeId = nodeId;
-        this.meta = meta;
-    }
-
-    /** */
-    public String getStdin() {
-        return stdin;
-    }
-
-    /** */
-    public UUID getNodeId() {
-        return nodeId;
-    }
-
-    /** */
-    public String[] getMeta() {
-        return meta;
-    }
-}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessBuilderSupplier.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessBuilderSupplier.java
deleted file mode 100644
index c7f7fde..0000000
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessBuilderSupplier.java
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tensorflow.core.pythonrunning;
-
-import java.lang.management.ManagementFactory;
-import java.util.Map;
-import org.apache.ignite.tensorflow.util.SerializableSupplier;
-
-/**
- * Python process builder supplier that is used to create Python process builder.
- */
-public class PythonProcessBuilderSupplier implements SerializableSupplier<ProcessBuilder> {
-    /** */
-    private static final long serialVersionUID = 7181937306294456125L;
-
-    /** Python environment variable name. */
-    private static final String PYTHON_ENV_NAME = "PYTHON";
-
-    /** Interactive flag (allows to used standard input to pass Python script). */
-    private final boolean interactive;
-
-    /** Meta information that adds to script as arguments. */
-    private final String[] meta;
-
-    /**
-     * Constructs a new instance of Python process builder supplier.
-     *
-     * @param interactive Interactive flag (allows to used standard input to pass Python script).
-     * @param meta Meta information that adds to script as arguments.
-     */
-    public PythonProcessBuilderSupplier(boolean interactive, String... meta) {
-        this.interactive = interactive;
-        this.meta = meta;
-    }
-
-    /**
-     * Returns process builder to be used to start Python process.
-     *
-     * @return Process builder to be used to start Python process.
-     */
-    public ProcessBuilder get() {
-        String python = System.getenv(PYTHON_ENV_NAME);
-
-        if (python == null)
-            python = "python3";
-
-        ProcessBuilder procBldr;
-        if (interactive) {
-            String[] cmd = new String[meta.length + 3];
-
-            cmd[0] = python;
-            cmd[1] = "-i";
-            cmd[2] = "-";
-
-            System.arraycopy(meta, 0, cmd, 3, meta.length);
-
-            procBldr = new ProcessBuilder(cmd);
-        }
-        else
-            procBldr = new ProcessBuilder(python);
-
-        Map<String, String> env = procBldr.environment();
-        env.put("PPID", String.valueOf(getProcessId()));
-
-        return procBldr;
-    }
-
-    /**
-     * Returns current process identifier.
-     *
-     * @return Process identifier.
-     */
-    private long getProcessId() {
-        String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
-
-        return Long.parseLong(pid);
-    }
-}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessManager.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessManager.java
deleted file mode 100644
index d050c0e..0000000
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/PythonProcessManager.java
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.tensorflow.core.pythonrunning;
-
-import org.apache.ignite.Ignite;
-import org.apache.ignite.tensorflow.core.ProcessManager;
-import org.apache.ignite.tensorflow.core.ProcessManagerWrapper;
-import org.apache.ignite.tensorflow.core.nativerunning.NativeProcess;
-import org.apache.ignite.tensorflow.core.nativerunning.NativeProcessManager;
-
-/**
- * Python process manager that allows to  start, stop and make other actions with python processes.
- */
-public class PythonProcessManager extends ProcessManagerWrapper<NativeProcess, PythonProcess> {
-    /**
-     * Constructs a new instance of python process manager.
-     *
-     * @param ignite Ignite instance.
-     */
-    public PythonProcessManager(Ignite ignite) {
-        this(new NativeProcessManager(ignite));
-    }
-
-    /**
-     * Constructs a new instance of python process manager.
-     *
-     * @param delegate Delegate.
-     */
-    public PythonProcessManager(ProcessManager<NativeProcess> delegate) {
-        super(delegate);
-    }
-
-    /** {@inheritDoc} */
-    @Override protected NativeProcess transformSpecification(PythonProcess spec) {
-        return new NativeProcess(
-            new PythonProcessBuilderSupplier(true, spec.getMeta()),
-            spec.getStdin(),
-            spec.getNodeId()
-        );
-    }
-}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/package-info.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/package-info.java
deleted file mode 100644
index 541c047..0000000
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/pythonrunning/package-info.java
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/**
- * <!-- Package description. -->
- * The part of TensorFlow integration infrastructure that allows to start and maintain Python native processes. As
- * described in {@link org.apache.ignite.tensorflow.core.pythonrunning.PythonProcess} user only needs to specify Python
- * code and identifier of the node the process should be started and Python Process Manager will make the rest so that
- * the given code will be executed and maintained on the specified node.
- */
-package org.apache.ignite.tensorflow.core.pythonrunning;
\ No newline at end of file
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/AsyncNativeProcessRunner.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/AsyncNativeProcessRunner.java
index b336b97..8bf32e8 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/AsyncNativeProcessRunner.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/AsyncNativeProcessRunner.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.tensorflow.core.util;
 
+import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Future;
 import org.apache.ignite.Ignite;
@@ -68,23 +69,21 @@
         NativeProcessRunner procRunner = doBefore();
 
         fut = executor.submit(() -> {
-            while (!Thread.currentThread().isInterrupted()) {
-                try {
-                    log.debug("Starting native process");
-                    procRunner.startAndWait();
-                    log.debug("Native process completed");
-                    break;
-                }
-                catch (InterruptedException e) {
-                    log.debug("Native process interrupted");
-                    break;
-                }
-                catch (Exception e) {
-                    log.error("Native process failed", e);
-                }
+            try {
+                log.debug("Starting native process");
+                procRunner.startAndWait();
+                log.debug("Native process completed");
             }
-
-            doAfter();
+            catch (InterruptedException e) {
+                log.debug("Native process interrupted");
+            }
+            catch (Exception e) {
+                log.error("Native process failed", e);
+                throw e;
+            }
+            finally {
+                doAfter();
+            }
         });
     }
 
@@ -104,4 +103,23 @@
     public boolean isCompleted() {
         return fut != null && fut.isDone();
     }
+
+    /**
+     * Returns an exception that happened during execution or {@code null} if there is no exception.
+     *
+     * @return Exception that happened during execution or {@code null} if there is no exception.
+     */
+    public Exception getException() {
+        if (!fut.isDone())
+            return null;
+
+        try {
+            fut.get();
+        }
+        catch (InterruptedException | ExecutionException e) {
+            return e;
+        }
+
+        return null;
+    }
 }
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/PythonProcessBuilderSupplier.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/PythonProcessBuilderSupplier.java
new file mode 100644
index 0000000..ffb5e82
--- /dev/null
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/core/util/PythonProcessBuilderSupplier.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.tensorflow.core.util;
+
+import java.lang.management.ManagementFactory;
+import java.util.Map;
+import org.apache.ignite.tensorflow.util.SerializableSupplier;
+
+/**
+ * Python process builder supplier that is used to create Python process builder.
+ */
+public class PythonProcessBuilderSupplier implements SerializableSupplier<ProcessBuilder> {
+    /** */
+    private static final long serialVersionUID = 7181937306294456125L;
+
+    /** Python environment variable name. */
+    private static final String PYTHON_ENV_NAME = "PYTHON";
+
+    /** Interactive flag (allows to used standard input to pass Python script). */
+    private final boolean interactive;
+
+    /** Meta information that adds to script as arguments. */
+    private final String[] meta;
+
+    /**
+     * Constructs a new instance of Python process builder supplier.
+     *
+     * @param interactive Interactive flag (allows to used standard input to pass Python script).
+     * @param meta Meta information that adds to script as arguments.
+     */
+    public PythonProcessBuilderSupplier(boolean interactive, String... meta) {
+        this.interactive = interactive;
+        this.meta = meta;
+    }
+
+    /**
+     * Returns process builder to be used to start Python process.
+     *
+     * @return Process builder to be used to start Python process.
+     */
+    public ProcessBuilder get() {
+        String python = System.getenv(PYTHON_ENV_NAME);
+
+        if (python == null)
+            python = "python3";
+
+        ProcessBuilder procBldr;
+        if (interactive) {
+            String[] cmd = new String[meta.length + 3];
+
+            cmd[0] = python;
+            cmd[1] = "-i";
+            cmd[2] = "-";
+
+            System.arraycopy(meta, 0, cmd, 3, meta.length);
+
+            procBldr = new ProcessBuilder(cmd);
+        }
+        else
+            procBldr = new ProcessBuilder(python);
+
+        Map<String, String> env = procBldr.environment();
+        env.put("PPID", String.valueOf(getProcessId()));
+
+        return procBldr;
+    }
+
+    /**
+     * Returns current process identifier.
+     *
+     * @return Process identifier.
+     */
+    private long getProcessId() {
+        String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
+
+        return Long.parseLong(pid);
+    }
+}
diff --git a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/submitter/command/StartCommand.java b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/submitter/command/StartCommand.java
index 082b363..0202b8e 100644
--- a/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/submitter/command/StartCommand.java
+++ b/modules/tensorflow/src/main/java/org/apache/ignite/tensorflow/submitter/command/StartCommand.java
@@ -28,6 +28,7 @@
 import java.util.zip.ZipOutputStream;
 import org.apache.commons.io.IOUtils;
 import org.apache.ignite.Ignite;
+import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager;
 import org.apache.ignite.tensorflow.cluster.TensorFlowClusterGatewayManager;
 import org.apache.ignite.tensorflow.cluster.TensorFlowJobArchive;
 import picocli.CommandLine;
@@ -97,7 +98,7 @@
 
         if (file.isDirectory())
             return zipDirectory(file);
-        else if (jobArchivePath.endsWith(".zip"))
+        else if (jobArchivePath.endsWith(FilePageStoreManager.ZIP_SUFFIX))
             return zipArchive(file);
         else
             return zipFile(file);
diff --git a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessor.java b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessor.java
index 3564f2f..edde878 100644
--- a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessor.java
+++ b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessor.java
@@ -28,6 +28,7 @@
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Set;
 import org.apache.commons.codec.binary.Hex;
@@ -228,6 +229,8 @@
         if (files == null)
             return true;
 
+        Arrays.sort(files);
+
         for (File visited : files) {
             if (visited.isFile()) {
                 if (!addFileDigest(visited, digest, log))
@@ -452,4 +455,4 @@
 
         return false;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/UriDeploymentScannerManager.java b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/UriDeploymentScannerManager.java
index be7e354..f5bb6d3 100644
--- a/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/UriDeploymentScannerManager.java
+++ b/modules/urideploy/src/main/java/org/apache/ignite/spi/deployment/uri/scanners/UriDeploymentScannerManager.java
@@ -114,6 +114,9 @@
                         try {
                             scanner.scan(UriDeploymentScannerManager.this);
                         }
+                        catch (Exception e) {
+                            log.error("Uncaught error in URI deployment scanner", e);
+                        }
                         finally {
                             // Do it in finally to avoid any hanging.
                             if (firstScan) {
@@ -220,4 +223,4 @@
     @Override public String toString() {
         return S.toString(UriDeploymentScannerManager.class, this, "uri", U.hidePassword(uri.toString()));
     }
-}
\ No newline at end of file
+}
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentAbstractSelfTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentAbstractSelfTest.java
index 6624c3d..3333300 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentAbstractSelfTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentAbstractSelfTest.java
@@ -17,8 +17,11 @@
 
 package org.apache.ignite.spi.deployment.uri;
 
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.internal.util.lang.GridAbsPredicateX;
 import org.apache.ignite.spi.deployment.DeploymentListener;
 import org.apache.ignite.spi.deployment.DeploymentResource;
+import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.config.GridTestProperties;
 import org.apache.ignite.testframework.junits.spi.GridSpiAbstractTest;
 import org.apache.ignite.testframework.junits.spi.GridSpiTestConfig;
@@ -80,4 +83,24 @@
 
         info("Not deployed task [task=" + task + ']');
     }
-}
\ No newline at end of file
+
+    /**
+     * @param taskName name of task to wait on.
+     * @param expectDeployed if {@code true}, wait for availability, else wait for unavailability.
+     * @param timeout in ms.
+     * @throws Exception if failed.
+     */
+    protected void waitForTask(String taskName, boolean expectDeployed, long timeout) throws IgniteCheckedException {
+        assertTrue("Failed to wait for (un)deployment of " + taskName,
+            GridTestUtils.waitForCondition(new GridAbsPredicateX() {
+                public boolean applyx() throws IgniteCheckedException {
+                    if (expectDeployed)
+                        return getSpi().findResource(taskName) != null;
+                    else
+                        return getSpi().findResource(taskName) == null;
+                }
+            }, timeout));
+
+        info((expectDeployed ? "Deployed" : "Not deployed") + " task [task=" + taskName + ']');
+    }
+}
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessorSelfTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessorSelfTest.java
index fad7cb2..9508fa9 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessorSelfTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentFileProcessorSelfTest.java
@@ -128,14 +128,7 @@
 
         try {
             // Wait for SPI
-            GridTestUtils.waitForCondition(new GridAbsPredicateX() {
-                @Override public boolean applyx() throws IgniteCheckedException {
-                    if (deployed)
-                        return getSpi().findResource(taskId) != null;
-                    else
-                        return getSpi().findResource(taskId) == null;
-                }
-            }, 5000);
+            waitForTask(taskId, deployed, 5000);
 
             if (deployed)
                 assert getSpi().findResource(taskId) != null;
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMd5CheckSelfTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMd5CheckSelfTest.java
index 923b93b..126e202 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMd5CheckSelfTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMd5CheckSelfTest.java
@@ -47,26 +47,22 @@
     public void testMd5FileCheck() throws Exception {
         undeployCntr.set(0);
 
-        DeploymentResource task = getSpi().findResource("GridUriDeploymentTestWithNameTask7");
+        String taskName = "GridUriDeploymentTestWithNameTask7";
+
+        DeploymentResource task = getSpi().findResource(taskName);
 
         assert task == null;
 
-        U.copy(getGarFile(), new File(getDeployDir(), "uri1.gar"), true);
+        atomicCopy(getGarFile(), getDeployDir(), "uri1.gar");
 
-        Thread.sleep(500);
+        waitForTask(taskName, true, 10000);
 
-        task = getSpi().findResource("GridUriDeploymentTestWithNameTask7");
-
-        assert task != null;
         assert undeployCntr.get() == 0;
 
-        U.copy(getGarFile(), new File(getDeployDir(), "uri2.gar"), true);
+        atomicCopy(getGarFile(), getDeployDir(), "uri2.gar");
 
-        Thread.sleep(500);
+        waitForTask(taskName, true, 10000);
 
-        task = getSpi().findResource("GridUriDeploymentTestWithNameTask7");
-
-        assert task != null;
         assert undeployCntr.get() == 0;
     }
 
@@ -78,31 +74,43 @@
     public void testMd5DirectoryCheck() throws Exception {
         undeployCntr.set(0);
 
-        DeploymentResource task = getSpi().findResource("GridUriDeploymentTestWithNameTask6");
+        String taskName = "GridUriDeploymentTestWithNameTask6";
+
+        DeploymentResource task = getSpi().findResource(taskName);
 
         assert task == null;
 
-        U.copy(getGarDir(), new File(getDeployDir(), "uri1.gar"), true);
+        atomicCopy(getGarDir(), getDeployDir(), "uri1.gar");
 
-        Thread.sleep(500);
+        waitForTask(taskName, true, 10000);
 
-        task = getSpi().findResource("GridUriDeploymentTestWithNameTask6");
-
-        assert task != null;
         assert undeployCntr.get() == 0;
 
-        U.copy(getGarDir(), new File(getDeployDir(), "uri2.gar"), true);
+        atomicCopy(getGarDir(), getDeployDir(), "uri2.gar");
 
-        Thread.sleep(500);
+        waitForTask(taskName, true, 10000);
 
-        task = getSpi().findResource("GridUriDeploymentTestWithNameTask6");
-
-        assert task != null;
         assert undeployCntr.get() == 0;
+    }
 
+    /** {@inheritDoc} */
+    protected void afterTest() throws Exception {
         U.delete(getGarDir());
         U.delete(new File(getDeployDir(), "uri1.gar"));
         U.delete(new File(getDeployDir(), "uri2.gar"));
+
+        Thread.sleep(500);
+    }
+
+    /**
+     * First copies to parent directory, when moves atomically to destination directory.
+     */
+    private static void atomicCopy(File src, File destDir, String fileName) throws IOException {
+        File destParent = new File(destDir.getParent(), fileName);
+
+        U.copy(src, destParent, true);
+
+        destParent.renameTo(new File(destDir, fileName));
     }
 
     /**
@@ -127,7 +135,7 @@
      * @return a valid .gar file path.
      */
     private File getGarFile() {
-        File gar = new File(GridTestProperties.getProperty("ant.urideployment.gar.file"));
+        File gar = U.resolveIgnitePath(GridTestProperties.getProperty("ant.urideployment.gar.file"));
 
         assert gar.isFile();
 
@@ -195,4 +203,4 @@
     @Override protected void afterTestsStopped() throws Exception {
         U.delete(getDeployDir());
     }
-}
\ No newline at end of file
+}
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMultiScannersErrorThrottlingTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMultiScannersErrorThrottlingTest.java
index 552c8222..246ad80 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMultiScannersErrorThrottlingTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/GridUriDeploymentMultiScannersErrorThrottlingTest.java
@@ -34,8 +34,7 @@
     public void testThrottling() throws Exception {
         LT.throttleTimeout(11000);
 
-        // Sleep for 1 min.
-        Thread.sleep(60 * 1000);
+        Thread.sleep(15 * 1000);
     }
 
     /**
@@ -50,4 +49,4 @@
 
         return uriList;
     }
-}
\ No newline at end of file
+}
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploy-with-resources-ignite.xml b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploy-with-resources-ignite.xml
deleted file mode 100644
index 6ec6d9a..0000000
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/deploy-with-resources-ignite.xml
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    Spring configuration file for test classes in gar-file.
--->
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd">
-    <description>Ignite Spring configuration file in gar-file.</description>
-
-    <!--
-        Test tasks specification.
-    -->
-    <util:list id="tasks">
-        <value>org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestWithNameTask7</value>
-    </util:list>
-</beans>
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/p2p-ignite.xml b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/p2p-ignite.xml
deleted file mode 100644
index 7ccf896..0000000
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/META-INF/p2p-ignite.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one or more
-  contributor license agreements.  See the NOTICE file distributed with
-  this work for additional information regarding copyright ownership.
-  The ASF licenses this file to You under the Apache License, Version 2.0
-  (the "License"); you may not use this file except in compliance with
-  the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<!--
-    Spring configuration file for test classes in gar-file.
--->
-<beans xmlns="http://www.springframework.org/schema/beans"
-       xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-       xmlns:util="http://www.springframework.org/schema/util"
-       xsi:schemaLocation="
-        http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans.xsd
-        http://www.springframework.org/schema/util http://www.springframework.org/schema/util/spring-util.xsd">
-    <description>Ignite Spring configuration file in gar-file.</description>
-
-    <bean id="userVersion" class="java.lang.String">
-        <constructor-arg value="1"/>
-    </bean>
-
-    <!--
-        Test tasks specification.
-    -->
-    <util:list id="tasks">
-        <value>org.apache.ignite.tests.p2p.P2PTestTaskExternalPath1</value>
-        <value>org.apache.ignite.tests.p2p.P2PTestTaskExternalPath2</value>
-    </util:list>
-</beans>
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/file/GridFileDeploymentSelfTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/file/GridFileDeploymentSelfTest.java
index 72eec11..d307a32 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/file/GridFileDeploymentSelfTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/file/GridFileDeploymentSelfTest.java
@@ -59,7 +59,7 @@
     }
 
     /**
-     * Tests task from file 'deployfile_nodescr.gar'.
+     * Tests task from file 'deployfile-nodescr.gar'.
      *
      * Looks for task {@code GridUriDeploymentTestTask4} without descriptor file from GAR-file.
      * That task loads resource {@code spring.xml}.
@@ -76,7 +76,7 @@
     }
 
     /**
-     * Tests task from file 'deployfile_bad.gar'.
+     * Tests task from file 'deployfile-bad.gar'.
      *
      * Looks for tasks {@code GridUriDeploymentAbstractTestTask}
      * {@code GridInnerTestTask}
@@ -87,13 +87,17 @@
      */
     public void testBadDeployment() throws Exception {
         checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentAbstractTestTask");
+
         checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridInnerTestTask");
+        checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentInnerTestTask$GridInnerTestTask");
+        checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentInnerTestTask.GridInnerTestTask");
+
         checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentInterfaceTestTask");
         checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentNonePublicTestTask");
     }
 
     /**
-     * Tests task from file 'deploy_depend.gar'.
+     * Tests task from file 'deployfile-depend.gar'.
      *
      * Looks for task {@code GridUriDeploymentTestTask1} with descriptor file from GAR-file.
      * That task loads resource {@code spring1.xml} and imports external class from /lib/*.jar
@@ -107,10 +111,12 @@
      */
     public void testDependenceDeployment() throws Exception {
         checkTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask1");
+        getSpi().findResource("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask1")
+            .getResourceClass().newInstance();
     }
 
     /**
-     * Tests task from file 'deploy_nodescr_depend.gar'.
+     * Tests task from file 'deploydir-nodescr-depend.gar'.
      *
      * Looks for task {@code GridUriDeploymentTestTask2} without descriptor file from GAR-file.
      * That task loads resource {@code spring2.xml} and imports external class from /lib/*.jar
@@ -124,6 +130,8 @@
      */
     public void testNoDescriptorDependenceDeployment() throws Exception {
         checkTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask2");
+        getSpi().findResource("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask2")
+            .getResourceClass().newInstance();
     }
 
     /**
@@ -149,4 +157,4 @@
         assert getSpi().findResource("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestWithNameTask6")
             == null : "Task from GAR with invalid signature should not be deployed.";
     }
-}
\ No newline at end of file
+}
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/http/GridHttpDeploymentSelfTest.java b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/http/GridHttpDeploymentSelfTest.java
index c0044c3..333f707 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/http/GridHttpDeploymentSelfTest.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/spi/deployment/uri/scanners/http/GridHttpDeploymentSelfTest.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.Collections;
 import java.util.List;
-import javax.servlet.http.HttpServletResponse;
 import org.apache.ignite.internal.util.typedef.internal.U;
 import org.apache.ignite.spi.deployment.uri.GridUriDeploymentAbstractSelfTest;
 import org.apache.ignite.spi.deployment.uri.UriDeploymentSpi;
@@ -31,9 +30,6 @@
 import org.eclipse.jetty.server.Server;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.handler.ResourceHandler;
-import org.eclipse.jetty.util.resource.Resource;
-
-import static org.eclipse.jetty.http.HttpHeader.LAST_MODIFIED;
 
 /**
  * Test http scanner.
@@ -80,13 +76,7 @@
 
         srv.addConnector(conn);
 
-        ResourceHandler hnd = new ResourceHandler() {
-            @Override protected void doResponseHeaders(HttpServletResponse resp, Resource res, String mimeTyp) {
-                super.doResponseHeaders(resp, res, mimeTyp);
-
-                resp.setDateHeader(LAST_MODIFIED.asString(), res.lastModified());
-            }
-        };
+        ResourceHandler hnd = new ResourceHandler();
 
         hnd.setDirectoriesListed(true);
 
@@ -118,16 +108,16 @@
      * @throws Exception If failed.
      */
     public void testDeployUndeploy2Files() throws Exception {
-        checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+        String taskName = "org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3";
+
+        checkNoTask(taskName);
 
         try {
             copyToResourceBase(LIBS_GAR_FILE_PATH, LIBS_GAR);
 
             copyToResourceBase(CLASSES_GAR_FILE_PATH, CLASSES_GAR);
 
-            Thread.sleep(FREQ + 3000);
-
-            checkTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+            waitForTask(taskName, true, FREQ + 3000);
         }
         catch (Exception e) {
             e.printStackTrace();
@@ -136,9 +126,7 @@
             deleteFromResourceBase(LIBS_GAR);
             deleteFromResourceBase(CLASSES_GAR);
 
-            Thread.sleep(FREQ + 3000);
-
-            checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+            waitForTask(taskName, false, FREQ + 3000);
         }
     }
 
@@ -146,20 +134,18 @@
      * @throws Exception If failed.
      */
     public void testSameContantFiles() throws Exception {
-        checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+        String taskName = "org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3";
+
+        checkNoTask(taskName);
 
         try {
             copyToResourceBase(ALL_GAR_FILE_PATH, ALL_GAR);
 
-            Thread.sleep(FREQ + 3000);
-
-            checkTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+            waitForTask(taskName, true, FREQ + 3000);
 
             copyToResourceBase(ALL_GAR_FILE_PATH, "file-copy.gar");
 
-            Thread.sleep(FREQ + 3000);
-
-            checkTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+            waitForTask(taskName, true, FREQ + 3000);
         }
         catch (Throwable e) {
             e.printStackTrace();
@@ -168,9 +154,7 @@
             deleteFromResourceBase(ALL_GAR);
             deleteFromResourceBase("file-copy.gar");
 
-            Thread.sleep(FREQ + 3000);
-
-            checkNoTask("org.apache.ignite.spi.deployment.uri.tasks.GridUriDeploymentTestTask3");
+            waitForTask(taskName, false, FREQ + 3000);
         }
     }
 
diff --git a/modules/urideploy/src/test/java/org/apache/ignite/testsuites/IgniteUriDeploymentTestSuite.java b/modules/urideploy/src/test/java/org/apache/ignite/testsuites/IgniteUriDeploymentTestSuite.java
index 817b346..345ce80 100644
--- a/modules/urideploy/src/test/java/org/apache/ignite/testsuites/IgniteUriDeploymentTestSuite.java
+++ b/modules/urideploy/src/test/java/org/apache/ignite/testsuites/IgniteUriDeploymentTestSuite.java
@@ -56,9 +56,9 @@
         suite.addTest(new TestSuite(GridFileDeploymentUndeploySelfTest.class));
         suite.addTest(new TestSuite(GridHttpDeploymentSelfTest.class));
 
-        //suite.addTest(new TestSuite(GridFileDeploymentSelfTest.class));
-        //suite.addTest(new TestSuite(GridUriDeploymentMultiScannersErrorThrottlingTest.class));
-        //suite.addTest(new TestSuite(GridUriDeploymentMd5CheckSelfTest.class));
+        suite.addTest(new TestSuite(GridFileDeploymentSelfTest.class));
+        suite.addTest(new TestSuite(GridUriDeploymentMultiScannersErrorThrottlingTest.class));
+        suite.addTest(new TestSuite(GridUriDeploymentMd5CheckSelfTest.class));
 
         // GAR Ant task tests.
         suite.addTest(IgniteToolsSelfTestSuite.suite());
diff --git a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala
index e3e2001..25f1212 100755
--- a/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala
+++ b/modules/visor-console/src/main/scala/org/apache/ignite/visor/commands/cache/VisorCacheCommand.scala
@@ -356,7 +356,7 @@
 
             val sumT = VisorTextTable()
 
-            sumT #= ("Name(@)", "Mode", "Nodes", "Entries (Heap / Off-heap)", "Hits", "Misses", "Reads", "Writes")
+            sumT #= ("Name(@)", "Mode", "Nodes", "Total entries (Heap / Off-heap)", "Primary entries (Heap / Off-heap)", "Hits", "Misses", "Reads", "Writes")
 
             sortAggregatedData(aggrData, sortType.getOrElse("cn"), reversed).foreach(
                 ad => {
@@ -367,6 +367,7 @@
                         mkCacheName(ad.getName),
                         ad.getMode,
                         ad.getNodes.size(),
+                        (ad.getTotalHeapSize + ad.getTotalOffHeapSize) + " (" + ad.getTotalHeapSize + " / " + ad.getTotalOffHeapSize + ")",
                         (
                             "min: " + (ad.getMinimumHeapSize + ad.getMinimumOffHeapPrimarySize) +
                                 " (" + ad.getMinimumHeapSize + " / " + ad.getMinimumOffHeapPrimarySize + ")",
@@ -424,6 +425,8 @@
                     val csT = VisorTextTable()
 
                     csT += ("Name(@)", cacheNameVar)
+                    csT += ("Total entries (Heap / Off-heap)", (ad.getTotalHeapSize + ad.getTotalOffHeapSize) +
+                        " (" + ad.getTotalHeapSize + " / " + ad.getTotalOffHeapSize + ")")
                     csT += ("Nodes", m.size())
                     csT += ("Total size Min/Avg/Max", (ad.getMinimumHeapSize + ad.getMinimumOffHeapPrimarySize) + " / " +
                         formatDouble(ad.getAverageHeapSize + ad.getAverageOffHeapPrimarySize) + " / " +
@@ -435,7 +438,7 @@
 
                     val ciT = VisorTextTable()
 
-                    ciT #= ("Node ID8(@), IP", "CPUs", "Heap Used", "CPU Load", "Up Time", "Size", "Hi/Mi/Rd/Wr")
+                    ciT #= ("Node ID8(@), IP", "CPUs", "Heap Used", "CPU Load", "Up Time", "Size (Primary / Backup)", "Hi/Mi/Rd/Wr")
 
                     sortData(m.toMap, sortType.getOrElse("hi"), reversed).foreach { case (nid, cm) =>
                         val nm = ignite.cluster.node(nid).metrics()
@@ -448,10 +451,14 @@
                             formatDouble(nm.getCurrentCpuLoad * 100d) + " %",
                             X.timeSpan2HMSM(nm.getUpTime),
                             (
-                                "Total: " + (cm.getHeapEntriesCount + cm.getOffHeapPrimaryEntriesCount),
-                                "  Heap: " + cm.getHeapEntriesCount,
-                                "  Off-Heap: " + cm.getOffHeapPrimaryEntriesCount,
-                                "  Off-Heap Memory: " + formatMemory(cm.getOffHeapAllocatedSize)
+                                "Total: " + (cm.getHeapEntriesCount + cm.getOffHeapEntriesCount) +
+                                    " (" + (cm.getHeapEntriesCount + cm.getOffHeapPrimaryEntriesCount) + " / " + cm.getOffHeapBackupEntriesCount + ")",
+                                "  Heap: " + cm.getHeapEntriesCount + " (" + cm.getHeapEntriesCount + " / " + NA + ")",
+                                "  Off-Heap: " + cm.getOffHeapEntriesCount +
+                                    " (" + cm.getOffHeapPrimaryEntriesCount + " / " + cm.getOffHeapBackupEntriesCount + ")",
+                                "  Off-Heap Memory: " + (if (cm.getOffHeapPrimaryEntriesCount == 0) "0"
+                                    else if (cm.getOffHeapAllocatedSize > 0) formatMemory(cm.getOffHeapAllocatedSize)
+                                    else NA)
                             ),
                             (
                                 "Hi: " + cm.getHits,
diff --git a/modules/web-console/backend/app/agentsHandler.js b/modules/web-console/backend/app/agentsHandler.js
index 4af81b6..6d8c621 100644
--- a/modules/web-console/backend/app/agentsHandler.js
+++ b/modules/web-console/backend/app/agentsHandler.js
@@ -244,6 +244,12 @@
             });
 
             sock.on('cluster:topology', (top) => {
+                if (_.isNil(top)) {
+                    console.log('Invalid format of message: "cluster:topology"');
+
+                    return;
+                }
+
                 const cluster = this.getOrCreateCluster(top);
 
                 _.forEach(this.topLsnrs, (lsnr) => lsnr(agentSocket, cluster, top));
@@ -283,19 +289,8 @@
             _.forEach(tokens, (token) => {
                 this._agentSockets.add(token, agentSocket);
 
-                // TODO start demo if needed.
-                // const browserSockets = _.filter(this._browserSockets[token], 'request._query.IgniteDemoMode');
-                //
-                // // First agent join after user start demo.
-                // if (_.size(browserSockets))
-                //     agentSocket.runDemoCluster(token, browserSockets);
-
                 this._browsersHnd.agentStats(token);
             });
-
-            // ioSocket.on('cluster:topology', (top) => {
-            //
-            // });
         }
 
         /**
@@ -315,7 +310,7 @@
                     this.io = socketio(srv, {path: '/agents'});
 
                     this.io.on('connection', (sock) => {
-                        sock.on('agent:auth', ({ver, bt, tokens, disableDemo}, cb) => {
+                        sock.on('agent:auth', ({ver, bt, tokens, disableDemo} = {}, cb) => {
                             if (_.isEmpty(tokens))
                                 return cb('Tokens not set. Please reload agent archive or check settings');
 
diff --git a/modules/web-console/backend/app/browsersHandler.js b/modules/web-console/backend/app/browsersHandler.js
index c3c2ea4..d0cd112 100644
--- a/modules/web-console/backend/app/browsersHandler.js
+++ b/modules/web-console/backend/app/browsersHandler.js
@@ -204,8 +204,20 @@
 
             nodeListeners(sock) {
                 // Return command result from grid to browser.
-                sock.on('node:rest', ({clusterId, params, credentials}, cb) => {
-                    const demo = sock.request._query.IgniteDemoMode === 'true';
+                sock.on('node:rest', (arg, cb) => {
+                    const {clusterId, params, credentials} = arg || {};
+
+                    if (!_.isFunction(cb))
+                        cb = console.log;
+
+                    const demo = _.get(sock, 'request._query.IgniteDemoMode') === 'true';
+
+                    if ((_.isNil(clusterId) && !demo) || _.isNil(params)) {
+                        console.log('Received invalid message: "node:rest" on socket:', JSON.stringify(sock.handshake));
+
+                        return cb('Invalid format of message: "node:rest"');
+                    }
+
                     const token = sock.request.user.token;
 
                     const agent = this._agentHnd.agent(token, demo, clusterId);
@@ -233,8 +245,20 @@
                 this.registerVisorTask('toggleClusterState', internalVisor('misc.VisorChangeGridActiveStateTask'), internalVisor('misc.VisorChangeGridActiveStateTaskArg'));
 
                 // Return command result from grid to browser.
-                sock.on('node:visor', ({clusterId, params = {}, credentials} = {}, cb) => {
-                    const demo = sock.request._query.IgniteDemoMode === 'true';
+                sock.on('node:visor', (arg, cb) => {
+                    const {clusterId, params, credentials} = arg || {};
+
+                    if (!_.isFunction(cb))
+                        cb = console.log;
+
+                    const demo = _.get(sock, 'request._query.IgniteDemoMode') === 'true';
+
+                    if ((_.isNil(clusterId) && !demo) || _.isNil(params)) {
+                        console.log('Received invalid message: "node:visor" on socket:', JSON.stringify(sock.handshake));
+
+                        return cb('Invalid format of message: "node:visor"');
+                    }
+
                     const token = sock.request.user.token;
 
                     const {taskId, nids, args = []} = params;
diff --git a/modules/web-console/e2e/testcafe/components/FormField.js b/modules/web-console/e2e/testcafe/components/FormField.js
index 71d951c..4ddfc61 100644
--- a/modules/web-console/e2e/testcafe/components/FormField.js
+++ b/modules/web-console/e2e/testcafe/components/FormField.js
@@ -19,10 +19,10 @@
 import {AngularJSSelector} from 'testcafe-angular-selectors';
 
 export class FormField {
-    static ROOT_SELECTOR = '.ignite-form-field';
-    static LABEL_SELECTOR = '.ignite-form-field__label';
+    static ROOT_SELECTOR = '.form-field';
+    static LABEL_SELECTOR = '.form-field__label';
     static CONTROL_SELECTOR = '[ng-model]';
-    static ERRORS_SELECTOR = '.ignite-form-field__errors';
+    static ERRORS_SELECTOR = '.form-field__errors';
 
     /** @type {ReturnType<Selector>} */
     _selector;
diff --git a/modules/web-console/e2e/testcafe/components/modalInput.js b/modules/web-console/e2e/testcafe/components/modalInput.js
index d9ab399..845a3d7 100644
--- a/modules/web-console/e2e/testcafe/components/modalInput.js
+++ b/modules/web-console/e2e/testcafe/components/modalInput.js
@@ -14,16 +14,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-
-import {Selector, t} from 'testcafe';
+import {FormField} from './FormField';
+import {t} from 'testcafe';
 
 export class ModalInput {
     constructor() {
-        this.valueInput = Selector('#input-fieldInput');
+        this.valueInput = new FormField({ id: 'inputDialogFieldInput' });
     }
 
     async enterValue(value) {
-        await t.typeText(this.valueInput, value);
+        await t.typeText(this.valueInput.control, value);
     }
 
     async confirm() {
diff --git a/modules/web-console/e2e/testcafe/page-models/PageConfigurationBasic.js b/modules/web-console/e2e/testcafe/page-models/PageConfigurationBasic.js
index bef6606..e979da0 100644
--- a/modules/web-console/e2e/testcafe/page-models/PageConfigurationBasic.js
+++ b/modules/web-console/e2e/testcafe/page-models/PageConfigurationBasic.js
@@ -40,7 +40,7 @@
     constructor() {
         this._selector = Selector('page-configure-basic');
         this.versionPicker = new VersionPicker();
-        this.totalOffheapSizeInput = Selector('pc-form-field-size#memory');
+        this.totalOffheapSizeInput = Selector('form-field-size#memory');
         this.mainFormAction = Selector('.pc-form-actions-panel .btn-ignite-group .btn-ignite:nth-of-type(1)');
         this.contextFormActionsButton = Selector('.pc-form-actions-panel .btn-ignite-group .btn-ignite:nth-of-type(2)');
         this.contextSaveButton = Selector('a[role=menuitem]').withText(new RegExp(`^${PageConfigurationBasic.SAVE_CHANGES_LABEL}$`));
diff --git a/modules/web-console/frontend/.babelrc b/modules/web-console/frontend/.babelrc
index 1759c44..364c90c 100644
--- a/modules/web-console/frontend/.babelrc
+++ b/modules/web-console/frontend/.babelrc
@@ -1,4 +1,15 @@
 {
-  "presets": ["es2015", "stage-1"],
-  "plugins": ["add-module-exports", "transform-object-rest-spread"]
-}
+    "presets": [
+        ["@babel/env", {
+            "targets": {
+                "browsers": [">1%", "not ie 11", "not op_mini all"]
+            }
+        }]
+    ],
+    "plugins": [
+        ["@babel/plugin-proposal-class-properties", { "loose" : true }],
+        "@babel/plugin-proposal-object-rest-spread",
+        "@babel/plugin-syntax-dynamic-import",
+        "@babel/plugin-transform-parameters"
+    ]
+}
\ No newline at end of file
diff --git a/modules/web-console/frontend/.eslintrc b/modules/web-console/frontend/.eslintrc
index 805b339..b6adbff 100644
--- a/modules/web-console/frontend/.eslintrc
+++ b/modules/web-console/frontend/.eslintrc
@@ -150,7 +150,7 @@
     no-sparse-arrays: 1
     no-sync: 0
     no-ternary: 0
-    no-trailing-spaces: 2
+    no-trailing-spaces: ["error", {"ignoreComments": true}]
     no-throw-literal: 0
     no-this-before-super: 2
     no-unexpected-multiline: 2
diff --git a/modules/web-console/frontend/app/app.js b/modules/web-console/frontend/app/app.js
index 1f81f86..3fb6ae0 100644
--- a/modules/web-console/frontend/app/app.js
+++ b/modules/web-console/frontend/app/app.js
@@ -15,6 +15,8 @@
  * limitations under the License.
  */
 
+import './style.scss';
+
 import './vendor';
 import '../public/stylesheets/style.scss';
 import '../app/primitives';
@@ -69,7 +71,6 @@
 import igniteUiAceTabs from './directives/ui-ace-tabs.directive';
 import igniteRetainSelection from './directives/retain-selection.directive';
 import btnIgniteLink from './directives/btn-ignite-link';
-import exposeInput from './components/expose-ignite-form-field-control';
 
 // Services.
 import ChartColors from './services/ChartColors.service';
@@ -88,7 +89,6 @@
 import Messages from './services/Messages.service';
 import ErrorParser from './services/ErrorParser.service';
 import ModelNormalizer from './services/ModelNormalizer.service.js';
-import UnsavedChangesGuard from './services/UnsavedChangesGuard.service';
 import Caches from './services/Caches';
 import {CSV} from './services/CSV';
 import {$exceptionHandler} from './services/exceptionHandler.js';
@@ -130,6 +130,7 @@
 import gridShowingRows from './components/grid-showing-rows';
 import bsSelectMenu from './components/bs-select-menu';
 import protectFromBsSelectRender from './components/protect-from-bs-select-render';
+import uiGrid from './components/ui-grid';
 import uiGridHovering from './components/ui-grid-hovering';
 import uiGridFilters from './components/ui-grid-filters';
 import uiGridColumnResizer from './components/ui-grid-column-resizer';
@@ -145,6 +146,7 @@
 import formField from './components/form-field';
 import igniteChart from './components/ignite-chart';
 import igniteChartSelector from './components/ignite-chart-series-selector';
+import igniteStatus from './components/ignite-status';
 
 import pageProfile from './components/page-profile';
 import pagePasswordChanged from './components/page-password-changed';
@@ -227,6 +229,7 @@
     gridExport.name,
     gridShowingRows.name,
     bsSelectMenu.name,
+    uiGrid.name,
     uiGridHovering.name,
     uiGridFilters.name,
     uiGridColumnResizer.name,
@@ -241,7 +244,6 @@
     connectedClustersDialog.name,
     igniteListOfRegisteredUsers.name,
     pageProfile.name,
-    exposeInput.name,
     pageLanding.name,
     pagePasswordChanged.name,
     pagePasswordReset.name,
@@ -254,10 +256,11 @@
     passwordVisibility.name,
     igniteChart.name,
     igniteChartSelector.name,
+    igniteStatus.name,
     progressLine.name,
     formField.name
 ])
-.service($exceptionHandler.name, $exceptionHandler)
+.service('$exceptionHandler', $exceptionHandler)
 // Directives.
 .directive(...igniteAutoFocus)
 .directive(...igniteBsAffixUpdate)
@@ -286,7 +289,7 @@
 .service('SqlTypes', SqlTypes)
 .service(...ChartColors)
 .service(...IgniteConfirm)
-.service(Confirm.name, Confirm)
+.service('Confirm', Confirm)
 .service('IgniteConfirmBatch', ConfirmBatch)
 .service(...CopyToClipboard)
 .service(...Countries)
@@ -298,10 +301,9 @@
 .service(...LegacyTable)
 .service(...FormUtils)
 .service(...LegacyUtils)
-.service(...UnsavedChangesGuard)
 .service('IgniteActivitiesUserDialog', IgniteActivitiesUserDialog)
 .service('Caches', Caches)
-.service(CSV.name, CSV)
+.service('CSV', CSV)
 .service('IGFSs', IGFSs)
 .service('Models', Models)
 // Filters.
diff --git a/modules/web-console/frontend/app/browserUpdate/index.js b/modules/web-console/frontend/app/browserUpdate/index.js
index 799c4fc..3930ac7 100644
--- a/modules/web-console/frontend/app/browserUpdate/index.js
+++ b/modules/web-console/frontend/app/browserUpdate/index.js
@@ -30,11 +30,7 @@
     l: 'en',
     mobile: false,
     api: 5,
-    text: `
-        <b>Outdated or unsupported browser detected.</b>
-        Web Console may work incorrectly. Please update to one of modern fully supported browser!
-        <a {up_but}>Update</a>
-        <a {ignore_but}>Ignore</a>
-    `,
+    // This should work in older browsers
+    text: '<b>Outdated or unsupported browser detected.</b> Web Console may work incorrectly. Please update to one of modern fully supported browsers! <a {up_but}>Update</a> <a {ignore_but}>Ignore</a>',
     reminder: 0
 });
diff --git a/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.controller.js b/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.controller.js
index 078f725..fb9d15a 100644
--- a/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.controller.js
+++ b/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.controller.js
@@ -22,6 +22,12 @@
         const $ctrl = this;
 
         $ctrl.user = user;
-        $ctrl.data = _.map(user.activitiesDetail, (amount, action) => ({ action, amount }));
+        $ctrl.data = _.map(user.activitiesDetail, (amount, action) => ({action, amount}));
+
+        $ctrl.columnDefs = [
+            { displayName: 'Description', field: 'action', enableFiltering: false, cellFilter: 'translate'},
+            { displayName: 'Action', field: 'action', enableFiltering: false},
+            { displayName: 'Visited', field: 'amount', enableFiltering: false}
+        ];
     }
 }
diff --git a/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.tpl.pug b/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.tpl.pug
index 33d5f62..7e9d3f5 100644
--- a/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.tpl.pug
+++ b/modules/web-console/frontend/app/components/activities-user-dialog/activities-user-dialog.tpl.pug
@@ -14,29 +14,21 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.modal.modal--ignite(tabindex='-1' role='dialog')
+.modal.modal--ignite.theme--ignite(tabindex='-1' role='dialog')
     .modal-dialog
         .modal-content
             .modal-header
-                h4.modal-title
-                    i.fa.fa-info-circle
-                    | Activity details: {{ ctrl.user.userName }}
+                h4.modal-title Activity details: {{ ctrl.user.userName }}
                 button.close(type='button' aria-label='Close' ng-click='$hide()')
                      svg(ignite-icon='cross')
-            .modal-body.modal-body-with-scroll(id='activities-user-dialog')
+            .modal-body.modal-body-with-scroll
                 .panel--ignite
-                    table.table--ignite(scrollable-container='#activities-user-dialog' st-table='displayedRows' st-safe-src='ctrl.data')
-                        thead
-                            th(st-sort='action | translate') Description
-                            th(st-sort='action') Action
-                            th(st-sort='amount') Visited
-                        tbody
-                            tr(ng-repeat='row in displayedRows')
-                                td
-                                    .text-overflow {{ row.action | translate }}
-                                td
-                                    .text-overflow {{ row.action }}
-                                td.text-right
-                                    .text-overflow {{ row.amount }}
+                    ignite-grid-table(
+                        items='ctrl.data'
+                        column-defs='ctrl.columnDefs'
+                        grid-thin='true'
+                    )
+
             .modal-footer
-                button.btn-ignite.btn-ignite--success(id='confirm-btn-confirm' ng-click='$hide()') Close
+                div
+                    button.btn-ignite.btn-ignite--success(ng-click='$hide()') Close
diff --git a/modules/web-console/frontend/app/components/activities-user-dialog/index.js b/modules/web-console/frontend/app/components/activities-user-dialog/index.js
index 13c1d95..ab3acbb 100644
--- a/modules/web-console/frontend/app/components/activities-user-dialog/index.js
+++ b/modules/web-console/frontend/app/components/activities-user-dialog/index.js
@@ -18,17 +18,19 @@
 import controller from './activities-user-dialog.controller';
 import templateUrl from './activities-user-dialog.tpl.pug';
 
-export default ['$modal', ($modal) => ({ show = true, user }) => {
-    const ActivitiesUserDialog = $modal({
-        templateUrl,
-        show,
-        resolve: {
-            user: () => user
-        },
-        controller,
-        controllerAs: 'ctrl'
-    });
+export default ['$modal', function($modal) {
+    return function({ show = true, user }) {
+        const ActivitiesUserDialog = $modal({
+            templateUrl,
+            show,
+            resolve: {
+                user: () => user
+            },
+            controller,
+            controllerAs: 'ctrl'
+        });
 
-    return ActivitiesUserDialog.$promise
-         .then(() => ActivitiesUserDialog);
+        return ActivitiesUserDialog.$promise
+             .then(() => ActivitiesUserDialog);
+    };
 }];
diff --git a/modules/web-console/frontend/app/components/bs-select-menu/strip.filter.js b/modules/web-console/frontend/app/components/bs-select-menu/strip.filter.js
index 4dbe58b..f2a18e4 100644
--- a/modules/web-console/frontend/app/components/bs-select-menu/strip.filter.js
+++ b/modules/web-console/frontend/app/components/bs-select-menu/strip.filter.js
@@ -17,6 +17,6 @@
 
 export default function() {
     return function(val) {
-        return val.replace(/(<\/?\w+>)/igm, '');
+        return val ? val.replace(/(<\/?\w+>)/igm, '') : '';
     };
 }
diff --git a/modules/web-console/frontend/app/components/bs-select-menu/style.scss b/modules/web-console/frontend/app/components/bs-select-menu/style.scss
index bfa0063..ac3991b 100644
--- a/modules/web-console/frontend/app/components/bs-select-menu/style.scss
+++ b/modules/web-console/frontend/app/components/bs-select-menu/style.scss
@@ -48,9 +48,8 @@
     }
 
     .bssm-item-text {
-        overflow: hidden;
+        overflow: visible;
         white-space: nowrap;
-        text-overflow: ellipsis;
     }
 
     &>li {
@@ -63,6 +62,7 @@
             padding-bottom: 9px;
             background-color: transparent;
             border-radius: 0;
+            padding-right: 30px;
 
             &:hover {
                 background-color: #eeeeee;
diff --git a/modules/web-console/frontend/app/components/connected-clusters-badge/controller.js b/modules/web-console/frontend/app/components/connected-clusters-badge/controller.js
index 64b23cf..8a622d9 100644
--- a/modules/web-console/frontend/app/components/connected-clusters-badge/controller.js
+++ b/modules/web-console/frontend/app/components/connected-clusters-badge/controller.js
@@ -18,7 +18,7 @@
 import AgentManager from 'app/modules/agent/AgentManager.service';
 
 export default class {
-    static $inject = [AgentManager.name, 'ConnectedClustersDialog'];
+    static $inject = ['AgentManager', 'ConnectedClustersDialog'];
 
     /** @type {Number} */
     connectedClusters = 0;
diff --git a/modules/web-console/frontend/app/components/connected-clusters-dialog/components/cell-logout/index.js b/modules/web-console/frontend/app/components/connected-clusters-dialog/components/cell-logout/index.js
index 26f2b12..1f7af80 100644
--- a/modules/web-console/frontend/app/components/connected-clusters-dialog/components/cell-logout/index.js
+++ b/modules/web-console/frontend/app/components/connected-clusters-dialog/components/cell-logout/index.js
@@ -20,7 +20,7 @@
 import AgentManager from 'app/modules/agent/AgentManager.service';
 
 class controller {
-    static $inject = [AgentManager.name];
+    static $inject = ['AgentManager'];
 
     /**
      * @param {AgentManager} agentMgr
diff --git a/modules/web-console/frontend/app/components/connected-clusters-dialog/template.tpl.pug b/modules/web-console/frontend/app/components/connected-clusters-dialog/template.tpl.pug
index dd7b4fc..5ea83fd 100644
--- a/modules/web-console/frontend/app/components/connected-clusters-dialog/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/connected-clusters-dialog/template.tpl.pug
@@ -16,7 +16,7 @@
 
 include /app/helpers/jade/mixins
 
-.modal.modal--ignite.theme--ignite.connected-clusters-dialog(tabindex='-1' role='dialog')
+.modal.modal--ignite.theme-ignite.connected-clusters-dialog(tabindex='-1' role='dialog')
     .modal-dialog.modal-dialog--adjust-height
         form.modal-content(name='$ctrl.form' novalidate)
             .modal-header
@@ -26,9 +26,9 @@
                      svg(ignite-icon="cross")
             .modal-body.modal-body-with-scroll
                 .panel--ignite
-                    ul.tabs.tabs--blue
                     connected-clusters-list(data-options='$ctrl.clusters')
 
             .modal-footer
-                button.btn-ignite.btn-ignite--success(type='button' ng-click='$hide()') Ok
-                
+                div
+                    button.btn-ignite.btn-ignite--success(type='button' ng-click='$hide()') Ok
+
diff --git a/modules/web-console/frontend/app/components/expose-ignite-form-field-control/directives.js b/modules/web-console/frontend/app/components/expose-ignite-form-field-control/directives.js
deleted file mode 100644
index 5184032..0000000
--- a/modules/web-console/frontend/app/components/expose-ignite-form-field-control/directives.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// eslint-disable-next-line
-import {IgniteFormField} from 'app/components/page-configure/components/pcValidation'
-
-/**
- * Exposes input to .ignite-form-field scope
- */
-class ExposeIgniteFormFieldControl {
-    /** @type {IgniteFormField} */
-    formField;
-    /** @type {ng.INgModelController} */
-    ngModel;
-    /** 
-     * Name used to access control from $scope.
-     * @type {string}
-     */
-    name;
-
-    $onInit() {
-        if (this.formField && this.ngModel) this.formField.exposeControl(this.ngModel, this.name);
-    }
-}
-
-export function exposeIgniteFormFieldControl() {
-    return {
-        restrict: 'A',
-        controller: ExposeIgniteFormFieldControl,
-        bindToController: {
-            name: '@exposeIgniteFormFieldControl'
-        },
-        require: {
-            formField: '^^?igniteFormField',
-            ngModel: '?ngModel'
-        },
-        scope: false
-    };
-}
diff --git a/modules/web-console/frontend/app/components/expose-ignite-form-field-control/index.js b/modules/web-console/frontend/app/components/expose-ignite-form-field-control/index.js
deleted file mode 100644
index 9a22478..0000000
--- a/modules/web-console/frontend/app/components/expose-ignite-form-field-control/index.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import angular from 'angular';
-import {igniteFormField, exposeIgniteFormFieldControl} from './directives';
-
-export default angular
-.module('expose-ignite-form-field-control', [])
-.directive('exposeIgniteFormFieldControl', exposeIgniteFormFieldControl);
diff --git a/modules/web-console/frontend/app/components/form-field/components/form-field-size/controller.js b/modules/web-console/frontend/app/components/form-field/components/form-field-size/controller.js
new file mode 100644
index 0000000..7663e9d
--- /dev/null
+++ b/modules/web-console/frontend/app/components/form-field/components/form-field-size/controller.js
@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import get from 'lodash/get';
+
+export default class PCFormFieldSizeController {
+    /** @type {ng.INgModelController} */
+    ngModel;
+    /** @type {number} */
+    min;
+    /** @type {number} */
+    max;
+    /** @type {ng.ICompiledExpression} */
+    onScaleChange;
+    /** @type {ng.IFormController} */
+    innerForm;
+
+    static $inject = ['$element', '$attrs'];
+
+    /** @type {ig.config.formFieldSize.ISizeTypes} */
+    static sizeTypes = {
+        bytes: [
+            {label: 'Kb', value: 1024},
+            {label: 'Mb', value: 1024 * 1024},
+            {label: 'Gb', value: 1024 * 1024 * 1024}
+        ],
+        seconds: [
+            {label: 'ns', value: 1 / 1000},
+            {label: 'ms', value: 1},
+            {label: 's', value: 1000}
+        ],
+        time: [
+            {label: 'sec', value: 1},
+            {label: 'min', value: 60},
+            {label: 'hour', value: 60 * 60}
+        ]
+    };
+
+    /**
+     * @param {JQLite} $element
+     * @param {ng.IAttributes} $attrs
+     */
+    constructor($element, $attrs) {
+        this.$element = $element;
+        this.$attrs = $attrs;
+        this.id = Math.random();
+    }
+
+    $onDestroy() {
+        this.$element = null;
+    }
+
+    $onInit() {
+        if (!this.min) this.min = 0;
+        if (!this.sizesMenu) this.setDefaultSizeType();
+        this.$element.addClass('form-field');
+        this.ngModel.$render = () => this.assignValue(this.ngModel.$viewValue);
+    }
+
+    $postLink() {
+        if ('min' in this.$attrs)
+            this.ngModel.$validators.min = (value) => this.ngModel.$isEmpty(value) || value === void 0 || value >= this.min;
+        if ('max' in this.$attrs)
+            this.ngModel.$validators.max = (value) => this.ngModel.$isEmpty(value) || value === void 0 || value <= this.max;
+
+        this.ngModel.$validators.step = (value) => this.ngModel.$isEmpty(value) || value === void 0 || Math.floor(value) === value;
+    }
+
+    $onChanges(changes) {
+        if ('sizeType' in changes) {
+            this.sizesMenu = PCFormFieldSizeController.sizeTypes[changes.sizeType.currentValue];
+            this.sizeScale = this.chooseSizeScale(get(changes, 'sizeScaleLabel.currentValue'));
+        }
+        if (!this.sizesMenu) this.setDefaultSizeType();
+        if ('sizeScaleLabel' in changes)
+            this.sizeScale = this.chooseSizeScale(changes.sizeScaleLabel.currentValue);
+
+        if ('min' in changes) this.ngModel.$validate();
+    }
+
+    /**
+     * @param {ig.config.formFieldSize.ISizeTypeOption} value
+     */
+    set sizeScale(value) {
+        this._sizeScale = value;
+        if (this.onScaleChange) this.onScaleChange({$event: this.sizeScale});
+        if (this.ngModel) this.assignValue(this.ngModel.$viewValue);
+    }
+
+    get sizeScale() {
+        return this._sizeScale;
+    }
+
+    /**
+     * @param {number} rawValue
+     */
+    assignValue(rawValue) {
+        if (!this.sizesMenu) this.setDefaultSizeType();
+        return this.value = rawValue
+            ? rawValue / this.sizeScale.value
+            : rawValue;
+    }
+
+    onValueChange() {
+        this.ngModel.$setViewValue(this.value ? this.value * this.sizeScale.value : this.value);
+    }
+
+    _defaultLabel() {
+        if (!this.sizesMenu)
+            return;
+
+        return this.sizesMenu[1].label;
+    }
+
+    chooseSizeScale(label = this._defaultLabel()) {
+        if (!label)
+            return;
+
+        return this.sizesMenu.find((option) => option.label.toLowerCase() === label.toLowerCase());
+    }
+
+    setDefaultSizeType() {
+        this.sizesMenu = PCFormFieldSizeController.sizeTypes.bytes;
+        this.sizeScale = this.chooseSizeScale();
+    }
+}
diff --git a/modules/web-console/frontend/app/components/form-field/components/form-field-size/index.js b/modules/web-console/frontend/app/components/form-field/components/form-field-size/index.js
new file mode 100644
index 0000000..5e08df2
--- /dev/null
+++ b/modules/web-console/frontend/app/components/form-field/components/form-field-size/index.js
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import './style.scss';
+import template from './template.pug';
+import controller from './controller';
+
+export default {
+    controller,
+    template,
+    transclude: true,
+    require: {
+        ngModel: 'ngModel'
+    },
+    bindings: {
+        label: '@',
+        placeholder: '@',
+        min: '@?',
+        max: '@?',
+        tip: '@',
+        required: '<?',
+        sizeType: '@?',
+        sizeScaleLabel: '@?',
+        onScaleChange: '&?',
+        ngDisabled: '<?'
+    }
+};
diff --git a/modules/web-console/frontend/app/components/form-field/components/form-field-size/style.scss b/modules/web-console/frontend/app/components/form-field/components/form-field-size/style.scss
new file mode 100644
index 0000000..c522200
--- /dev/null
+++ b/modules/web-console/frontend/app/components/form-field/components/form-field-size/style.scss
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+form-field-size {
+	display: block;
+}
diff --git a/modules/web-console/frontend/app/components/form-field/components/form-field-size/template.pug b/modules/web-console/frontend/app/components/form-field/components/form-field-size/template.pug
new file mode 100644
index 0000000..f587112
--- /dev/null
+++ b/modules/web-console/frontend/app/components/form-field/components/form-field-size/template.pug
@@ -0,0 +1,77 @@
+//-
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+include /app/helpers/jade/mixins
+
++form-field__label({
+    label: '{{ ::$ctrl.label }}',
+    name: '$ctrl.id',
+    required: '$ctrl.required',
+    disabled: '$ctrl.ngDisabled'
+})
+    +form-field__tooltip({ title: '{{::$ctrl.tip}}' })(
+        ng-if='::$ctrl.tip'
+    )
+
+.form-field__control.form-field__control-group(ng-form='$ctrl.innerForm')
+    input(
+        type='number'
+        id='{{::$ctrl.id}}Input'
+        ng-model='$ctrl.value'
+        ng-model-options='{allowInvalid: true}'
+        ng-change='$ctrl.onValueChange()'
+        name='numberInput'
+        placeholder='{{$ctrl.placeholder}}'
+        min='{{ $ctrl.min ? $ctrl.min / $ctrl.sizeScale.value : "" }}'
+        max='{{ $ctrl.max ? $ctrl.max / $ctrl.sizeScale.value : "" }}'
+        ng-required='$ctrl.required'
+        ng-disabled='$ctrl.ngDisabled'
+    )
+    button.select-toggle(
+        bs-select
+        bs-options='size as size.label for size in $ctrl.sizesMenu'
+        ng-model='$ctrl.sizeScale'
+        protect-from-bs-select-render
+        ng-disabled='$ctrl.ngDisabled'
+        type='button'
+    )
+        | {{ $ctrl.sizeScale.label }}
+
+.form-field__errors(
+    ng-messages='$ctrl.ngModel.$error'
+    ng-show=`($ctrl.ngModel.$dirty || $ctrl.ngModel.$touched || $ctrl.ngModel.$submitted) && $ctrl.ngModel.$invalid`
+)
+    div(ng-transclude)
+    +form-field__error({
+        error: 'required',
+        message: 'This field could not be empty'
+    })
+    +form-field__error({
+        error: 'min',
+        message: 'Value is less than allowable minimum: {{ $ctrl.min/$ctrl.sizeScale.value }} {{$ctrl.sizeScale.label}}'
+    })
+    +form-field__error({
+        error: 'max',
+        message: 'Value is more than allowable maximum: {{ $ctrl.max/$ctrl.sizeScale.value }} {{$ctrl.sizeScale.label}}'
+    })
+    +form-field__error({
+        error: 'number',
+        message: 'Only numbers allowed'
+    })
+    +form-field__error({
+        error: 'step',
+        message: 'Invalid step'
+    })
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/form-field/index.js b/modules/web-console/frontend/app/components/form-field/index.js
index 077ace0..b1ee753 100644
--- a/modules/web-console/frontend/app/components/form-field/index.js
+++ b/modules/web-console/frontend/app/components/form-field/index.js
@@ -20,7 +20,10 @@
 import {directive as showValidationError} from './showValidationError.directive';
 import {directive as copyInputValue} from './copyInputValueButton.directive';
 
+import { default as formFieldSize } from './components/form-field-size';
+
 export default angular
     .module('ignite-console.form-field', [])
+    .component('formFieldSize', formFieldSize)
     .directive('ngModel', showValidationError)
     .directive('copyInputValueButton', copyInputValue);
diff --git a/modules/web-console/frontend/app/components/grid-column-selector/style.scss b/modules/web-console/frontend/app/components/grid-column-selector/style.scss
index 5e0d5d4..872d727 100644
--- a/modules/web-console/frontend/app/components/grid-column-selector/style.scss
+++ b/modules/web-console/frontend/app/components/grid-column-selector/style.scss
@@ -17,6 +17,7 @@
 
 grid-column-selector {
     display: inline-block;
+    line-height: initial;
 
     .btn-ignite, .icon {
         margin: 0 !important;
diff --git a/modules/web-console/frontend/app/components/grid-export/component.js b/modules/web-console/frontend/app/components/grid-export/component.js
index d312959..d4cfb29 100644
--- a/modules/web-console/frontend/app/components/grid-export/component.js
+++ b/modules/web-console/frontend/app/components/grid-export/component.js
@@ -21,7 +21,7 @@
 export default {
     template,
     controller: class {
-        static $inject = ['$scope', 'uiGridGroupingConstants', 'uiGridExporterService', 'uiGridExporterConstants', CSV.name];
+        static $inject = ['$scope', 'uiGridGroupingConstants', 'uiGridExporterService', 'uiGridExporterConstants', 'CSV'];
 
         /**
          * @param {CSV} CSV
@@ -58,10 +58,13 @@
 
             const csvContent = this.uiGridExporterService.formatAsCsv(exportColumnHeaders, data, this.CSV.getSeparator());
 
-            this.uiGridExporterService.downloadFile(this.gridApi.grid.options.exporterCsvFilename, csvContent, this.gridApi.grid.options.exporterOlderExcelCompatibility);
+            const csvFileName = this.fileName || 'export.csv';
+
+            this.uiGridExporterService.downloadFile(csvFileName, csvContent, this.gridApi.grid.options.exporterOlderExcelCompatibility);
         }
     },
     bindings: {
-        gridApi: '<'
+        gridApi: '<',
+        fileName: '<'
     }
 };
diff --git a/modules/web-console/frontend/app/components/grid-item-selected/index.js b/modules/web-console/frontend/app/components/grid-item-selected/index.js
index 583d871..4d76e6e 100644
--- a/modules/web-console/frontend/app/components/grid-item-selected/index.js
+++ b/modules/web-console/frontend/app/components/grid-item-selected/index.js
@@ -17,6 +17,7 @@
 
 import angular from 'angular';
 
+import './style.scss';
 import component from './component';
 
 export default angular
diff --git a/modules/web-console/frontend/app/components/grid-item-selected/style.scss b/modules/web-console/frontend/app/components/grid-item-selected/style.scss
new file mode 100644
index 0000000..b131d82
--- /dev/null
+++ b/modules/web-console/frontend/app/components/grid-item-selected/style.scss
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+grid-item-selected {
+    display: inline-flex;
+    align-items: center;
+
+    font-size: 14px;
+}
diff --git a/modules/web-console/frontend/app/components/grid-showing-rows/component.js b/modules/web-console/frontend/app/components/grid-showing-rows/component.js
index 93fa1b1..19de9af 100644
--- a/modules/web-console/frontend/app/components/grid-showing-rows/component.js
+++ b/modules/web-console/frontend/app/components/grid-showing-rows/component.js
@@ -20,7 +20,7 @@
 
 export default {
     template: `
-        <i>Showing: {{ $ctrl.count }} rows</i>
+        <i ng-pluralize count="$ctrl.count" when="{'one': 'Showing: 1 row', 'other': 'Showing: {} rows'}"></i>
     `,
     controller,
     bindings: {
diff --git a/modules/web-console/frontend/app/components/grid-showing-rows/controller.js b/modules/web-console/frontend/app/components/grid-showing-rows/controller.js
index cb5b496..67c407c 100644
--- a/modules/web-console/frontend/app/components/grid-showing-rows/controller.js
+++ b/modules/web-console/frontend/app/components/grid-showing-rows/controller.js
@@ -42,6 +42,6 @@
             return;
         }
 
-        this.count = _.sumBy(this.gridApi.grid.rows, 'visible');
+        this.count = _.sumBy(this.gridApi.grid.rows, (row) => Number(row.visible));
     }
 }
diff --git a/modules/web-console/frontend/app/components/ignite-chart-series-selector/controller.js b/modules/web-console/frontend/app/components/ignite-chart-series-selector/controller.js
index 00f57ca..f46d8da 100644
--- a/modules/web-console/frontend/app/components/ignite-chart-series-selector/controller.js
+++ b/modules/web-console/frontend/app/components/ignite-chart-series-selector/controller.js
@@ -16,21 +16,21 @@
  */
 
 export default class IgniteChartSeriesSelectorController {
-    static $inject = [];
-
     constructor() {
         this.charts = [];
         this.selectedCharts = [];
     }
 
     $onChanges(changes) {
-        if (changes && 'chartApi' in changes && changes.chartApi.currentValue)
+        if (changes && 'chartApi' in changes && changes.chartApi.currentValue) {
             this.applyValues();
+            this.setSelectedCharts();
+        }
     }
 
     applyValues() {
         this.charts = this._makeMenu();
-        this.selectedCharts = this.charts.map(({ key }) => key);
+        this.selectedCharts = this.charts.filter((chart) => !chart.hidden).map(({ key }) => key);
     }
 
     setSelectedCharts() {
@@ -55,7 +55,8 @@
         return Object.keys(this.chartApi.config.datasetLegendMapping).map((key) => {
             return {
                 key,
-                label: labels[key]
+                label: labels[key].name || labels[key],
+                hidden: labels[key].hidden
             };
         });
     }
diff --git a/modules/web-console/frontend/app/components/ignite-chart/controller.js b/modules/web-console/frontend/app/components/ignite-chart/controller.js
index df11050..c60ef5a 100644
--- a/modules/web-console/frontend/app/components/ignite-chart/controller.js
+++ b/modules/web-console/frontend/app/components/ignite-chart/controller.js
@@ -82,7 +82,7 @@
     /**
      * @param {{chartOptions: ng.IChangesObject<import('chart.js').ChartConfiguration>, chartTitle: ng.IChangesObject<string>, chartDataPoint: ng.IChangesObject<IgniteChartDataPoint>, chartHistory: ng.IChangesObject<Array<IgniteChartDataPoint>>}} changes
      */
-    $onChanges(changes) {
+    async $onChanges(changes) {
         if (this.chart && _.get(changes, 'refreshRate.currentValue'))
             this.onRefreshRateChanged(_.get(changes, 'refreshRate.currentValue'));
 
@@ -94,7 +94,7 @@
 
         if (changes.chartHistory && changes.chartHistory.currentValue && changes.chartHistory.currentValue.length !== changes.chartHistory.previousValue.length) {
             if (!this.chart)
-                this.initChart();
+                await this.initChart();
 
             this.clearDatasets();
             this.localHistory = [...changes.chartHistory.currentValue];
@@ -118,7 +118,7 @@
     async initChart() {
         /** @type {import('chart.js').ChartConfiguration} */
         this.config = {
-            type: 'line',
+            type: 'LineWithVerticalCursor',
             data: {
                 datasets: []
             },
@@ -187,8 +187,9 @@
                 },
                 tooltips: {
                     mode: 'index',
-                    position: 'nearest',
+                    position: 'yCenter',
                     intersect: false,
+                    yAlign: 'center',
                     xPadding: 20,
                     yPadding: 20,
                     bodyFontSize: 13,
@@ -216,6 +217,7 @@
                         duration: this.currentRange.value * 1000 * 60,
                         frameRate: 1000 / this.refreshRate || 1 / 3,
                         refresh: this.refreshRate || 3000,
+                        ttl: this.maxPointsNumber,
                         onRefresh: () => {
                             this.onRefresh();
                         }
@@ -229,6 +231,40 @@
         const chartModule = await import('chart.js');
         const Chart = chartModule.default;
 
+        Chart.Tooltip.positioners.yCenter = (elements) => {
+            const chartHeight = elements[0]._chart.height;
+            const tooltipHeight = 60;
+
+            return {x: elements[0].getCenterPoint().x, y: Math.floor(chartHeight / 2) - Math.floor(tooltipHeight / 2) };
+        };
+
+
+        // Drawing vertical cursor
+        Chart.defaults.LineWithVerticalCursor = Chart.defaults.line;
+        Chart.controllers.LineWithVerticalCursor = Chart.controllers.line.extend({
+            draw(ease) {
+                Chart.controllers.line.prototype.draw.call(this, ease);
+
+                if (this.chart.tooltip._active && this.chart.tooltip._active.length) {
+                    const activePoint = this.chart.tooltip._active[0];
+                    const ctx = this.chart.ctx;
+                    const x = activePoint.tooltipPosition().x;
+                    const topY = this.chart.scales['y-axis-0'].top;
+                    const bottomY = this.chart.scales['y-axis-0'].bottom;
+
+                    // draw line
+                    ctx.save();
+                    ctx.beginPath();
+                    ctx.moveTo(x, topY);
+                    ctx.lineTo(x, bottomY);
+                    ctx.lineWidth = 0.5;
+                    ctx.strokeStyle = '#0080ff';
+                    ctx.stroke();
+                    ctx.restore();
+                }
+            }
+        });
+
         await import('chartjs-plugin-streaming');
 
         this.chart = new Chart(this.ctx, this.config);
@@ -256,12 +292,8 @@
                     this.addDataset(key);
                 }
 
-                // Prune excessive data points.
-                if (this.maxPointsNumber && this.config.data.datasets[datasetIndex].length - this.maxPointsNumber > 0)
-                    this.config.data.datasets[datasetIndex].data.splice(0, this.config.data.datasets[datasetIndex].length - this.maxPointsNumber);
-
                 this.config.data.datasets[datasetIndex].data.push({x: dataPoint.x, y: dataPoint.y[key]});
-                this.config.data.datasets[datasetIndex].borderColor = this.chartOptions.chartColors[datasetIndex];
+                this.config.data.datasets[datasetIndex].borderColor = this.chartColors[datasetIndex];
                 this.config.data.datasets[datasetIndex].borderWidth = 2;
                 this.config.data.datasets[datasetIndex].fill = false;
             }
@@ -290,7 +322,7 @@
         if (this.findDatasetIndex(datasetName) >= 0)
             throw new Error(`Dataset with name ${datasetName} is already in chart`);
         else
-            this.config.data.datasets.push({ label: datasetName, data: [] });
+            this.config.data.datasets.push({ label: datasetName, data: [], hidden: true });
     }
 
     findDatasetIndex(searchedDatasetLabel) {
@@ -298,14 +330,16 @@
     }
 
     changeXRange(range) {
-        const deltaInMilliSeconds = range.value * 60 * 1000;
-        this.chart.config.options.plugins.streaming.duration = deltaInMilliSeconds;
+        if (this.chart) {
+            const deltaInMilliSeconds = range.value * 60 * 1000;
+            this.chart.config.options.plugins.streaming.duration = deltaInMilliSeconds;
 
-        this.clearDatasets();
-        this.newPoints.splice(0, this.newPoints.length, ...this.localHistory);
+            this.clearDatasets();
+            this.newPoints.splice(0, this.newPoints.length, ...this.localHistory);
 
-        this.onRefresh();
-        this.rerenderChart();
+            this.onRefresh();
+            this.rerenderChart();
+        }
     }
 
     onRefreshRateChanged(refreshRate) {
diff --git a/modules/web-console/frontend/app/components/ignite-chart/index.js b/modules/web-console/frontend/app/components/ignite-chart/index.js
index 337ba36..ea951cd 100644
--- a/modules/web-console/frontend/app/components/ignite-chart/index.js
+++ b/modules/web-console/frontend/app/components/ignite-chart/index.js
@@ -32,6 +32,7 @@
             chartHistory: '<',
             chartTitle: '<',
             chartColors: '<',
+            chartHeaderText: '<',
             maxPointsNumber: '<',
             refreshRate: '<'
         }
diff --git a/modules/web-console/frontend/app/components/ignite-chart/template.pug b/modules/web-console/frontend/app/components/ignite-chart/template.pug
index 076d8d4..5108853 100644
--- a/modules/web-console/frontend/app/components/ignite-chart/template.pug
+++ b/modules/web-console/frontend/app/components/ignite-chart/template.pug
@@ -18,6 +18,7 @@
     div
         h5 {{ $ctrl.chartTitle }}
         ignite-chart-series-selector(chart-api='$ctrl.chart')
+        .chart-text(ng-if='$ctrl.chartHeaderText') {{$ctrl.chartHeaderText}}
 
     div
         span Range:
diff --git a/modules/web-console/frontend/app/components/ignite-status/index.js b/modules/web-console/frontend/app/components/ignite-status/index.js
new file mode 100644
index 0000000..a8520bc
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ignite-status/index.js
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import angular from 'angular';
+import './style.scss';
+
+export default angular
+    .module('ignite-console.ignite-status', []);
diff --git a/modules/web-console/frontend/app/components/ignite-status/style.scss b/modules/web-console/frontend/app/components/ignite-status/style.scss
new file mode 100644
index 0000000..d2877fc
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ignite-status/style.scss
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+@import "../../../public/stylesheets/variables";
+
+// Statuses coloring
+.ignite-status__active {
+  color: $ignite-status-active !important;
+}
+
+.ignite-status__inactive {
+  color: $ignite-status-inactive;
+}
diff --git a/modules/web-console/frontend/app/components/input-dialog/input-dialog.tpl.pug b/modules/web-console/frontend/app/components/input-dialog/input-dialog.tpl.pug
index 5c6e173..50087e9 100644
--- a/modules/web-console/frontend/app/components/input-dialog/input-dialog.tpl.pug
+++ b/modules/web-console/frontend/app/components/input-dialog/input-dialog.tpl.pug
@@ -20,7 +20,7 @@
     .modal-dialog
         form.modal-content(name='ctrl.form' novalidate)
             .modal-header
-                h4.modal-title 
+                h4.modal-title
                     i.fa.fa-clone
                     span {{ ctrl.title }}
                 button.close(type='button' aria-label='Close' ng-click='$hide()')
@@ -29,11 +29,18 @@
             .modal-body
                 .row
                     .col-100
-                        +ignite-form-field-text('{{ ctrl.label }}', 'ctrl.value', '"input-field"', false, true, 'Enter value')(
-                            data-ignite-form-field-input-autofocus='true'
+                        +form-field__text({
+                            label: '{{ ctrl.label }}',
+                            model: 'ctrl.value',
+                            name: '"inputDialogField"',
+                            required: true,
+                            placeholder: 'Enter value'
+                        })(
+                            ignite-form-field-input-autofocus='true'
                             ignite-on-enter='form.$valid && ctrl.confirm()'
                         )
 
             .modal-footer
-                button#copy-btn-cancel.btn-ignite.btn-ignite--link-success(ng-click='$hide()') Cancel
-                button#copy-btn-confirm.btn-ignite.btn-ignite--success(ng-disabled='ctrl.form.$invalid' ng-click='ctrl.confirm()') Confirm
+                div
+                    button#copy-btn-cancel.btn-ignite.btn-ignite--link-success(ng-click='$hide()') Cancel
+                    button#copy-btn-confirm.btn-ignite.btn-ignite--success(ng-disabled='ctrl.form.$invalid' ng-click='ctrl.confirm()') Confirm
diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.style.scss b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.style.scss
index 5df29cc..5735be3 100644
--- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.style.scss
+++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.style.scss
@@ -19,7 +19,6 @@
     $index-column-width: 46px;
     $remove-column-width: 36px;
 
-    margin-left: 10px;
     margin-right: $remove-column-width;
     transition: 0.2s opacity;
 
@@ -27,7 +26,7 @@
         margin-left: $index-column-width;
     }
 
-    .ignite-form-field__label {
+    .form-field__label {
         padding-left: 0;
         padding-right: 0;
         float: none;
@@ -41,10 +40,15 @@
     }
 
     &+list-editable {
+        .form-field__label,
         .ignite-form-field__label {
             display: none;
         }
 
+        .form-field:not(.form-field__checkbox) {
+            margin-left: -11px;
+        }
+
         .le-row-item-view:nth-last-child(2) {
             display: none;
         }
diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug
index d060f45..f1aff2e 100644
--- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug
+++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-cols/cols.template.pug
@@ -19,7 +19,7 @@
     ng-disabled='$ctrl.ngDisabled'
 )
     .list-editable-cols__header-cell(ng-repeat='col in ::$ctrl.colDefs' ng-class='::col.cellClass')
-        span.ignite-form-field__label
+        span.form-field__label
             | {{ ::col.name }}
             svg(
                 ng-if='::col.tip'
diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-one-way/index.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-one-way/index.js
index 3c49003..652ac0a 100644
--- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-one-way/index.js
+++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-one-way/index.js
@@ -21,4 +21,4 @@
 
 export default angular
     .module('ignite-console.list-editable.one-way', [])
-    .directive(directive.name, directive);
+    .directive('listEditableOneWay', directive);
diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-save-on-changes/index.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-save-on-changes/index.js
index 642e84a..18e26db 100644
--- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-save-on-changes/index.js
+++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-save-on-changes/index.js
@@ -19,6 +19,6 @@
 import {ngModel, listEditableTransclude} from './directives';
 
 export default angular
-.module('list-editable.save-on-changes', [])
-.directive(ngModel.name, ngModel)
-.directive(listEditableTransclude.name, listEditableTransclude);
+    .module('list-editable.save-on-changes', [])
+    .directive('ngModel', ngModel)
+    .directive('listEditableTransclude', listEditableTransclude);
diff --git a/modules/web-console/frontend/app/components/list-editable/components/list-editable-transclude/index.js b/modules/web-console/frontend/app/components/list-editable/components/list-editable-transclude/index.js
index 39d5b73..6d681a4 100644
--- a/modules/web-console/frontend/app/components/list-editable/components/list-editable-transclude/index.js
+++ b/modules/web-console/frontend/app/components/list-editable/components/list-editable-transclude/index.js
@@ -20,4 +20,4 @@
 
 export default angular
     .module('list-editable.transclude', [])
-    .directive(listEditableTransclude.name, listEditableTransclude);
+    .directive('listEditableTransclude', listEditableTransclude);
diff --git a/modules/web-console/frontend/app/components/list-editable/style.scss b/modules/web-console/frontend/app/components/list-editable/style.scss
index 4d60528..0db6f7a 100644
--- a/modules/web-console/frontend/app/components/list-editable/style.scss
+++ b/modules/web-console/frontend/app/components/list-editable/style.scss
@@ -53,7 +53,7 @@
     }
 
     .le-body {
-        box-shadow: 0 1px 3px 0 rgba(0, 0, 0, 0.2);
+        box-shadow: 0 1px 0 0 rgba(0, 0, 0, 0.2);
     }
 
     .le-row-sort {
@@ -113,10 +113,6 @@
                 min-height: 36px;
                 align-items: center;
             }
-
-            &-edit {
-                margin-left: -11px;
-            }
         }
 
         &--editable {
diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/controller.js b/modules/web-console/frontend/app/components/list-of-registered-users/controller.js
index 53a5521..f8b2797 100644
--- a/modules/web-console/frontend/app/components/list-of-registered-users/controller.js
+++ b/modules/web-console/frontend/app/components/list-of-registered-users/controller.js
@@ -210,7 +210,7 @@
             const sdt = $ctrl.params.startDate;
             const edt = $ctrl.params.endDate;
 
-            $ctrl.gridOptions.exporterCsvFilename = `web_console_users_${dtFilter(sdt, 'yyyy_MM')}.csv`;
+            $ctrl.exporterCsvFilename = `web_console_users_${dtFilter(sdt, 'yyyy_MM')}.csv`;
 
             const startDate = Date.UTC(sdt.getFullYear(), sdt.getMonth(), 1);
             const endDate = Date.UTC(edt.getFullYear(), edt.getMonth() + 1, 1);
diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/style.scss b/modules/web-console/frontend/app/components/list-of-registered-users/style.scss
index 5a0c713..359a19d 100644
--- a/modules/web-console/frontend/app/components/list-of-registered-users/style.scss
+++ b/modules/web-console/frontend/app/components/list-of-registered-users/style.scss
@@ -29,17 +29,7 @@
         }
     }
 
-    .ui-grid-settings--heading {
-        display: flex;
-    }
-
-    & > a {
-        display: inline-block;
-        margin: 10px;
-        margin-left: 0;
-
-        &.active {
-            font-weight: bold;
-        }
+    .form-field--inline:first-child {
+        margin-right: 20px;
     }
 }
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/list-of-registered-users/template.tpl.pug b/modules/web-console/frontend/app/components/list-of-registered-users/template.tpl.pug
index 6656c90..d32b64d 100644
--- a/modules/web-console/frontend/app/components/list-of-registered-users/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/list-of-registered-users/template.tpl.pug
@@ -17,15 +17,15 @@
 include /app/helpers/jade/mixins
 
 ul.tabs.tabs--blue
-    li(role='presentation' ng-class='{ active: $ctrl.groupBy === "user" }') 
-        a(ng-click='$ctrl.groupByUser()') 
+    li(role='presentation' ng-class='{ active: $ctrl.groupBy === "user" }')
+        a(ng-click='$ctrl.groupByUser()')
             span Users
             span.badge.badge--blue(ng-hide='$ctrl.groupBy === "user"')
                 | {{ $ctrl.gridOptions.data.length }}
             span.badge.badge--blue(ng-show='$ctrl.groupBy === "user"')
                 | {{ $ctrl.filteredRows.length }}
     li(role='presentation' ng-class='{ active: $ctrl.groupBy === "company" }')
-        a(ng-click='$ctrl.groupByCompany()') 
+        a(ng-click='$ctrl.groupByCompany()')
             span Companies
             span.badge.badge--blue {{ $ctrl.companies.length }}
     li(role='presentation' ng-class='{ active: $ctrl.groupBy === "country" }')
@@ -34,32 +34,53 @@
             span.badge.badge--blue {{ $ctrl.countries.length }}
 
 .panel--ignite
-    .panel-heading.ui-grid-settings.ui-grid-ignite__panel
-        .panel-title
+    header.header-with-selector
+        div(ng-if='!$ctrl.selected.length')
+            span(ng-if='$ctrl.groupBy === "user"') List of registered users
+            span(ng-if='$ctrl.groupBy === "company"') List of registered companies
+            span(ng-if='$ctrl.groupBy === "country"') List of registered countries
+            grid-column-selector(grid-api='$ctrl.gridApi')
+
+        div(ng-if='$ctrl.selected.length')
+            grid-item-selected(grid-api='$ctrl.gridApi')
+
+        div
+            .form-field--inline
+                +form-field__text({
+                    label: 'Exclude:',
+                    model: '$ctrl.params.companiesExclude',
+                    name: '"exclude"',
+                    placeholder: 'Exclude by company name...'
+                })
+
+            .form-field--inline
+                +form-field__datepicker({
+                    label: 'Period: from',
+                    model: '$ctrl.params.startDate',
+                    name: '"startDate"',
+                    maxdate: '$ctrl.params.endDate'
+                })
+            .form-field--inline
+                +form-field__datepicker({
+                    label: 'to',
+                    model: '$ctrl.params.endDate',
+                    name: '"endDate"',
+                    mindate: '$ctrl.params.startDate'
+                })
+
+            grid-export(file-name='$ctrl.exporterCsvFilename' grid-api='$ctrl.gridApi')
+
             +ignite-form-field-bsdropdown({
                 label: 'Actions',
                 model: '$ctrl.action',
                 name: 'action',
                 disabled: '!$ctrl.selected.length',
-                required: false,
                 options: '$ctrl.actionOptions'
             })
-            grid-export(grid-api='$ctrl.gridApi')
-            form.ui-grid-settings-dateperiod(name=form novalidate)
-                -var form = 'admin'
-                +ignite-form-field-datepicker('Period: from', '$ctrl.params.startDate', '"startDate"', null, '$ctrl.params.endDate')
-                +ignite-form-field-datepicker('to', '$ctrl.params.endDate', '"endDate"', '$ctrl.params.startDate', null)
-            form.ui-grid-settings-filter
-                -var form = 'admin'
-                +ignite-form-field-text('Exclude:', '$ctrl.params.companiesExclude', '"exclude"', false, false, 'Exclude by company name...')
 
-            .ui-grid-settings--heading(ng-hide='$ctrl.selected.length')
-                span(ng-if='$ctrl.groupBy === "user"') List of registered users
-                span(ng-if='$ctrl.groupBy === "company"') List of registered companies
-                span(ng-if='$ctrl.groupBy === "country"') List of registered countries
-                grid-column-selector(grid-api='$ctrl.gridApi')
-            .panel-selected(ng-show='$ctrl.selected.length')
-                grid-item-selected(grid-api='$ctrl.gridApi')
-
-    .panel-collapse
+    .ignite-grid-table
         .grid.ui-grid--ignite.ui-grid-disabled-group-selection(ui-grid='$ctrl.gridOptions' ui-grid-resize-columns ui-grid-selection ui-grid-exporter ui-grid-pinning ui-grid-grouping ui-grid-hovering)
+
+    grid-no-data(grid-api='$ctrl.gridApi')
+        grid-no-data-filtered
+           | Nothing to display. Check your filters.
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/affinity.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/affinity.pug
index ce2cad5..68ee4d5 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/affinity.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/affinity.pug
@@ -19,68 +19,123 @@
 -var form = 'affinity'
 -var model = '$ctrl.clonedCache'
 -var affModel = model + '.affinity'
--var affMapModel = model + '.affinityMapper'
 -var rendezvousAff = affModel + '.kind === "Rendezvous"'
 -var fairAff = affModel + '.kind === "Fair"'
 -var customAff = affModel + '.kind === "Custom"'
--var customAffMapper = affMapModel + '.kind === "Custom"'
 -var rendPartitionsRequired = rendezvousAff + ' && ' + affModel + '.Rendezvous.affinityBackupFilter'
 -var fairPartitionsRequired = fairAff + ' && ' + affModel + '.Fair.affinityBackupFilter'
 
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Affinity Collocation
     panel-description
-        | Collocate data with data to improve performance and scalability of your application. 
+        | Collocate data with data to improve performance and scalability of your application.
         a.link-success(href="https://apacheignite.readme.io/docs/affinity-collocation" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +dropdown('Function:', `${affModel}.kind`, '"AffinityKind"', 'true', 'Default', 'affinityFunction',
-                    'Key topology resolver to provide mapping from keys to nodes<br/>\
-                    <ul>\
-                        <li>Rendezvous - Based on Highest Random Weight algorithm</li>\
-                        <li>Fair - Tries to ensure that all nodes get equal number of partitions with minimum amount of reassignments between existing nodes</li>\
-                        <li>Custom - Custom implementation of key affinity fynction</li>\
-                        <li>Default - By default rendezvous affinity function  with 1024 partitions is used</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Function:',
+                    model: `${affModel}.kind`,
+                    name: '"AffinityKind"',
+                    placeholder: 'Default',
+                    options: 'affinityFunction',
+                    tip: 'Key topology resolver to provide mapping from keys to nodes<br/>\
+                                        <ul>\
+                                            <li>Rendezvous - Based on Highest Random Weight algorithm</li>\
+                                            <li>Fair - Tries to ensure that all nodes get equal number of partitions with minimum amount of reassignments between existing nodes</li>\
+                                            <li>Custom - Custom implementation of key affinity fynction</li>\
+                                            <li>Default - By default rendezvous affinity function  with 1024 partitions is used</li>\
+                                        </ul>'
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +dropdown('Function:', `${affModel}.kind`, '"AffinityKind"', 'true', 'Default', 'affinityFunction',
-                    'Key topology resolver to provide mapping from keys to nodes<br/>\
-                    <ul>\
-                        <li>Rendezvous - Based on Highest Random Weight algorithm</li>\
-                        <li>Custom - Custom implementation of key affinity fynction</li>\
-                        <li>Default - By default rendezvous affinity function  with 1024 partitions is used</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Function:',
+                    model: `${affModel}.kind`,
+                    name: '"AffinityKind"',
+                    placeholder: 'Default',
+                    options: 'affinityFunction',
+                    tip: 'Key topology resolver to provide mapping from keys to nodes<br/>\
+                                       <ul>\
+                                           <li>Rendezvous - Based on Highest Random Weight algorithm</li>\
+                                           <li>Custom - Custom implementation of key affinity fynction</li>\
+                                           <li>Default - By default rendezvous affinity function  with 1024 partitions is used</li>\
+                                       </ul>'
+                })
             .pc-form-group
                 .pc-form-grid-row(ng-if=rendezvousAff)
                     .pc-form-grid-col-60
-                        +number-required('Partitions', `${affModel}.Rendezvous.partitions`, '"RendPartitions"', 'true', rendPartitionsRequired, '1024', '1', 'Number of partitions')
+                        +form-field__number({
+                            label: 'Partitions',
+                            model: `${affModel}.Rendezvous.partitions`,
+                            name: '"RendPartitions"',
+                            required: rendPartitionsRequired,
+                            placeholder: '1024',
+                            min: '1',
+                            tip: 'Number of partitions'
+                        })
                     .pc-form-grid-col-60
-                        +java-class('Backup filter', `${affModel}.Rendezvous.affinityBackupFilter`, '"RendAffinityBackupFilter"', 'true', 'false',
-                            'Backups will be selected from all nodes that pass this filter')
+                        +form-field__java-class({
+                            label: 'Backup filter',
+                            model: `${affModel}.Rendezvous.affinityBackupFilter`,
+                            name: '"RendAffinityBackupFilter"',
+                            tip: 'Backups will be selected from all nodes that pass this filter'
+                        })
                     .pc-form-grid-col-60
-                        +checkbox('Exclude neighbors', `${affModel}.Rendezvous.excludeNeighbors`, '"RendExcludeNeighbors"',
-                            'Exclude same - host - neighbors from being backups of each other and specified number of backups')
+                        +form-field__checkbox({
+                            label: 'Exclude neighbors',
+                            model: `${affModel}.Rendezvous.excludeNeighbors`,
+                            name: '"RendExcludeNeighbors"',
+                            tip: 'Exclude same - host - neighbors from being backups of each other and specified number of backups'
+                        })
                 .pc-form-grid-row(ng-if=fairAff)
                     .pc-form-grid-col-60
-                        +number-required('Partitions', `${affModel}.Fair.partitions`, '"FairPartitions"', 'true', fairPartitionsRequired, '256', '1', 'Number of partitions')
+                        +form-field__number({
+                            label: 'Partitions',
+                            model: `${affModel}.Fair.partitions`,
+                            name: '"FairPartitions"',
+                            required: fairPartitionsRequired,
+                            placeholder: '256',
+                            min: '1',
+                            tip: 'Number of partitions'
+                        })
                     .pc-form-grid-col-60
-                        +java-class('Backup filter', `${affModel}.Fair.affinityBackupFilter`, '"FairAffinityBackupFilter"', 'true', 'false',
-                            'Backups will be selected from all nodes that pass this filter')
+                        +form-field__java-class({
+                            label: 'Backup filter',
+                            model: `${affModel}.Fair.affinityBackupFilter`,
+                            name: '"FairAffinityBackupFilter"',
+                            tip: 'Backups will be selected from all nodes that pass this filter'
+                        })
                     .pc-form-grid-col-60
-                        +checkbox('Exclude neighbors', `${affModel}.Fair.excludeNeighbors`, '"FairExcludeNeighbors"',
-                            'Exclude same - host - neighbors from being backups of each other and specified number of backups')
+                        +form-field__checkbox({
+                            label: 'Exclude neighbors',
+                            model: `${affModel}.Fair.excludeNeighbors`,
+                            name: '"FairExcludeNeighbors"',
+                            tip: 'Exclude same - host - neighbors from being backups of each other and specified number of backups'
+                        })
                 .pc-form-grid-row(ng-if=customAff)
                     .pc-form-grid-col-60
-                        +java-class('Class name:', `${affModel}.Custom.className`, '"AffCustomClassName"', 'true', customAff,
-                            'Custom key affinity function implementation class name')
+                        +form-field__java-class({
+                            label: 'Class name:',
+                            model: `${affModel}.Custom.className`,
+                            name: '"AffCustomClassName"',
+                            required: customAff,
+                            tip: 'Custom key affinity function implementation class name'
+                        })
             .pc-form-grid-col-60
-                +java-class('Mapper:', model + '.affinityMapper', '"AffMapCustomClassName"', 'true', 'false',
-                    'Provide custom affinity key for any given key')
+                +form-field__java-class({
+                    label: 'Mapper:',
+                    model: `${model}.affinityMapper`,
+                    name: '"AffMapCustomClassName"',
+                    tip: 'Provide custom affinity key for any given key'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +java-class('Topology validator:', model + '.topologyValidator', '"topologyValidator"', 'true', 'false')
+                +form-field__java-class({
+                    label: 'Topology validator:',
+                    model: `${model}.topologyValidator`,
+                    name: '"topologyValidator"'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'cacheAffinity')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/concurrency.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/concurrency.pug
index d99f894..bb355f0 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/concurrency.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/concurrency.pug
@@ -26,39 +26,61 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +number('Max async operations:', `${model}.maxConcurrentAsyncOperations`, '"maxConcurrentAsyncOperations"', 'true', '500', '0',
-                    'Maximum number of allowed concurrent asynchronous operations<br/>\
-                    If <b>0</b> then number of concurrent asynchronous operations is unlimited')
+                +form-field__number({
+                    label: 'Max async operations:',
+                    model: `${model}.maxConcurrentAsyncOperations`,
+                    name: '"maxConcurrentAsyncOperations"',
+                    placeholder: '500',
+                    min: '0',
+                    tip: 'Maximum number of allowed concurrent asynchronous operations<br/>\
+                         If <b>0</b> then number of concurrent asynchronous operations is unlimited'
+                })
             .pc-form-grid-col-30
-                +number('Default lock timeout:', `${model}.defaultLockTimeout`, '"defaultLockTimeout"', 'true', '0', '0',
-                    'Default lock acquisition timeout in milliseconds<br/>\
-                    If <b>0</b> then lock acquisition will never timeout')
+                +form-field__number({
+                    label: 'Default lock timeout:',
+                    model: `${model}.defaultLockTimeout`,
+                    name: '"defaultLockTimeout"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Default lock acquisition timeout in milliseconds<br/>\
+                         If <b>0</b> then lock acquisition will never timeout'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])' ng-hide=`${model}.atomicityMode === 'TRANSACTIONAL'`)
-                +dropdown('Entry versioning:', `${model}.atomicWriteOrderMode`, '"atomicWriteOrderMode"', 'true', 'Choose versioning',
-                    '[\
-                        {value: "CLOCK", label: "CLOCK"},\
-                        {value: "PRIMARY", label: "PRIMARY"}\
-                    ]',
-                    'Write ordering mode determines which node assigns the write version, sender or the primary node\
-                    <ul>\
-                        <li>CLOCK - in this mode write versions are assigned on a sender node which generally leads to better performance</li>\
-                        <li>PRIMARY - in this mode version is assigned only on primary node. This means that sender will only send write request to primary node, which in turn will assign write version and forward it to backups</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Entry versioning:',
+                    model: `${model}.atomicWriteOrderMode`,
+                    name: '"atomicWriteOrderMode"',
+                    placeholder: 'Choose versioning',
+                    options: '[\
+                                            {value: "CLOCK", label: "CLOCK"},\
+                                            {value: "PRIMARY", label: "PRIMARY"}\
+                                        ]',
+                    tip: 'Write ordering mode determines which node assigns the write version, sender or the primary node\
+                                        <ul>\
+                                            <li>CLOCK - in this mode write versions are assigned on a sender node which generally leads to better performance</li>\
+                                            <li>PRIMARY - in this mode version is assigned only on primary node. This means that sender will only send write request to primary node, which in turn will assign write version and forward it to backups</li>\
+                                        </ul>'
+                })
 
             .pc-form-grid-col-60
-                +dropdown('Write synchronization mode:', `${model}.writeSynchronizationMode`, '"writeSynchronizationMode"', 'true', 'PRIMARY_SYNC',
-                    '[\
-                        {value: "FULL_SYNC", label: "FULL_SYNC"},\
-                        {value: "FULL_ASYNC", label: "FULL_ASYNC"},\
-                        {value: "PRIMARY_SYNC", label: "PRIMARY_SYNC"}\
-                    ]',
-                    'Write synchronization mode\
-                    <ul>\
-                        <li>FULL_SYNC - Ignite will wait for write or commit replies from all nodes</li>\
-                        <li>FULL_ASYNC - Ignite will not wait for write or commit responses from participating nodes</li>\
-                        <li>PRIMARY_SYNC - Makes sense for PARTITIONED mode. Ignite will wait for write or commit to complete on primary node</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Write synchronization mode:',
+                    model: `${model}.writeSynchronizationMode`,
+                    name: '"writeSynchronizationMode"',
+                    placeholder: 'PRIMARY_SYNC',
+                    options: '[\
+                                            {value: "FULL_SYNC", label: "FULL_SYNC"},\
+                                            {value: "FULL_ASYNC", label: "FULL_ASYNC"},\
+                                            {value: "PRIMARY_SYNC", label: "PRIMARY_SYNC"}\
+                                        ]',
+                    tip: 'Write synchronization mode\
+                                        <ul>\
+                                            <li>FULL_SYNC - Ignite will wait for write or commit replies from all nodes</li>\
+                                            <li>FULL_ASYNC - Ignite will not wait for write or commit responses from participating nodes</li>\
+                                            <li>PRIMARY_SYNC - Makes sense for PARTITIONED mode. Ignite will wait for write or commit to complete on primary node</li>\
+                                        </ul>'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheConcurrency')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/general.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/general.pug
index 29977be..9552396 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/general.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/general.pug
@@ -22,12 +22,12 @@
 panel-collapsible(opened=`::true` ng-form=form)
     panel-title General
     panel-description
-        | Common cache configuration. 
+        | Common cache configuration.
         a.link-success(href="https://apacheignite.readme.io/docs/data-grid" target="_blank") More info
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +sane-ignite-form-field-text({
+                +form-field__text({
                     label: 'Name:',
                     model: `${model}.name`,
                     name: '"cacheName"',
@@ -38,9 +38,9 @@
                     ignite-unique-property='name'
                     ignite-unique-skip=`["_id", ${model}]`
                 )
-                    +unique-feedback(`${model}.name`, 'Cache name should be unique')
+                    +form-field__error({ error: 'igniteUnique', message: 'Cache name should be unique' })
             .pc-form-grid-col-60
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Domain models:',
                     model: `${model}.domains`,
                     name: '"domains"',
@@ -51,63 +51,102 @@
                     tip: 'Select domain models to describe types in cache'
                 })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.1.0")')
-                +text('Group:', `${model}.groupName`, '"groupName"', 'false', 'Input group name',
-                    'Cache group name.<br/>\
-                    Caches with the same group name share single underlying "physical" cache (partition set), but are logically isolated.')
+                +form-field__text({
+                    label: 'Group:',
+                    model: `${model}.groupName`,
+                    name: '"groupName"',
+                    placeholder: 'Input group name',
+                    tip: 'Cache group name.<br/>\
+                          Caches with the same group name share single underlying "physical" cache (partition set), but are logically isolated.'
+                })
             .pc-form-grid-col-30
-                +cacheMode('Mode:', `${model}.cacheMode`, '"cacheMode"', 'PARTITIONED')
+                +form-field__cache-modes({
+                    label: 'Mode:',
+                    model: `${model}.cacheMode`,
+                    name: '"cacheMode"',
+                    placeholder: 'PARTITIONED'
+                })
 
             .pc-form-grid-col-30
-                +dropdown('Atomicity:', `${model}.atomicityMode`, '"atomicityMode"', 'true', 'ATOMIC',
-                    '[\
-                        {value: "ATOMIC", label: "ATOMIC"},\
-                        {value: "TRANSACTIONAL", label: "TRANSACTIONAL"}\
-                    ]',
-                    'Atomicity:\
-                    <ul>\
-                        <li>ATOMIC - in this mode distributed transactions and distributed locking are not supported</li>\
-                        <li>TRANSACTIONAL - in this mode specified fully ACID-compliant transactional cache behavior</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Atomicity:',
+                    model: `${model}.atomicityMode`,
+                    name: '"atomicityMode"',
+                    placeholder: 'ATOMIC',
+                    options: '[\
+                                            {value: "ATOMIC", label: "ATOMIC"},\
+                                            {value: "TRANSACTIONAL", label: "TRANSACTIONAL"}\
+                                        ]',
+                    tip: 'Atomicity:\
+                                        <ul>\
+                                            <li>ATOMIC - in this mode distributed transactions and distributed locking are not supported</li>\
+                                            <li>TRANSACTIONAL - in this mode specified fully ACID-compliant transactional cache behavior</li>\
+                                        </ul>'
+                })
             .pc-form-grid-col-30(ng-is=`${model}.cacheMode === 'PARTITIONED'`)
-                +number('Backups:', `${model}.backups`, '"backups"', 'true', '0', '0', 'Number of nodes used to back up single partition for partitioned cache')
+                +form-field__number({
+                    label: 'Backups:',
+                    model: `${model}.backups`,
+                    name: '"checkpointS3ClientExecutionTimeout"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Number of nodes used to back up single partition for partitioned cache'
+                })
             //- Since ignite 2.0
             .pc-form-grid-col-30(ng-if='$ctrl.available("2.0.0")')
-                +dropdown('Partition loss policy:', `${model}.partitionLossPolicy`, '"partitionLossPolicy"', 'true', 'IGNORE',
-                '[\
-                    {value: "READ_ONLY_SAFE", label: "READ_ONLY_SAFE"},\
-                    {value: "READ_ONLY_ALL", label: "READ_ONLY_ALL"},\
-                    {value: "READ_WRITE_SAFE", label: "READ_WRITE_SAFE"},\
-                    {value: "READ_WRITE_ALL", label: "READ_WRITE_ALL"},\
-                    {value: "IGNORE", label: "IGNORE"}\
-                ]',
-                'Partition loss policies:\
-                <ul>\
-                    <li>READ_ONLY_SAFE - in this mode all writes to the cache will be failed with an exception,\
-                        reads will only be allowed for keys in  non-lost partitions.\
-                        Reads from lost partitions will be failed with an exception.</li>\
-                    <li>READ_ONLY_ALL - in this mode all writes to the cache will be failed with an exception.\
-                        All reads will proceed as if all partitions were in a consistent state.\
-                        The result of reading from a lost partition is undefined and may be different on different nodes in the cluster.</li>\
-                    <li>READ_WRITE_SAFE - in this mode all reads and writes will be allowed for keys in valid partitions.\
-                        All reads and writes for keys in lost partitions will be failed with an exception.</li>\
-                    <li>READ_WRITE_ALL - in this mode all reads and writes will proceed as if all partitions were in a consistent state.\
-                        The result of reading from a lost partition is undefined and may be different on different nodes in the cluster.</li>\
-                    <li>IGNORE - in this mode if partition is lost, reset it state and do not clear intermediate data.\
-                        The result of reading from a previously lost and not cleared partition is undefined and may be different\
-                        on different nodes in the cluster.</li>\
-                </ul>')
+                +form-field__dropdown({
+                    label:'Partition loss policy:',
+                    model: `${model}.partitionLossPolicy`,
+                    name: '"partitionLossPolicy"',
+                    placeholder: 'IGNORE',
+                    options: '[\
+                                        {value: "READ_ONLY_SAFE", label: "READ_ONLY_SAFE"},\
+                                        {value: "READ_ONLY_ALL", label: "READ_ONLY_ALL"},\
+                                        {value: "READ_WRITE_SAFE", label: "READ_WRITE_SAFE"},\
+                                        {value: "READ_WRITE_ALL", label: "READ_WRITE_ALL"},\
+                                        {value: "IGNORE", label: "IGNORE"}\
+                                    ]',
+                    tip: 'Partition loss policies:\
+                                    <ul>\
+                                        <li>READ_ONLY_SAFE - in this mode all writes to the cache will be failed with an exception,\
+                                            reads will only be allowed for keys in  non-lost partitions.\
+                                            Reads from lost partitions will be failed with an exception.</li>\
+                                        <li>READ_ONLY_ALL - in this mode all writes to the cache will be failed with an exception.\
+                                            All reads will proceed as if all partitions were in a consistent state.\
+                                            The result of reading from a lost partition is undefined and may be different on different nodes in the cluster.</li>\
+                                        <li>READ_WRITE_SAFE - in this mode all reads and writes will be allowed for keys in valid partitions.\
+                                            All reads and writes for keys in lost partitions will be failed with an exception.</li>\
+                                        <li>READ_WRITE_ALL - in this mode all reads and writes will proceed as if all partitions were in a consistent state.\
+                                            The result of reading from a lost partition is undefined and may be different on different nodes in the cluster.</li>\
+                                        <li>IGNORE - in this mode if partition is lost, reset it state and do not clear intermediate data.\
+                                            The result of reading from a previously lost and not cleared partition is undefined and may be different\
+                                            on different nodes in the cluster.</li>\
+                                    </ul>'
+                })
             .pc-form-grid-col-60(ng-show=`${model}.cacheMode === 'PARTITIONED' && ${model}.backups`)
-                +checkbox('Read from backup', `${model}.readFromBackup`, '"readFromBackup"',
-                    'Flag indicating whether data can be read from backup<br/>\
-                    If not set then always get data from primary node (never from backup)')
+                +form-field__checkbox({
+                    label: 'Read from backup',
+                    model: `${model}.readFromBackup`,
+                    name: '"readFromBackup"',
+                    tip: 'Flag indicating whether data can be read from backup<br/>\
+                          If not set then always get data from primary node (never from backup)'
+                })
             .pc-form-grid-col-60
-                +checkbox('Copy on read', `${model}.copyOnRead`, '"copyOnRead"',
-                    'Flag indicating whether copy of the value stored in cache should be created for cache operation implying return value<br/>\
-                    Also if this flag is set copies are created for values passed to CacheInterceptor and to CacheEntryProcessor')
+                +form-field__checkbox({
+                    label: 'Copy on read',
+                    model: `${model}.copyOnRead`,
+                    name: '"copyOnRead"',
+                    tip: 'Flag indicating whether copy of the value stored in cache should be created for cache operation implying return value<br/>\
+                          Also if this flag is set copies are created for values passed to CacheInterceptor and to CacheEntryProcessor'
+                })
             .pc-form-grid-col-60(ng-show=`${model}.cacheMode === 'PARTITIONED' && ${model}.atomicityMode === 'TRANSACTIONAL'`)
-                +checkbox('Invalidate near cache', `${model}.isInvalidate`, '"isInvalidate"',
-                    'Invalidation flag for near cache entries in transaction<br/>\
-                    If set then values will be invalidated (nullified) upon commit in near cache')
+                +form-field__checkbox({
+                    label: 'Invalidate near cache',
+                    model: `${model}.isInvalidate`,
+                    name: '"isInvalidate"',
+                    tip: 'Invalidation flag for near cache entries in transaction<br/>\
+                          If set then values will be invalidated (nullified) upon commit in near cache'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'cacheGeneral')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/memory.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/memory.pug
index 10eb488..571e8ec 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/memory.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/memory.pug
@@ -22,7 +22,7 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Memory
     panel-description
-        | Cache memory settings. 
+        | Cache memory settings.
         a.link-success(
             href="https://apacheignite.readme.io/v1.9/docs/off-heap-memory"
             target="_blank"
@@ -37,21 +37,35 @@
         .pca-form-column-6.pc-form-grid-row
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +checkbox('Onheap cache enabled', model + '.onheapCacheEnabled', '"OnheapCacheEnabled"', 'Checks if the on-heap cache is enabled for the off-heap based page memory')
-
+                +form-field__checkbox({
+                    label: 'Onheap cache enabled',
+                    model: model + '.onheapCacheEnabled',
+                    name: '"OnheapCacheEnabled"',
+                    tip: 'Checks if the on-heap cache is enabled for the off-heap based page memory'
+                })
             //- Since ignite 2.0 deprecated in ignite 2.3
             .pc-form-grid-col-60(ng-if='$ctrl.available(["2.0.0", "2.3.0"])')
-                +text('Memory policy name:', model + '.memoryPolicyName', '"MemoryPolicyName"', 'false', 'default',
-                    'Name of memory policy configuration for this cache')
+                +form-field__text({
+                    label: 'Memory policy name:',
+                    model: `${model}.memoryPolicyName`,
+                    name: '"MemoryPolicyName"',
+                    placeholder: 'default',
+                    tip: 'Name of memory policy configuration for this cache'
+                })
 
             //- Since ignite 2.3
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.3.0")')
-                +text('Data region name:', model + '.dataRegionName', '"DataRegionName"', 'false', 'default',
-                    'Name of data region configuration for this cache')
+                +form-field__text({
+                    label: 'Data region name:',
+                    model: `${model}.dataRegionName`,
+                    name: '"DataRegionName"',
+                    placeholder: 'default',
+                    tip: 'Name of data region configuration for this cache'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Mode:',
                     model: `${model}.memoryMode`,
                     name: '"memoryMode"',
@@ -88,9 +102,9 @@
                     ui-validate-watch=`"${model}.domains.length"`
                     ng-model-options='{allowInvalid: true}'
                 )
-                    +form-field-feedback(null, 'offheapAndDomains', 'Query indexing could not be enabled while values are stored off-heap')
+                    +form-field__error({ error: 'offheapAndDomains', message: 'Query indexing could not be enabled while values are stored off-heap' })
             .pc-form-grid-col-60(ng-if=`${model}.memoryMode !== 'OFFHEAP_VALUES'`)
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Off-heap memory:',
                     model: `${model}.offHeapMode`,
                     name: '"offHeapMode"',
@@ -111,12 +125,12 @@
                     ui-validate-watch=`'${model}.memoryMode'`
                     ng-model-options='{allowInvalid: true}'
                 )
-                    +form-field-feedback(null, 'offheapDisabled', 'Off-heap storage can\'t be disabled when memory mode is OFFHEAP_TIERED')
+                    +form-field__error({ error: 'offheapDisabled', message: 'Off-heap storage can\'t be disabled when memory mode is OFFHEAP_TIERED' })
             .pc-form-grid-col-60(
                 ng-if=`${model}.offHeapMode === 1 && ${model}.memoryMode !== 'OFFHEAP_VALUES'`
                 ng-if-end
             )
-                pc-form-field-size(
+                form-field-size(
                     label='Off-heap memory max size:'
                     ng-model=`${model}.offHeapMaxMemory`
                     name='offHeapMaxMemory'
@@ -127,32 +141,53 @@
                     size-type='bytes'
                     required='true'
                 )
-            +evictionPolicy(`${model}.evictionPolicy`, '"evictionPolicy"', 'true',
-                `$ctrl.Caches.evictionPolicy.required(${model})`,
-                'Optional cache eviction policy<br/>\
-                Must be set for entries to be evicted from on-heap to off-heap or swap\
-                <ul>\
-                    <li>Least Recently Used(LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
-                    <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
-                    <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
-                </ul>')
+
+            +form-field__eviction-policy({
+                model: `${model}.evictionPolicy`,
+                name: '"evictionPolicy"',
+                enabled: 'true',
+                required: `$ctrl.Caches.evictionPolicy.required(${model})`,
+                tip: 'Optional cache eviction policy<br/>\
+                      Must be set for entries to be evicted from on-heap to off-heap or swap\
+                      <ul>\
+                          <li>Least Recently Used(LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
+                          <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
+                          <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
+                      </ul>'
+            })
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +java-class('Eviction filter:', model + '.evictionFilter', '"EvictionFilter"', 'true', 'false', 'Eviction filter to specify which entries should not be evicted')
+                +form-field__java-class({
+                    label: 'Eviction filter:',
+                    model: `${model}.evictionFilter`,
+                    name: '"EvictionFilter"',
+                    tip: 'Eviction filter to specify which entries should not be evicted'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Start size:', `${model}.startSize`, '"startSize"', 'true', '1500000', '0',
-                    'In terms of size and capacity, Ignite internal cache map acts exactly like a normal Java HashMap: it has some initial capacity\
-                    (which is pretty small by default), which doubles as data arrives. The process of internal cache map resizing is CPU-intensive\
-                    and time-consuming, and if you load a huge dataset into cache (which is a normal use case), the map will have to resize a lot of times.\
-                    To avoid that, you can specify the initial cache map capacity, comparable to the expected size of your dataset.\
-                    This will save a lot of CPU resources during the load time, because the map would not have to resize.\
-                    For example, if you expect to load 10 million entries into cache, you can set this property to 10 000 000.\
-                    This will save you from cache internal map resizes.')
+                +form-field__number({
+                    label: 'Start size:',
+                    model: `${model}.startSize`,
+                    name: '"startSize"',
+                    placeholder: '1500000',
+                    min: '0',
+                    tip: 'In terms of size and capacity, Ignite internal cache map acts exactly like a normal Java HashMap: it has some initial capacity\
+                          (which is pretty small by default), which doubles as data arrives. The process of internal cache map resizing is CPU-intensive\
+                          and time-consuming, and if you load a huge dataset into cache (which is a normal use case), the map will have to resize a lot of times.\
+                          To avoid that, you can specify the initial cache map capacity, comparable to the expected size of your dataset.\
+                          This will save a lot of CPU resources during the load time, because the map would not have to resize.\
+                          For example, if you expect to load 10 million entries into cache, you can set this property to 10 000 000.\
+                          This will save you from cache internal map resizes.'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +checkbox('Swap enabled', `${model}.swapEnabled`, '"swapEnabled"', 'Flag indicating whether swap storage is enabled or not for this cache')
+                +form-field__checkbox({
+                    label: 'Swap enabled',
+                    model: `${model}.swapEnabled`,
+                    name: '"swapEnabled"',
+                    tip: 'Flag indicating whether swap storage is enabled or not for this cache'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'cacheMemory')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-client.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-client.pug
index 2b6705d..ed0e38a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-client.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-client.pug
@@ -26,8 +26,8 @@
 )
     panel-title Near cache on client node
     panel-description
-        | Near cache settings for client nodes. 
-        | Near cache is a small local cache that stores most recently or most frequently accessed data. 
+        | Near cache settings for client nodes.
+        | Near cache is a small local cache that stores most recently or most frequently accessed data.
         | Should be used in case when it is impossible to send computations to remote nodes.
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
@@ -35,16 +35,32 @@
             -var enabled = `${nearCfg}.enabled`
 
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"clientNearEnabled"', 'Flag indicating whether to configure near cache')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"clientNearEnabled"',
+                    tip: 'Flag indicating whether to configure near cache'
+                })
             .pc-form-grid-col-60
-                +number('Start size:', `${nearCfg}.nearStartSize`, '"clientNearStartSize"', enabled, '375000', '0',
-                    'Initial cache size for near cache which will be used to pre-create internal hash table after start')
-            +evictionPolicy(`${nearCfg}.nearEvictionPolicy`, '"clientNearCacheEvictionPolicy"', enabled, 'false',
-                'Near cache eviction policy\
-                <ul>\
-                    <li>Least Recently Used (LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
-                    <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
-                    <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
-                </ul>')
+                +form-field__number({
+                    label: 'Start size:',
+                    model: `${nearCfg}.nearStartSize`,
+                    name: '"clientNearStartSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '375000',
+                    min: '0',
+                    tip: 'Initial cache size for near cache which will be used to pre-create internal hash table after start'
+                })
+            +form-field__eviction-policy({
+                model: `${nearCfg}.nearEvictionPolicy`,
+                name: '"clientNearCacheEvictionPolicy"',
+                enabled: enabled,
+                tip: 'Near cache eviction policy\
+                     <ul>\
+                         <li>Least Recently Used (LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
+                         <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
+                         <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
+                     </ul>'
+            })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheNearClient')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-server.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-server.pug
index 3a91fd2..3d2043a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-server.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/near-cache-server.pug
@@ -26,9 +26,9 @@
 )
     panel-title Near cache on server node
     panel-description
-        | Near cache settings. 
-        | Near cache is a small local cache that stores most recently or most frequently accessed data. 
-        | Should be used in case when it is impossible to send computations to remote nodes. 
+        | Near cache settings.
+        | Near cache is a small local cache that stores most recently or most frequently accessed data.
+        | Should be used in case when it is impossible to send computations to remote nodes.
         a.link-success(href="https://apacheignite.readme.io/docs/near-caches" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
@@ -36,16 +36,32 @@
             -var enabled = `${nearCfg}.enabled`
 
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"nearCacheEnabled"', 'Flag indicating whether to configure near cache')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"nearCacheEnabled"',
+                    tip: 'Flag indicating whether to configure near cache'
+                })
             .pc-form-grid-col-60
-                +number('Start size:', `${nearCfg}.nearStartSize`, '"nearStartSize"', enabled, '375000', '0',
-                    'Initial cache size for near cache which will be used to pre-create internal hash table after start')
-            +evictionPolicy(`${model}.nearConfiguration.nearEvictionPolicy`, '"nearCacheEvictionPolicy"', enabled, 'false',
-                'Near cache eviction policy\
-                <ul>\
-                    <li>Least Recently Used (LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
-                    <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
-                    <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
-                </ul>')
+                +form-field__number({
+                    label: 'Start size:',
+                    model: `${nearCfg}.nearStartSize`,
+                    name: '"nearStartSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '375000',
+                    min: '0',
+                    tip: 'Initial cache size for near cache which will be used to pre-create internal hash table after start'
+                })
+            +form-field__eviction-policy({
+                model: `${model}.nearConfiguration.nearEvictionPolicy`,
+                name: '"nearCacheEvictionPolicy"',
+                enabled: enabled,
+                tip: 'Near cache eviction policy\
+                     <ul>\
+                         <li>Least Recently Used (LRU) - Eviction policy based on LRU algorithm and supports batch eviction</li>\
+                         <li>First In First Out (FIFO) - Eviction policy based on FIFO algorithm and supports batch eviction</li>\
+                         <li>SORTED - Eviction policy which will select the minimum cache entry for eviction</li>\
+                     </ul>'
+            })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheNearServer')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/node-filter.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/node-filter.pug
index 11938f9..32afac1 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/node-filter.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/node-filter.pug
@@ -29,11 +29,18 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('Node filter:', nodeFilterKind, '"nodeFilter"', 'true', 'Not set', '::$ctrl.Caches.nodeFilterKinds', 'Node filter variant')
+                +form-field__dropdown({
+                    label: 'Node filter:',
+                    model: nodeFilterKind,
+                    name: '"nodeFilter"',
+                    placeholder: 'Not set',
+                    options: '::$ctrl.Caches.nodeFilterKinds',
+                    tip: 'Node filter variant'
+                })
             .pc-form-grid-col-60(
                 ng-if=igfsFilter
             )
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'IGFS:',
                     model: `${nodeFilter}.IGFS.igfs`,
                     name: '"igfsNodeFilter"',
@@ -45,9 +52,15 @@
                 })(
                     pc-is-in-collection='$ctrl.igfsIDs'
                 )
-                    +form-field-feedback(_, 'isInCollection', `Cluster doesn't have such an IGFS`)
+                    +form-field__error({ error: 'isInCollection',  message: `Cluster doesn't have such an IGFS` })
             .pc-form-grid-col-60(ng-show=customFilter)
-                +java-class('Class name:', `${nodeFilter}.Custom.className`, '"customNodeFilter"',
-                    'true', customFilter, 'Class name of custom node filter implementation', customFilter)
+                +form-field__java-class({
+                    label: 'Class name:',
+                    model: `${nodeFilter}.Custom.className`,
+                    name: '"customNodeFilter"',
+                    required: customFilter,
+                    tip: 'Class name of custom node filter implementation',
+                    validationActive: customFilter
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheNodeFilter', 'igfss')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/query.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/query.pug
index 20869f5..8fedfc0 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/query.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/query.pug
@@ -21,45 +21,68 @@
 
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Queries & Indexing
-    panel-description 
-        | Cache queries settings. 
+    panel-description
+        | Cache queries settings.
         a.link-success(href="https://apacheignite-sql.readme.io/docs/select" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +text('SQL schema name:', `${model}.sqlSchema`, '"sqlSchema"', 'false', 'Input schema name',
-                    'Specify any custom name to be used as SQL schema for current cache. This name will correspond to SQL ANSI-99 standard.\
-                    Nonquoted identifiers are not case sensitive. Quoted identifiers are case sensitive.\
-                    When SQL schema is not specified, quoted cache name should used instead.<br/>\
-                    For example:\
-                    <ul>\
-                        <li>\
-                            Query without schema names (quoted cache names will be used):\
-                            SELECT * FROM "PersonsCache".Person p INNER JOIN "OrganizationsCache".Organization o on p.org = o.id\
-                        </li>\
-                        <li>\
-                            The same query using schema names "Persons" and "Organizations":\
-                            SELECT * FROM Persons.Person p INNER JOIN Organizations.Organization o on p.org = o.id\
-                        </li>\
-                    </ul>')
+                +form-field__text({
+                    label: 'SQL schema name:',
+                    model: `${model}.sqlSchema`,
+                    name: '"sqlSchema"',
+                    placeholder: 'Input schema name',
+                    tip: 'Cache group name.<br/>\
+                          Caches with the same group name share single underlying "physical" cache (partition set), but are logically isolated.'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('On-heap cache for off-heap indexes:', `${model}.sqlOnheapRowCacheSize`, '"sqlOnheapRowCacheSize"', 'true', '10240', '1',
-                    'Number of SQL rows which will be cached onheap to avoid deserialization on each SQL index access')
+                +form-field__number({
+                    label: 'On-heap cache for off-heap indexes:',
+                    model: `${model}.sqlOnheapRowCacheSize`,
+                    name: '"sqlOnheapRowCacheSize"',
+                    placeholder: '10240',
+                    min: '1',
+                    tip: 'Specify any custom name to be used as SQL schema for current cache. This name will correspond to SQL ANSI-99 standard.\
+                          Nonquoted identifiers are not case sensitive. Quoted identifiers are case sensitive.\
+                          When SQL schema is not specified, quoted cache name should used instead.<br/>\
+                          For example:\
+                          <ul>\
+                            <li>\
+                            Query without schema names (quoted cache names will be used):\
+                            SELECT * FROM "PersonsCache".Person p INNER JOIN "OrganizationsCache".Organization o on p.org = o.id\
+                            </li>\
+                            <li>\
+                                The same query using schema names "Persons" and "Organizations":\
+                                SELECT * FROM Persons.Person p INNER JOIN Organizations.Organization o on p.org = o.id\
+                            </li>\
+                          </ul>'
+                })
 
             //- Deprecated in ignite 2.1
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.1.0"])')
-                +number('Long query timeout:', `${model}.longQueryWarningTimeout`, '"longQueryWarningTimeout"', 'true', '3000', '0',
-                    'Timeout in milliseconds after which long query warning will be printed')
+                +form-field__number({
+                    label: 'Long query timeout:',
+                    model: `${model}.longQueryWarningTimeout`,
+                    name: '"longQueryWarningTimeout"',
+                    placeholder: '3000',
+                    min: '0',
+                    tip: 'Timeout in milliseconds after which long query warning will be printed'
+                })
             .pc-form-grid-col-60
-                +number('History size:', `${model}.queryDetailMetricsSize`, '"queryDetailMetricsSize"', 'true', '0', '0',
-                    'Size of queries detail metrics that will be stored in memory for monitoring purposes')
+                +form-field__number({
+                    label: 'History size:',
+                    model: `${model}.queryDetailMetricsSize`,
+                    name: '"queryDetailMetricsSize"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Size of queries detail metrics that will be stored in memory for monitoring purposes'
+                })
             .pc-form-grid-col-60
                 mixin caches-query-list-sql-functions()
                     .ignite-form-field
                         -let items = `${model}.sqlFunctionClasses`;
-                        -let uniqueTip = 'SQL function with such class name already exists!'
 
                         list-editable(
                             ng-model=items
@@ -72,7 +95,7 @@
 
                             list-editable-item-edit
                                 +list-java-class-field('SQL function', '$item', '"sqlFunction"', items)
-                                    +unique-feedback('"sqlFunction"', uniqueTip)
+                                    +form-field__error({ error: 'igniteUnique', message: 'SQL function with such class name already exists!' })
 
                             list-editable-no-items
                                 list-editable-add-item-button(
@@ -87,21 +110,35 @@
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +checkbox('Snapshotable index', `${model}.snapshotableIndex`, '"snapshotableIndex"',
-                    'Flag indicating whether SQL indexes should support snapshots')
+                +form-field__checkbox({
+                    label: 'Snapshotable index',
+                    model: `${model}.snapshotableIndex`,
+                    name: '"snapshotableIndex"',
+                    tip: 'Flag indicating whether SQL indexes should support snapshots'
+                })
 
             .pc-form-grid-col-60
-                +checkbox('Escape table and filed names', `${model}.sqlEscapeAll`, '"sqlEscapeAll"',
-                    'If enabled than all schema, table and field names will be escaped with double quotes (for example: "tableName"."fieldName").<br/>\
-                    This enforces case sensitivity for field names and also allows having special characters in table and field names.<br/>\
-                    Escaped names will be used for creation internal structures in Ignite SQL engine.')
+                +form-field__checkbox({
+                    label: 'Escape table and filed names',
+                    model: `${model}.sqlEscapeAll`,
+                    name: '"sqlEscapeAll"',
+                    tip: 'If enabled than all schema, table and field names will be escaped with double quotes (for example: "tableName"."fieldName").<br/>\
+                         This enforces case sensitivity for field names and also allows having special characters in table and field names.<br/>\
+                         Escaped names will be used for creation internal structures in Ignite SQL engine.'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-30(ng-if-start='$ctrl.available("2.0.0")')
-                +number('Query parallelism', model + '.queryParallelism', '"queryParallelism"', 'true', '1', '1',
-                    'A hint to query execution engine on desired degree of parallelism within a single node')
+                +form-field__number({
+                    label: 'Query parallelism',
+                    model: `${model}.queryParallelism`,
+                    name: '"queryParallelism"',
+                    placeholder: '1',
+                    min: '1',
+                    tip: 'A hint to query execution engine on desired degree of parallelism within a single node'
+                })
             .pc-form-grid-col-30(ng-if-end)
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'SQL index max inline size:',
                     model: `${model}.sqlIndexMaxInlineSize`,
                     name: '"sqlIndexMaxInlineSize"',
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/rebalance.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/rebalance.pug
index 7563435..feb7699 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/rebalance.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/rebalance.pug
@@ -26,41 +26,82 @@
 )
     panel-title Rebalance
     panel-description
-        | Cache rebalance settings. 
+        | Cache rebalance settings.
         a.link-success(href="https://apacheignite.readme.io/docs/rebalancing" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +dropdown('Mode:', `${model}.rebalanceMode`, '"rebalanceMode"', 'true', 'ASYNC',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Mode:',
+                    model: `${model}.rebalanceMode`,
+                    name: '"rebalanceMode"',
+                    placeholder: 'ASYNC',
+                    options: '[\
                         {value: "SYNC", label: "SYNC"},\
                         {value: "ASYNC", label: "ASYNC"},\
                         {value: "NONE", label: "NONE"}\
                     ]',
-                    'Rebalance modes\
-                    <ul>\
-                        <li>Synchronous - in this mode distributed caches will not start until all necessary data is loaded from other available grid nodes</li>\
-                        <li>Asynchronous - in this mode distributed caches will start immediately and will load all necessary data from other available grid nodes in the background</li>\
-                        <li>None - in this mode no rebalancing will take place which means that caches will be either loaded on demand from persistent store whenever data is accessed, or will be populated explicitly</li>\
-                    </ul>')
+                    tip: 'Rebalance modes\
+                        <ul>\
+                            <li>Synchronous - in this mode distributed caches will not start until all necessary data is loaded from other available grid nodes</li>\
+                            <li>Asynchronous - in this mode distributed caches will start immediately and will load all necessary data from other available grid nodes in the background</li>\
+                            <li>None - in this mode no rebalancing will take place which means that caches will be either loaded on demand from persistent store whenever data is accessed, or will be populated explicitly</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-30
-                +number('Batch size:', `${model}.rebalanceBatchSize`, '"rebalanceBatchSize"', 'true', '512 * 1024', '1',
-                    'Size (in bytes) to be loaded within a single rebalance message<br/>\
-                    Rebalancing algorithm will split total data set on every node into multiple batches prior to sending data')
+                +form-field__number({
+                    label: 'Batch size:',
+                    model: `${model}.rebalanceBatchSize`,
+                    name: '"rebalanceBatchSize"',
+                    placeholder: '512 * 1024',
+                    min: '1',
+                    tip: 'Size (in bytes) to be loaded within a single rebalance message<br/>\
+                          Rebalancing algorithm will split total data set on every node into multiple batches prior to sending data'
+                })
             .pc-form-grid-col-30
-                +number('Batches prefetch count:', `${model}.rebalanceBatchesPrefetchCount`, '"rebalanceBatchesPrefetchCount"', 'true', '2', '1',
-                    'Number of batches generated by supply node at rebalancing start')
+                +form-field__number({
+                    label: 'Batches prefetch count:',
+                    model: `${model}.rebalanceBatchesPrefetchCount`,
+                    name: '"rebalanceBatchesPrefetchCount"',
+                    placeholder: '2',
+                    min: '1',
+                    tip: 'Number of batches generated by supply node at rebalancing start'
+                })
             .pc-form-grid-col-30
-                +number('Order:', `${model}.rebalanceOrder`, '"rebalanceOrder"', 'true', '0', Number.MIN_SAFE_INTEGER,
-                    'If cache rebalance order is positive, rebalancing for this cache will be started only when rebalancing for all caches with smaller rebalance order (except caches with rebalance order 0) will be completed')
+                +form-field__number({
+                    label: 'Order:',
+                    model: `${model}.rebalanceOrder`,
+                    name: '"rebalanceOrder"',
+                    placeholder: '0',
+                    min: 'Number.MIN_SAFE_INTEGER',
+                    tip: 'If cache rebalance order is positive, rebalancing for this cache will be started only when rebalancing for all caches with smaller rebalance order (except caches with rebalance order 0) will be completed'
+                })
             .pc-form-grid-col-20
-                +number('Delay:', `${model}.rebalanceDelay`, '"rebalanceDelay"', 'true', '0', '0',
-                    'Delay in milliseconds upon a node joining or leaving topology (or crash) after which rebalancing should be started automatically')
+                +form-field__number({
+                    label: 'Delay:',
+                    model: `${model}.rebalanceDelay`,
+                    name: '"rebalanceDelay"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Delay in milliseconds upon a node joining or leaving topology (or crash) after which rebalancing should be started automatically'
+                })
             .pc-form-grid-col-20
-                +number('Timeout:', `${model}.rebalanceTimeout`, '"rebalanceTimeout"', 'true', '10000', '0',
-                    'Rebalance timeout in milliseconds')
+                +form-field__number({
+                    label: 'Timeout:',
+                    model: `${model}.rebalanceTimeout`,
+                    name: '"rebalanceTimeout"',
+                    placeholder: '10000',
+                    min: '0',
+                    tip: 'Rebalance timeout in milliseconds'
+                })
             .pc-form-grid-col-20
-                +number('Throttle:', `${model}.rebalanceThrottle`, '"rebalanceThrottle"', 'true', '0', '0',
-                    'Time in milliseconds to wait between rebalance messages to avoid overloading of CPU or network')
+                +form-field__number({
+                    label: 'Throttle:',
+                    model: `${model}.rebalanceThrottle`,
+                    name: '"rebalanceThrottle"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Time in milliseconds to wait between rebalance messages to avoid overloading of CPU or network'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheRebalance')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/statistics.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/statistics.pug
index bf58354..a6c55ab 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/statistics.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/statistics.pug
@@ -25,10 +25,19 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Statistics enabled', `${model}.statisticsEnabled`, '"statisticsEnabled"', 'Flag indicating whether statistics gathering is enabled on this cache')
+                +form-field__checkbox({
+                    label: 'Statistics enabled',
+                    model: `${model}.statisticsEnabled`,
+                    name: '"statisticsEnabled"',
+                    tip: 'Flag indicating whether statistics gathering is enabled on this cache'
+                })
             .pc-form-grid-col-60
-                +checkbox('Management enabled', `${model}.managementEnabled`, '"managementEnabled"',
-                'Flag indicating whether management is enabled on this cache<br/>\
-                If enabled the CacheMXBean for each cache is registered in the platform MBean server')
+                +form-field__checkbox({
+                    label: 'Management enabled',
+                    model: `${model}.managementEnabled`,
+                    name: '"managementEnabled"',
+                    tip: 'Flag indicating whether management is enabled on this cache<br/>\
+                         If enabled the CacheMXBean for each cache is registered in the platform MBean server'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'cacheStatistics')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/store.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/store.pug
index 32d966a..903ea08 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/store.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cache-edit-form/templates/store.pug
@@ -19,40 +19,17 @@
 -var form = 'store'
 -var model = '$ctrl.clonedCache'
 
-mixin hibernateField(name, model, items, valid, save, newItem)
-    -var resetOnEnter = newItem ? '(stopblur = true) && (group.add = [{}])' : '(field.edit = false)'
-    -var onEnter = `${valid} && (${save}); ${valid} && ${resetOnEnter};`
-
-    -var onEscape = newItem ? 'group.add = []' : 'field.edit = false'
-
-    -var resetOnBlur = newItem ? '!stopblur && (group.add = [])' : 'field.edit = false'
-    -var onBlur = `${valid} && (${save}); ${resetOnBlur};`
-
-    div(ignite-on-focus-out=onBlur)
-        if block
-            block
-
-        .input-tip
-            +ignite-form-field-input(name, model, false, 'true', 'key=value')(
-                data-ignite-property-unique=items
-                data-ignite-property-value-specified
-                data-ignite-form-field-input-autofocus='true'
-
-                ignite-on-enter=onEnter
-                ignite-on-escape=onEscape
-            )
-
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Store
-    panel-description 
-        | Cache store settings. 
+    panel-description
+        | Cache store settings.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/3rd-party-store" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             -var storeFactory = `${model}.cacheStoreFactory`;
             -var storeFactoryKind = `${storeFactory}.kind`;
             .pc-form-grid-col-60
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Store factory:',
                     model: storeFactoryKind,
                     name: '"cacheStoreFactory"',
@@ -65,24 +42,24 @@
                         <li>Hibernate BLOB store factory - Objects are stored in underlying database in BLOB format backed by Hibernate</li>
                     </ul>`
                 })(
-                    ui-validate=`{
+                ui-validate=`{
                         writeThroughOn: '$ctrl.Caches.cacheStoreFactory.storeDisabledValueOff(${model}, ${model}.writeThrough)',
                         readThroughOn: '$ctrl.Caches.cacheStoreFactory.storeDisabledValueOff(${model}, ${model}.readThrough)',
                         writeBehindOn: '$ctrl.Caches.cacheStoreFactory.storeDisabledValueOff(${model}, ${model}.writeBehindEnabled)'
                     }`
-                    ui-validate-watch-collection=`"[${model}.readThrough, ${model}.writeThrough, ${model}.writeBehindEnabled]"`
-                    ng-model-options='{allowInvalid: true}'
+                ui-validate-watch-collection=`"[${model}.readThrough, ${model}.writeThrough, ${model}.writeBehindEnabled]"`
+                ng-model-options='{allowInvalid: true}'
                 )
-                    +form-field-feedback(null, 'writeThroughOn', 'Write through is enabled but store is not set')
-                    +form-field-feedback(null, 'readThroughOn', 'Read through is enabled but store is not set')
-                    +form-field-feedback(null, 'writeBehindOn', 'Write-behind is enabled but store is not set')
+                    +form-field__error({ error: 'writeThroughOn', message: 'Write through is enabled but store is not set' })
+                    +form-field__error({ error: 'readThroughOn', message: 'Read through is enabled but store is not set' })
+                    +form-field__error({ error: 'writeBehindOn', message: 'Write-behind is enabled but store is not set' })
             .pc-form-group(ng-if=storeFactoryKind)
                 .pc-form-grid-row(ng-if=`${storeFactoryKind} === 'CacheJdbcPojoStoreFactory'`)
                     -var pojoStoreFactory = `${storeFactory}.CacheJdbcPojoStoreFactory`
                     -var required = `${storeFactoryKind} === 'CacheJdbcPojoStoreFactory'`
 
                     .pc-form-grid-col-60
-                        +sane-ignite-form-field-text({
+                        +form-field__text({
                             label: 'Data source bean name:',
                             model: `${pojoStoreFactory}.dataSourceBean`,
                             name: '"pojoDataSourceBean"',
@@ -93,66 +70,130 @@
                             is-valid-java-identifier
                             not-java-reserved-word
                         )
-                            +form-field-feedback(null, 'required', 'Data source bean name is required')
-                            +form-field-feedback(null, 'isValidJavaIdentifier', 'Data source bean name is not a valid Java identifier')
-                            +form-field-feedback(null, 'notJavaReservedWord', 'Data source bean name should not be a Java reserved word')
+                            +form-field__error({ error: 'required', message: 'Data source bean name is required' })
+                            +form-field__error({ error: 'isValidJavaIdentifier', message: 'Data source bean name is not a valid Java identifier' })
+                            +form-field__error({ error: 'notJavaReservedWord', message: 'Data source bean name should not be a Java reserved word' })
                     .pc-form-grid-col-60
-                        +dialect('Dialect:', `${pojoStoreFactory}.dialect`, '"pojoDialect"', required,
-                            'Dialect of SQL implemented by a particular RDBMS:', 'Generic JDBC dialect',
-                            'Choose JDBC dialect')
+                        +form-field__dialect({
+                            label: 'Dialect:',
+                            model: `${pojoStoreFactory}.dialect`,
+                            name: '"pojoDialect"',
+                            required,
+                            tip: 'Dialect of SQL implemented by a particular RDBMS:',
+                            genericDialectName: 'Generic JDBC dialect',
+                            placeholder: 'Choose JDBC dialect'
+                        })
                     .pc-form-grid-col-30
-                        +number('Batch size:', `${pojoStoreFactory}.batchSize`, '"pojoBatchSize"', true, '512', '1',
-                            'Maximum batch size for writeAll and deleteAll operations')
+                        +form-field__number({
+                            label:'Batch size:',
+                            model: `${pojoStoreFactory}.batchSize`,
+                            name: '"pojoBatchSize"',
+                            placeholder: '512',
+                            min: '1',
+                            tip: 'Maximum batch size for writeAll and deleteAll operations'
+                        })
                     .pc-form-grid-col-30
-                        +number('Thread count:', `${pojoStoreFactory}.maximumPoolSize`, '"pojoMaximumPoolSize"', true, 'availableProcessors', '1',
-                            'Maximum workers thread count.<br/>\
-                            These threads are responsible for load cache.')
+                        +form-field__number({
+                            label: 'Thread count:',
+                            model: `${pojoStoreFactory}.maximumPoolSize`,
+                            name: '"pojoMaximumPoolSize"',
+                            placeholder: 'availableProcessors',
+                            min: '1',
+                            tip: 'Maximum workers thread count.<br/>\
+                                 These threads are responsible for load cache.'
+                        })
                     .pc-form-grid-col-30
-                        +number('Maximum write attempts:', `${pojoStoreFactory}.maximumWriteAttempts`, '"pojoMaximumWriteAttempts"', true, '2', '0',
-                            'Maximum write attempts in case of database error')
+                        +form-field__number({
+                            label: 'Maximum write attempts:',
+                            model: `${pojoStoreFactory}.maximumWriteAttempts`,
+                            name: '"pojoMaximumWriteAttempts"',
+                            placeholder: '2',
+                            min: '0',
+                            tip: 'Maximum write attempts in case of database error'
+                        })
                     .pc-form-grid-col-30
-                        +number('Parallel load threshold:', `${pojoStoreFactory}.parallelLoadCacheMinimumThreshold`, '"pojoParallelLoadCacheMinimumThreshold"', true, '512', '0',
-                            'Parallel load cache minimum threshold.<br/>\
-                            If <b>0</b> then load sequentially.')
+                        +form-field__number({
+                            label: 'Parallel load threshold:',
+                            model: `${pojoStoreFactory}.parallelLoadCacheMinimumThreshold`,
+                            name: '"pojoParallelLoadCacheMinimumThreshold"',
+                            placeholder: '512',
+                            min: '0',
+                            tip: 'Parallel load cache minimum threshold.<br/>\
+                                 If <b>0</b> then load sequentially.'
+                        })
                     .pc-form-grid-col-60
-                        +java-class('Hasher', `${pojoStoreFactory}.hasher`, '"pojoHasher"', 'true', 'false', 'Hash calculator', required)
+                        +form-field__java-class({
+                            label: 'Hasher:',
+                            model: `${pojoStoreFactory}.hasher`,
+                            name: '"pojoHasher"',
+                            tip: 'Hash calculator',
+                            validationActive: required
+                        })
                     .pc-form-grid-col-60
-                        +java-class('Transformer', `${pojoStoreFactory}.transformer`, '"pojoTransformer"', 'true', 'false', 'Types transformer', required)
+                        +form-field__java-class({
+                            label: 'Transformer:',
+                            model: `${pojoStoreFactory}.transformer`,
+                            name: '"pojoTransformer"',
+                            tip: 'Types transformer',
+                            validationActive: required
+                        })
                     .pc-form-grid-col-60
-                        +checkbox('Escape table and filed names', `${pojoStoreFactory}.sqlEscapeAll`, '"sqlEscapeAll"',
-                            'If enabled than all schema, table and field names will be escaped with double quotes (for example: "tableName"."fieldName").<br/>\
-                            This enforces case sensitivity for field names and also allows having special characters in table and field names.<br/>\
-                            Escaped names will be used for CacheJdbcPojoStore internal SQL queries.')
+                        +form-field__checkbox({
+                            label: 'Escape table and filed names',
+                            model:`${pojoStoreFactory}.sqlEscapeAll`,
+                            name: '"sqlEscapeAll"',
+                            tip: 'If enabled than all schema, table and field names will be escaped with double quotes (for example: "tableName"."fieldName").<br/>\
+                                  This enforces case sensitivity for field names and also allows having special characters in table and field names.<br/>\
+                                  Escaped names will be used for CacheJdbcPojoStore internal SQL queries.'
+                        })
                 .pc-form-grid-row(ng-if=`${storeFactoryKind} === 'CacheJdbcBlobStoreFactory'`)
                     -var blobStoreFactory = `${storeFactory}.CacheJdbcBlobStoreFactory`
                     -var blobStoreFactoryVia = `${blobStoreFactory}.connectVia`
 
                     .pc-form-grid-col-60
-                        +dropdown('Connect via:', blobStoreFactoryVia, '"connectVia"', 'true', 'Choose connection method',
-                            '[\
-                                {value: "URL", label: "URL"},\
-                                {value: "DataSource", label: "Data source"}\
-                            ]',
-                            'You can connect to database via:\
-                            <ul>\
-                                <li>JDBC URL, for example: jdbc:h2:mem:myDatabase</li>\
-                                <li>Configured data source</li>\
-                            </ul>')
+                        +form-field__dropdown({
+                            label: 'Connect via:',
+                            model: blobStoreFactoryVia,
+                            name: '"connectVia"',
+                            placeholder: 'Choose connection method',
+                            options: '[\
+                                                        {value: "URL", label: "URL"},\
+                                                        {value: "DataSource", label: "Data source"}\
+                                                    ]',
+                            tip: 'You can connect to database via:\
+                                                    <ul>\
+                                                        <li>JDBC URL, for example: jdbc:h2:mem:myDatabase</li>\
+                                                        <li>Configured data source</li>\
+                                                    </ul>'
+                        })
 
                     -var required = `${storeFactoryKind} === 'CacheJdbcBlobStoreFactory' && ${blobStoreFactoryVia} === 'URL'`
 
                     .pc-form-grid-col-60(ng-if-start=`${blobStoreFactoryVia} === 'URL'`)
-                        +text('Connection URL:', `${blobStoreFactory}.connectionUrl`, '"connectionUrl"', required, 'Input URL',
-                            'URL for database access, for example: jdbc:h2:mem:myDatabase')
+                        +form-field__text({
+                            label: 'Connection URL:',
+                            model: `${blobStoreFactory}.connectionUrl`,
+                            name: '"connectionUrl"',
+                            required: required,
+                            placeholder: 'Input URL',
+                            tip: 'URL for database access, for example: jdbc:h2:mem:myDatabase'
+                        })
                     .pc-form-grid-col-30
-                        +text('User:', `${blobStoreFactory}.user`, '"user"', required, 'Input user name', 'User name for database access')
+                        +form-field__text({
+                            label: 'User:',
+                            model: `${blobStoreFactory}.user`,
+                            name: '"user"',
+                            required: required,
+                            placeholder: 'Input user name',
+                            tip: 'User name for database access'
+                        })
                     .pc-form-grid-col-30(ng-if-end)
                         .pc-form-grid__text-only-item Password will be generated as stub.
 
                     -var required = `${storeFactoryKind} === 'CacheJdbcBlobStoreFactory' && ${blobStoreFactoryVia} !== 'URL'`
 
                     .pc-form-grid-col-60(ng-if-start=`${blobStoreFactoryVia} !== 'URL'`)
-                        +sane-ignite-form-field-text({
+                        +form-field__text({
                             label: 'Data source bean name:',
                             model: `${blobStoreFactory}.dataSourceBean`,
                             name: '"blobDataSourceBean"',
@@ -160,105 +201,150 @@
                             placeholder: 'Input bean name',
                             tip: 'Name of the data source bean in Spring context'
                         })(
-                            is-valid-java-identifier
-                            not-java-reserved-word
+                        is-valid-java-identifier
+                        not-java-reserved-word
                         )
-                            +form-field-feedback(null, 'required', 'Data source bean name is required')
-                            +form-field-feedback(null, 'isValidJavaIdentifier', 'Data source bean name is not a valid Java identifier')
-                            +form-field-feedback(null, 'notJavaReservedWord', 'Data source bean name should not be a Java reserved word')
+                            +form-field__error({ error: 'required', message: 'Data source bean name is required' })
+                            +form-field__error({ error: 'isValidJavaIdentifier', message: 'Data source bean name is not a valid Java identifier' })
+                            +form-field__error({ error: 'notJavaReservedWord', message: 'Data source bean name should not be a Java reserved word' })
                     .pc-form-grid-col-60(ng-if-end)
-                        +dialect('Database:', `${blobStoreFactory}.dialect`, '"blobDialect"', required, 'Supported databases:', 'Generic database', 'Choose database')
+                        +form-field__dialect({
+                            label: 'Database:',
+                            model: `${blobStoreFactory}.dialect`,
+                            name: '"blobDialect"',
+                            required,
+                            tip: 'Supported databases:',
+                            genericDialectName: 'Generic database',
+                            placeholder: 'Choose database'
+                        })
 
                     .pc-form-grid-col-60
-                        +checkbox('Init schema', `${blobStoreFactory}.initSchema`, '"initSchema"',
-                            'Flag indicating whether DB schema should be initialized by Ignite (default behaviour) or was explicitly created by user')
+                        +form-field__checkbox({
+                            label: 'Init schema',
+                            model: `${blobStoreFactory}.initSchema`,
+                            name: '"initSchema"',
+                            tip: 'Flag indicating whether DB schema should be initialized by Ignite (default behaviour) or was explicitly created by user'
+                        })
                     .pc-form-grid-col-60
-                        +text('Create query:', `${blobStoreFactory}.createTableQuery`, '"createTableQuery"', 'false', 'SQL for table creation',
-                            'Query for table creation in underlying database<br/>\
-                            Default value: create table if not exists ENTRIES (key binary primary key, val binary)')
+                        +form-field__text({
+                            label: 'Create query:',
+                            model: `${blobStoreFactory}.createTableQuery`,
+                            name: '"createTableQuery"',
+                            placeholder: 'SQL for table creation',
+                            tip: 'Query for table creation in underlying database<br/>\
+                                 Default value: create table if not exists ENTRIES (key binary primary key, val binary)'
+                        })
                     .pc-form-grid-col-60
-                        +text('Load query:', `${blobStoreFactory}.loadQuery`, '"loadQuery"', 'false', 'SQL for load entry',
-                            'Query for entry load from underlying database<br/>\
-                            Default value: select * from ENTRIES where key=?')
+                        +form-field__text({
+                            label: 'Load query:',
+                            model: `${blobStoreFactory}.loadQuery`,
+                            name: '"loadQuery"',
+                            placeholder: 'SQL for load entry',
+                            tip: 'Query for entry load from underlying database<br/>\
+                                 Default value: select * from ENTRIES where key=?'
+                        })
                     .pc-form-grid-col-60
-                        +text('Insert query:', `${blobStoreFactory}.insertQuery`, '"insertQuery"', 'false', 'SQL for insert entry',
-                            'Query for insert entry into underlying database<br/>\
-                            Default value: insert into ENTRIES (key, val) values (?, ?)')
+                        +form-field__text({
+                            label: 'Insert query:',
+                            model: `${blobStoreFactory}.insertQuery`,
+                            name: '"insertQuery"',
+                            placeholder: 'SQL for insert entry',
+                            tip: 'Query for insert entry into underlying database<br/>\
+                                 Default value: insert into ENTRIES (key, val) values (?, ?)'
+                        })
                     .pc-form-grid-col-60
-                        +text('Update query:', `${blobStoreFactory}.updateQuery`, '"updateQuery"', 'false', 'SQL for update entry',
-                            'Query for update entry in underlying database<br/>\
-                            Default value: update ENTRIES set val=? where key=?')
+                        +form-field__text({
+                            label: 'Update query:',
+                            model: `${blobStoreFactory}.updateQuery`,
+                            name: '"updateQuery"',
+                            placeholder: 'SQL for update entry',
+                            tip: 'Query for update entry in underlying database<br/>\
+                                 Default value: update ENTRIES set val=? where key=?'
+                        })
                     .pc-form-grid-col-60
-                        +text('Delete query:', `${blobStoreFactory}.deleteQuery`, '"deleteQuery"', 'false', 'SQL for delete entry',
-                            'Query for delete entry from underlying database<br/>\
-                            Default value: delete from ENTRIES where key=?')
+                        +form-field__text({
+                            label: 'Delete query:',
+                            model: `${blobStoreFactory}.deleteQuery`,
+                            name: '"deleteQuery"',
+                            placeholder: 'SQL for delete entry',
+                            tip: 'Query for delete entry from underlying database<br/>\
+                                 Default value: delete from ENTRIES where key=?'
+                        })
 
                 .pc-form-grid-row(ng-if=`${storeFactoryKind} === 'CacheHibernateBlobStoreFactory'`)
                     -var hibernateStoreFactory = `${storeFactory}.CacheHibernateBlobStoreFactory`
 
                     .pc-form-grid-col-60
                         .ignite-form-field
-                            +ignite-form-field__label('Hibernate properties:', '"hibernateProperties"')
-                                +tooltip(`List of Hibernate properties<bt />
-                                    For example: connection.url=jdbc:h2:mem:exampleDb`)
-                            .ignite-form-field__control
-                                +list-pair-edit({
-                                    items: `${hibernateStoreFactory}.hibernateProperties`,
-                                    keyLbl: 'Property name', 
-                                    valLbl: 'Property value',
-                                    itemName: 'property',
-                                    itemsName: 'properties'
-                                })
+                            +form-field__label({ label: 'Hibernate properties:', name: '"hibernateProperties"' })
+                                +form-field__tooltip({ title: `List of Hibernate properties<bt />
+                                    For example: connection.url=jdbc:h2:mem:exampleDb` })
+
+                            +list-pair-edit({
+                                items: `${hibernateStoreFactory}.hibernateProperties`,
+                                keyLbl: 'Property name',
+                                valLbl: 'Property value',
+                                itemName: 'property',
+                                itemsName: 'properties'
+                            })
 
             - form = 'store'
             .pc-form-grid-col-60
-                +checkbox('Keep binary in store', `${model}.storeKeepBinary`, '"storeKeepBinary"',
-                    'Flag indicating that CacheStore implementation is working with binary objects instead of Java objects')
+                +form-field__checkbox({
+                    label: 'Keep binary in store',
+                    model: `${model}.storeKeepBinary`,
+                    name: '"storeKeepBinary"',
+                    tip: 'Flag indicating that CacheStore implementation is working with binary objects instead of Java objects'
+                })
             .pc-form-grid-col-60
-                +checkbox('Load previous value', `${model}.loadPreviousValue`, '"loadPreviousValue"',
-                    'Flag indicating whether value should be loaded from store if it is not in the cache for following cache operations: \
-                    <ul> \
-                        <li>IgniteCache.putIfAbsent()</li> \
-                        <li>IgniteCache.replace()</li> \
-                        <li>IgniteCache.remove()</li> \
-                        <li>IgniteCache.getAndPut()</li> \
-                        <li>IgniteCache.getAndRemove()</li> \
-                        <li>IgniteCache.getAndReplace()</li> \
-                        <li> IgniteCache.getAndPutIfAbsent()</li>\
-                    </ul>')
+                +form-field__checkbox({
+                    label: 'Load previous value',
+                    model: `${model}.loadPreviousValue`,
+                    name: '"loadPreviousValue"',
+                    tip: 'Flag indicating whether value should be loaded from store if it is not in the cache for following cache operations: \
+                        <ul> \
+                            <li>IgniteCache.putIfAbsent()</li> \
+                            <li>IgniteCache.replace()</li> \
+                            <li>IgniteCache.remove()</li> \
+                            <li>IgniteCache.getAndPut()</li> \
+                            <li>IgniteCache.getAndRemove()</li> \
+                            <li>IgniteCache.getAndReplace()</li> \
+                            <li> IgniteCache.getAndPutIfAbsent()</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-60
-                +sane-form-field-checkbox({
+                +form-field__checkbox({
                     label: 'Read-through',
                     model: `${model}.readThrough`,
                     name: '"readThrough"',
                     tip: 'Flag indicating whether read-through caching should be used'
                 })(
-                    ng-model-options='{allowInvalid: true}'
-                    ui-validate=`{
+                ng-model-options='{allowInvalid: true}'
+                ui-validate=`{
                         storeEnabledReadOrWriteOn: '$ctrl.Caches.cacheStoreFactory.storeEnabledReadOrWriteOn(${model})'
                     }`
-                    ui-validate-watch-collection=`"[${storeFactoryKind}, ${model}.writeThrough, ${model}.readThrough]"`
+                ui-validate-watch-collection=`"[${storeFactoryKind}, ${model}.writeThrough, ${model}.readThrough]"`
                 )
-                    +form-field-feedback(0, 'storeEnabledReadOrWriteOn', 'Read or write through should be turned on when store kind is set')
+                    +form-field__error({ error: 'storeEnabledReadOrWriteOn', message: 'Read or write through should be turned on when store kind is set' })
             .pc-form-grid-col-60
-                +sane-form-field-checkbox({
+                +form-field__checkbox({
                     label: 'Write-through',
                     model: `${model}.writeThrough`,
                     name: '"writeThrough"',
                     tip: 'Flag indicating whether write-through caching should be used'
                 })(
-                    ng-model-options='{allowInvalid: true}'
-                    ui-validate=`{
+                ng-model-options='{allowInvalid: true}'
+                ui-validate=`{
                         storeEnabledReadOrWriteOn: '$ctrl.Caches.cacheStoreFactory.storeEnabledReadOrWriteOn(${model})'
                     }`
-                    ui-validate-watch-collection=`"[${storeFactoryKind}, ${model}.writeThrough, ${model}.readThrough]"`
+                ui-validate-watch-collection=`"[${storeFactoryKind}, ${model}.writeThrough, ${model}.readThrough]"`
                 )
-                    +form-field-feedback(0, 'storeEnabledReadOrWriteOn', 'Read or write through should be turned on when store kind is set')
+                    +form-field__error({ error: 'storeEnabledReadOrWriteOn', message: 'Read or write through should be turned on when store kind is set' })
 
             -var enabled = `${model}.writeBehindEnabled`
 
             .pc-form-grid-col-60.pc-form-group__text-title
-                +sane-form-field-checkbox({
+                +form-field__checkbox({
                     label: 'Write-behind',
                     model: enabled,
                     name: '"writeBehindEnabled"',
@@ -267,16 +353,23 @@
                         Write-behind is a special mode when updates to cache accumulated and then asynchronously flushed to persistent store as a bulk operation.
                     `
                 })(
-                    ng-model-options='{allowInvalid: true}'
+                ng-model-options='{allowInvalid: true}'
                 )
-                    +form-field-feedback(0, 'storeDisabledValueOff', 'Write-behind is enabled but store kind is not set')
+                    +form-field__error({ error: 'storeDisabledValueOff', message: 'Write-behind is enabled but store kind is not set' })
             .pc-form-group.pc-form-grid-row(ng-if=enabled)
                 .pc-form-grid-col-30
-                    +number('Batch size:', `${model}.writeBehindBatchSize`, '"writeBehindBatchSize"', enabled, '512', '1',
-                        'Maximum batch size for write-behind cache store operations<br/>\
-                         Store operations(get or remove) are combined in a batch of this size to be passed to cache store')
+                    +form-field__number({
+                        label: 'Batch size:',
+                        model: `${model}.writeBehindBatchSize`,
+                        name: '"writeBehindBatchSize"',
+                        disabled: `!(${enabled})`,
+                        placeholder: '512',
+                        min: '1',
+                        tip: 'Maximum batch size for write-behind cache store operations<br/>\
+                              Store operations(get or remove) are combined in a batch of this size to be passed to cache store'
+                    })
                 .pc-form-grid-col-30
-                    +sane-ignite-form-field-number({
+                    +form-field__number({
                         label: 'Flush size:',
                         model: `${model}.writeBehindFlushSize`,
                         name: '"writeBehindFlushSize"',
@@ -285,10 +378,10 @@
                         tip: `Maximum size of the write-behind cache<br/>
                          If cache size exceeds this value, all cached items are flushed to the cache store and write cache is cleared`
                     })(
-                        ng-model-options='{allowInvalid: true}'
+                    ng-model-options='{allowInvalid: true}'
                     )
                 .pc-form-grid-col-30
-                    +sane-ignite-form-field-number({
+                    +form-field__number({
                         label: 'Flush frequency:',
                         model: `${model}.writeBehindFlushFrequency`,
                         name: '"writeBehindFlushFrequency"',
@@ -296,15 +389,28 @@
                         min: `{{ $ctrl.Caches.writeBehindFlush.min(${model}) }}`,
                         tip: `Frequency with which write-behind cache is flushed to the cache store in milliseconds`
                     })(
-                        ng-model-options='{allowInvalid: true}'
+                    ng-model-options='{allowInvalid: true}'
                     )
                 .pc-form-grid-col-30
-                    +number('Flush threads count:', `${model}.writeBehindFlushThreadCount`, '"writeBehindFlushThreadCount"', enabled, '1', '1',
-                        'Number of threads that will perform cache flushing')
+                    +form-field__number({
+                        label: 'Flush threads count:',
+                        model: `${model}.writeBehindFlushThreadCount`,
+                        name: '"writeBehindFlushThreadCount"',
+                        disabled: `!(${enabled})`,
+                        placeholder: '1',
+                        min: '1',
+                        tip: 'Number of threads that will perform cache flushing'
+                    })
 
                 //- Since ignite 2.0
                 .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                    +checkbox-enabled('Write coalescing', model + '.writeBehindCoalescing', '"WriteBehindCoalescing"', enabled, 'Write coalescing flag for write-behind cache store')
+                    +form-field__checkbox({
+                        label: 'Write coalescing',
+                        model: model + '.writeBehindCoalescing',
+                        name: '"WriteBehindCoalescing"',
+                        disabled: `!${enabled}`,
+                        tip: 'Write coalescing flag for write-behind cache store'
+                    })
 
         .pca-form-column-6
             +preview-xml-java(model, 'cacheStore', 'domains')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/atomic.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/atomic.pug
index 13424f8..9f5f138 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/atomic.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/atomic.pug
@@ -26,50 +26,95 @@
     panel-title Atomic configuration
     panel-description
         | Configuration for atomic data structures.
-        | Atomics are distributed across the cluster, essentially enabling performing atomic operations (such as increment-and-get or compare-and-set) with the same globally-visible value. 
+        | Atomics are distributed across the cluster, essentially enabling performing atomic operations (such as increment-and-get or compare-and-set) with the same globally-visible value.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/atomic-types" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +dropdown('Cache mode:', `${model}.cacheMode`, '"cacheMode"', 'true', 'PARTITIONED',
-                    '[\
-                        {value: "LOCAL", label: "LOCAL"},\
-                        {value: "REPLICATED", label: "REPLICATED"},\
-                        {value: "PARTITIONED", label: "PARTITIONED"}\
-                    ]',
-                    'Cache modes:\
-                    <ul>\
-                        <li>Partitioned - in this mode the overall key set will be divided into partitions and all partitions will be split equally between participating nodes</li>\
-                        <li>Replicated - in this mode all the keys are distributed to all participating nodes</li>\
-                        <li>Local - in this mode caches residing on different grid nodes will not know about each other</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Cache mode:',
+                    model: `${model}.cacheMode`,
+                    name: '"cacheMode"',
+                    placeholder: 'PARTITIONED',
+                    options: '[\
+                                            {value: "LOCAL", label: "LOCAL"},\
+                                            {value: "REPLICATED", label: "REPLICATED"},\
+                                            {value: "PARTITIONED", label: "PARTITIONED"}\
+                                        ]',
+                    tip: 'Cache modes:\
+                                        <ul>\
+                                            <li>Partitioned - in this mode the overall key set will be divided into partitions and all partitions will be split equally between participating nodes</li>\
+                                            <li>Replicated - in this mode all the keys are distributed to all participating nodes</li>\
+                                            <li>Local - in this mode caches residing on different grid nodes will not know about each other</li>\
+                                        </ul>'
+                })
             .pc-form-grid-col-30
-                +number('Sequence reserve:', `${model}.atomicSequenceReserveSize`, '"atomicSequenceReserveSize"', 'true', '1000', '0',
-                    'Default number of sequence values reserved for IgniteAtomicSequence instances<br/>\
-                    After a certain number has been reserved, consequent increments of sequence will happen locally, without communication with other nodes, until the next reservation has to be made')
+                +form-field__number({
+                    label: 'Sequence reserve:',
+                    model: `${model}.atomicSequenceReserveSize`,
+                    name: '"atomicSequenceReserveSize"',
+                    placeholder: '1000',
+                    min: '0',
+                    tip: 'Default number of sequence values reserved for IgniteAtomicSequence instances<br/>\
+                          After a certain number has been reserved, consequent increments of sequence will happen locally, without communication with other nodes, until the next reservation has to be made'
+                })
             .pc-form-grid-col-60(ng-show=`!(${model}.cacheMode && ${model}.cacheMode != "PARTITIONED")`)
-                +number('Backups:', model + '.backups', '"backups"', 'true', '0', '0', 'Number of backup nodes')
+                +form-field__number({
+                    label: 'Backups:',
+                    model: model + '.backups',
+                    name: '"backups"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Number of backup nodes'
+                })
 
             .pc-form-grid-col-60(ng-if-start='$ctrl.available("2.1.0")')
-                +dropdown('Function:', `${affModel}.kind`, '"AffinityKind"', 'true', 'Default', '$ctrl.Clusters.affinityFunctions',
-                    'Key topology resolver to provide mapping from keys to nodes\
-                    <ul>\
-                        <li>Rendezvous - Based on Highest Random Weight algorithm<br/></li>\
-                        <li>Custom - Custom implementation of key affinity function<br/></li>\
-                        <li>Default - By default rendezvous affinity function  with 1024 partitions is used<br/></li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Function:',
+                    model: `${affModel}.kind`,
+                    name: '"AffinityKind"',
+                    placeholder: 'Default',
+                    options: '$ctrl.Clusters.affinityFunctions',
+                    tip: 'Key topology resolver to provide mapping from keys to nodes\
+                                        <ul>\
+                                            <li>Rendezvous - Based on Highest Random Weight algorithm<br/></li>\
+                                            <li>Custom - Custom implementation of key affinity function<br/></li>\
+                                            <li>Default - By default rendezvous affinity function  with 1024 partitions is used<br/></li>\
+                                        </ul>'
+                })
             .pc-form-group(ng-if-end ng-if=rendezvousAff + ' || ' + customAff)
                 .pc-form-grid-row
                     .pc-form-grid-col-30(ng-if-start=rendezvousAff)
-                        +number-required('Partitions', `${affModel}.Rendezvous.partitions`, '"RendPartitions"', 'true', rendPartitionsRequired, '1024', '1', 'Number of partitions')
+                        +form-field__number({
+                            label: 'Partitions',
+                            model: `${affModel}.Rendezvous.partitions`,
+                            name: '"RendPartitions"',
+                            required: rendPartitionsRequired,
+                            placeholder: '1024',
+                            min: '1',
+                            tip: 'Number of partitions'
+                        })
                     .pc-form-grid-col-30
-                        +java-class('Backup filter', `${affModel}.Rendezvous.affinityBackupFilter`, '"RendAffinityBackupFilter"', 'true', 'false',
-                            'Backups will be selected from all nodes that pass this filter')
+                        +form-field__java-class({
+                            label: 'Backup filter',
+                            model: `${affModel}.Rendezvous.affinityBackupFilter`,
+                            name: '"RendAffinityBackupFilter"',
+                            tip: 'Backups will be selected from all nodes that pass this filter'
+                        })
                     .pc-form-grid-col-60(ng-if-end)
-                        +checkbox('Exclude neighbors', `${affModel}.Rendezvous.excludeNeighbors`, '"RendExcludeNeighbors"',
-                            'Exclude same - host - neighbors from being backups of each other and specified number of backups')
+                        +form-field__checkbox({
+                            label: 'Exclude neighbors',
+                            model: `${affModel}.Rendezvous.excludeNeighbors`,
+                            name: '"RendExcludeNeighbors"',
+                            tip: 'Exclude same - host - neighbors from being backups of each other and specified number of backups'
+                        })
                     .pc-form-grid-col-60(ng-if=customAff)
-                        +java-class('Class name:', `${affModel}.Custom.className`, '"AffCustomClassName"', 'true', customAff,
-                            'Custom key affinity function implementation class name')
+                        +form-field__java-class({
+                            label: 'Class name:',
+                            model: `${affModel}.Custom.className`,
+                            name: '"AffCustomClassName"',
+                            required: customAff,
+                            tip: 'Custom key affinity function implementation class name'
+                        })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterAtomics')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/attributes.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/attributes.pug
index b57f1da..f704bc9 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/attributes.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/attributes.pug
@@ -25,16 +25,16 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6
             .ignite-form-field
-                +ignite-form-field__label('User attributes:', '"userAttributes"')
-                    +tooltip(`User-defined attributes to add to node`)
-                .ignite-form-field__control
-                    +list-pair-edit({
-                        items: `${model}.attributes`,
-                        keyLbl: 'Attribute name', 
-                        valLbl: 'Attribute value',
-                        itemName: 'attribute',
-                        itemsName: 'attributes'
-                    })
+                +form-field__label({ label: 'User attributes:', name: '"userAttributes"'})
+                    +form-field__tooltip({ title: `User-defined attributes to add to node` })
+
+                +list-pair-edit({
+                    items: `${model}.attributes`,
+                    keyLbl: 'Attribute name', 
+                    valLbl: 'Attribute value',
+                    itemName: 'attribute',
+                    itemsName: 'attributes'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterUserAttributes')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/binary.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/binary.pug
index 17fe4cd..a20a3fd 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/binary.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/binary.pug
@@ -22,59 +22,103 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Binary configuration
     panel-description
-        | Configuration of specific binary types. 
+        | Configuration of specific binary types.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/binary-marshaller" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +java-class('ID mapper:', model + '.idMapper', '"idMapper"', 'true', 'false',
-                    'Maps given from BinaryNameMapper type and filed name to ID that will be used by Ignite in internals<br/>\
-                    Ignite never writes full strings for field or type names. Instead, for performance reasons, Ignite writes integer hash codes for type/class and field names. It has been tested that hash code conflicts for the type/class names or the field names within the same type are virtually non - existent and, to gain performance, it is safe to work with hash codes. For the cases when hash codes for different types or fields actually do collide <b>BinaryIdMapper</b> allows to override the automatically generated hash code IDs for the type and field names')
+                +form-field__java-class({
+                    label: 'ID mapper:',
+                    model: model + '.idMapper',
+                    name: '"idMapper"',
+                    tip: 'Maps given from BinaryNameMapper type and filed name to ID that will be used by Ignite in internals<br/>\
+                          Ignite never writes full strings for field or type names. Instead, for performance reasons, Ignite writes integer hash codes for type/class and field names. It has been tested that hash code conflicts for the type/class names or the field names within the same type are virtually non - existent and, to gain performance, it is safe to work with hash codes. For the cases when hash codes for different types or fields actually do collide <b>BinaryIdMapper</b> allows to override the automatically generated hash code IDs for the type and field names'
+                })
             .pc-form-grid-col-60
-                +java-class('Name mapper:', model + '.nameMapper', '"nameMapper"', 'true', 'false', 'Maps type/class and field names to different names')
+                +form-field__java-class({
+                    label: 'Name mapper:',
+                    model: model + '.nameMapper',
+                    name: '"nameMapper"',
+                    tip: 'Maps type/class and field names to different names'
+                })
             .pc-form-grid-col-60
-                +java-class('Serializer:', model + '.serializer', '"serializer"', 'true', 'false', 'Class with custom serialization logic for binary objects')
+                +form-field__java-class({
+                    label: 'Serializer:',
+                    model: model + '.serializer',
+                    name: '"serializer"',
+                    tip: 'Class with custom serialization logic for binary objects'
+                })
             .pc-form-grid-col-60
                 .ignite-form-field
-                    +ignite-form-field__label('Type configurations:', '"typeConfigurations"')
-                        +tooltip(`Configuration properties for binary types`)
-                    .ignite-form-field__control
-                        -var items = model + '.typeConfigurations'
-                        list-editable.pc-list-editable-with-form-grid(ng-model=items name='typeConfigurations')
-                            list-editable-item-edit.pc-form-grid-row
-                                - form = '$parent.form'
-                                .pc-form-grid-col-60
-                                    +java-class-autofocus('Type name:', '$item.typeName', '"typeName"', 'true', 'true', 'true', 'Type name')(
-                                        ignite-unique=items
-                                        ignite-unique-property='typeName'
-                                    )
-                                        +unique-feedback(`$item.typeName`, 'Type name should be unique.')
-                                .pc-form-grid-col-60
-                                    +java-class('ID mapper:', '$item.idMapper', '"idMapper"', 'true', 'false',
-                                        'Maps given from BinaryNameMapper type and filed name to ID that will be used by Ignite in internals<br/>\
-                                        Ignite never writes full strings for field or type/class names.\
-                                        Instead, for performance reasons, Ignite writes integer hash codes for type/class and field names.\
-                                        It has been tested that hash code conflicts for the type/class names or the field names within the same type are virtually non - existent and,\
-                                        to gain performance, it is safe to work with hash codes.\
-                                        For the cases when hash codes for different types or fields actually do collide <b>BinaryIdMapper</b> allows to override the automatically generated hash code IDs for the type and field names')
-                                .pc-form-grid-col-60
-                                    +java-class('Name mapper:', '$item.nameMapper', '"nameMapper"', 'true', 'false',
-                                        'Maps type/class and field names to different names')
-                                .pc-form-grid-col-60
-                                    +java-class('Serializer:', '$item.serializer', '"serializer"', 'true', 'false',
-                                        'Class with custom serialization logic for binary object')
-                                .pc-form-grid-col-60
-                                    +checkbox('Enum', '$item.enum', 'enum', 'Flag indicating that this type is the enum')
+                    +form-field__label({ label: 'Type configurations:', name: '"typeConfigurations"' })
+                        +form-field__tooltip({ title: `Configuration properties for binary types`})
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$ctrl.Clusters.addBinaryTypeConfiguration($ctrl.clonedCluster)`
-                                    label-single='configuration'
-                                    label-multiple='configurations'
+                    -var items = model + '.typeConfigurations'
+                    list-editable.pc-list-editable-with-form-grid(ng-model=items name='typeConfigurations')
+                        list-editable-item-edit.pc-form-grid-row
+                            - form = '$parent.form'
+                            .pc-form-grid-col-60
+                                +form-field__java-class({
+                                    label: 'Type name:',
+                                    model: '$item.typeName',
+                                    name: '"typeName"',
+                                    required: 'true',
+                                    tip: 'Type name'
+                                })(
+                                    ignite-form-field-input-autofocus='true'
+                                    ignite-unique=items
+                                    ignite-unique-property='typeName'
                                 )
+                                    +form-field__error({ error: 'igniteUnique', message: 'Type name should be unique.' })
+                            .pc-form-grid-col-60
+                                +form-field__java-class({
+                                    label: 'ID mapper:',
+                                    model: '$item.idMapper',
+                                    name: '"idMapper"',
+                                    tip: 'Maps given from BinaryNameMapper type and filed name to ID that will be used by Ignite in internals<br/>\
+                                          Ignite never writes full strings for field or type/class names.\
+                                          Instead, for performance reasons, Ignite writes integer hash codes for type/class and field names.\
+                                          It has been tested that hash code conflicts for the type/class names or the field names within the same type are virtually non - existent and,\
+                                          to gain performance, it is safe to work with hash codes.\
+                                          For the cases when hash codes for different types or fields actually do collide <b>BinaryIdMapper</b> allows to override the automatically generated hash code IDs for the type and field names'
+                                })
+                            .pc-form-grid-col-60
+                                +form-field__java-class({
+                                    label: 'Name mapper:',
+                                    model: '$item.nameMapper',
+                                    name: '"nameMapper"',
+                                    tip: 'Maps type/class and field names to different names'
+                                })
+
+                            .pc-form-grid-col-60
+                                +form-field__java-class({
+                                    label: 'Serializer:',
+                                    model: '$item.serializer',
+                                    name: '"serializer"',
+                                    tip: 'Class with custom serialization logic for binary object'
+                                })
+                            .pc-form-grid-col-60
+                                +form-field__checkbox({
+                                    label: 'Enum',
+                                    model: '$item.enum',
+                                    name: 'enum',
+                                    tip: 'Flag indicating that this type is the enum'
+                                })
+
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$ctrl.Clusters.addBinaryTypeConfiguration($ctrl.clonedCluster)`
+                                label-single='configuration'
+                                label-multiple='configurations'
+                            )
 
             - form = 'binary'
             .pc-form-grid-col-60
-                +checkbox('Compact footer', model + '.compactFooter', '"compactFooter"', 'When enabled, Ignite will not write fields metadata when serializing objects (this will increase serialization performance), because internally <b>BinaryMarshaller</b> already distribute metadata inside cluster')
+                +form-field__checkbox({
+                    label: 'Compact footer',
+                    model: model + '.compactFooter',
+                    name: '"compactFooter"',
+                    tip: 'When enabled, Ignite will not write fields metadata when serializing objects (this will increase serialization performance), because internally <b>BinaryMarshaller</b> already distribute metadata inside cluster'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterBinary')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/cache-key-cfg.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/cache-key-cfg.pug
index 0b34ce4..a17e52a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/cache-key-cfg.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/cache-key-cfg.pug
@@ -25,39 +25,41 @@
         | Cache key configuration allows to collocate objects in a partitioned cache based on field in cache key without explicit usage of annotations on user classes.
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6
-            mixin clusters-cache-key-cfg
-                .ignite-form-field
-                    +ignite-form-field__label('Cache key configuration:', '"cacheKeyConfiguration"')
-                    .ignite-form-field__control
-                        -let items = model
-                        list-editable.pc-list-editable-with-form-grid(ng-model=items name='cacheKeyConfiguration')
-                            list-editable-item-edit.pc-form-grid-row
-                                - form = '$parent.form'
-                                .pc-form-grid-col-60
-                                    +java-class-autofocus('Type name:', '$item.typeName', '"cacheKeyTypeName"', 'true', 'true', 'true', 'Type name')(
-                                        ignite-unique=items
-                                        ignite-unique-property='typeName'
-                                    )
-                                        +unique-feedback(`cacheKeyTypeName`, 'Type name should be unique.')
-                                .pc-form-grid-col-60
-                                    +sane-ignite-form-field-text({
-                                        label: 'Affinity key field name:',
-                                        model: '$item.affinityKeyFieldName',
-                                        name: '"affinityKeyFieldName"',
-                                        disabled: 'false',
-                                        placeholder: 'Enter field name',
-                                        tip: 'Affinity key field name',
-                                        required: true
-                                    })
+            .ignite-form-field
+                +form-field__label({ label: 'Cache key configuration:', name: '"cacheKeyConfiguration"' })
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`(${items} = ${items} || []).push({})`
-                                    label-single='configuration'
-                                    label-multiple='configurations'
-                                )
+                list-editable.pc-list-editable-with-form-grid(ng-model=model name='cacheKeyConfiguration')
+                    list-editable-item-edit.pc-form-grid-row
+                        - form = '$parent.form'
+                        .pc-form-grid-col-60
+                            +form-field__java-class({
+                                label: 'Type name:',
+                                model: '$item.typeName',
+                                name: '"cacheKeyTypeName"',
+                                required: 'true',
+                                tip: 'Type name'
+                            })(
+                                ignite-form-field-input-autofocus='true'
+                                ignite-unique=model
+                                ignite-unique-property='typeName'
+                            )
+                                +form-field__error({ error: 'igniteUnique', message: 'Type name should be unique.' })
+                        .pc-form-grid-col-60
+                            +form-field__text({
+                                label: 'Affinity key field name:',
+                                model: '$item.affinityKeyFieldName',
+                                name: '"affinityKeyFieldName"',
+                                placeholder: 'Enter field name',
+                                tip: 'Affinity key field name',
+                                required: true
+                            })
 
-            +clusters-cache-key-cfg
+                    list-editable-no-items
+                        list-editable-add-item-button(
+                            add-item=`(${model} = ${model} || []).push({})`
+                            label-single='configuration'
+                            label-multiple='configurations'
+                        )
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterCacheKeyConfiguration')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint.pug
index 7d56f14..760f996 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint.pug
@@ -24,59 +24,85 @@
 panel-collapsible(ng-form=form)
     panel-title Checkpointing
     panel-description
-        | Checkpointing provides an ability to save an intermediate job state. 
+        | Checkpointing provides an ability to save an intermediate job state.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/checkpointing" target="_blank") More info]
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
                 .ignite-form-field
-                    +ignite-form-field__label('Checkpoint SPI configurations:', '"checkpointSPIConfigurations"')
-                    .ignite-form-field__control
-                        list-editable.pc-list-editable-with-form-grid(ng-model=model name='checkpointSPIConfigurations')
-                            list-editable-item-edit(item-name='$checkpointSPI').pc-form-grid-row
-                                .pc-form-grid-col-60
-                                    +dropdown-required('Checkpoint SPI:', '$checkpointSPI.kind', '"checkpointKind"', 'true', 'true', 'Choose checkpoint configuration variant', '[\
-                                            {value: "FS", label: "File System"},\
-                                            {value: "Cache", label: "Cache"},\
-                                            {value: "S3", label: "Amazon S3"},\
-                                            {value: "JDBC", label: "Database"},\
-                                            {value: "Custom", label: "Custom"}\
-                                        ]',
-                                        'Provides an ability to save an intermediate job state\
-                                        <ul>\
+                    +form-field__label({ label: 'Checkpoint SPI configurations:', name: '"checkpointSPIConfigurations"' })
+
+                    list-editable.pc-list-editable-with-form-grid(ng-model=model name='checkpointSPIConfigurations')
+                        list-editable-item-edit(item-name='$checkpointSPI').pc-form-grid-row
+                            .pc-form-grid-col-60
+                                +form-field__dropdown({
+                                    label: 'Checkpoint SPI:',
+                                    model: '$checkpointSPI.kind',
+                                    name: '"checkpointKind"',
+                                    required: 'true',
+                                    placeholder: 'Choose checkpoint configuration variant',
+                                    options: '[\
+                                                {value: "FS", label: "File System"},\
+                                                {value: "Cache", label: "Cache"},\
+                                                {value: "S3", label: "Amazon S3"},\
+                                                {value: "JDBC", label: "Database"},\
+                                                {value: "Custom", label: "Custom"}\
+                                               ]',
+                                    tip: 'Provides an ability to save an intermediate job state\
+                                          <ul>\
                                             <li>File System - Uses a shared file system to store checkpoints</li>\
                                             <li>Cache - Uses a cache to store checkpoints</li>\
                                             <li>Amazon S3 - Uses Amazon S3 to store checkpoints</li>\
                                             <li>Database - Uses a database to store checkpoints</li>\
                                             <li>Custom - Custom checkpoint SPI implementation</li>\
-                                        </ul>')
+                                          </ul>'
+                                })
 
-                                include ./checkpoint/fs
+                            include ./checkpoint/fs
 
-                                .pc-form-grid-col-60(ng-if-start=CacheCheckpoint)
-                                    +dropdown-required-empty('Cache:', '$checkpointSPI.Cache.cache', '"checkpointCacheCache"', 'true', CacheCheckpoint,
-                                        'Choose cache', 'No caches configured for current cluster', '$ctrl.cachesMenu', 'Cache to use for storing checkpoints')(
-                                        pc-is-in-collection='$ctrl.clonedCluster.caches'
-                                    )
-                                        +form-field-feedback(form, 'isInCollection', `Cluster doesn't have such a cache`)
-                                .pc-form-grid-col-60(ng-if-end)
-                                    +java-class('Listener:', '$checkpointSPI.Cache.checkpointListener', '"checkpointCacheListener"', 'true', 'false',
-                                        'Checkpoint listener implementation class name', CacheCheckpoint)
-
-                                include ./checkpoint/s3
-
-                                include ./checkpoint/jdbc
-
-                                .pc-form-grid-col-60(ng-if=CustomCheckpoint)
-                                    +java-class('Class name:', '$checkpointSPI.Custom.className', '"checkpointCustomClassName"', 'true', CustomCheckpoint,
-                                    'Custom CheckpointSpi implementation class', CustomCheckpoint)
-
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$edit($ctrl.Clusters.addCheckpointSPI($ctrl.clonedCluster))`
-                                    label-single='checkpoint SPI configuration'
-                                    label-multiple='checkpoint SPI configurations'
+                            .pc-form-grid-col-60(ng-if-start=CacheCheckpoint)
+                                +form-field__dropdown({
+                                    label: 'Cache:',
+                                    model: '$checkpointSPI.Cache.cache',
+                                    name: '"checkpointCacheCache"',
+                                    required: CacheCheckpoint,
+                                    placeholder: 'Choose cache',
+                                    placeholderEmpty: 'No caches configured for current cluster',
+                                    options: '$ctrl.cachesMenu',
+                                    tip: 'Cache to use for storing checkpoints'
+                                })(
+                                    pc-is-in-collection='$ctrl.clonedCluster.caches'
                                 )
-        
+                                    +form-field__error({ error: 'isInCollection', message: `Cluster doesn't have such a cache` })
+                            .pc-form-grid-col-60(ng-if-end)
+                                +form-field__java-class({
+                                    label: 'Listener:',
+                                    model: '$checkpointSPI.Cache.checkpointListener',
+                                    name: '"checkpointCacheListener"',
+                                    tip: 'Checkpoint listener implementation class name',
+                                    validationActive: CacheCheckpoint
+                                })
+
+                            include ./checkpoint/s3
+
+                            include ./checkpoint/jdbc
+
+                            .pc-form-grid-col-60(ng-if=CustomCheckpoint)
+                                +form-field__java-class({
+                                    label: 'Class name:',
+                                    model: '$checkpointSPI.Custom.className',
+                                    name: '"checkpointCustomClassName"',
+                                    required: CustomCheckpoint,
+                                    tip: 'Custom CheckpointSpi implementation class',
+                                    validationActive: CustomCheckpoint
+                                })
+
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$edit($ctrl.Clusters.addCheckpointSPI($ctrl.clonedCluster))`
+                                label-single='checkpoint SPI configuration'
+                                label-multiple='checkpoint SPI configurations'
+                            )
+
         .pca-form-column-6
             +preview-xml-java('$ctrl.clonedCluster', 'clusterCheckpoint', '$ctrl.caches')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/fs.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/fs.pug
index 0359cf3..0cda6fa 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/fs.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/fs.pug
@@ -30,7 +30,13 @@
                 tip: 'Paths to a shared directory where checkpoints will be stored'
             }]`
         )
-            +unique-feedback(_, 'Such path already exists!')
+            +form-field__error({ error: 'igniteUnique', message: 'Such path already exists!' })
 
 .pc-form-grid-col-60(ng-if-end)
-    +java-class('Listener:', '$checkpointSPI.FS.checkpointListener', '"checkpointFsListener"', 'true', 'false', 'Checkpoint listener implementation class name', '$checkpointSPI.kind === "FS"')
+    +form-field__java-class({
+        label: 'Listener:',
+        model: '$checkpointSPI.FS.checkpointListener',
+        name: '"checkpointFsListener"',
+        tip: 'Checkpoint listener implementation class name',
+        validationActive: '$checkpointSPI.kind === "FS"'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/jdbc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/jdbc.pug
index 00a8681..be4afc4 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/jdbc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/jdbc.pug
@@ -19,29 +19,106 @@
 -var jdbcCheckpoint = '$checkpointSPI.kind === "JDBC"'
 
 .pc-form-grid-col-30(ng-if-start='$checkpointSPI.kind === "JDBC"')
-    +text('Data source bean name:', '$checkpointSPI.JDBC.dataSourceBean', '"checkpointJdbcDataSourceBean"', jdbcCheckpoint, 'Input bean name',
-    'Name of the data source bean in Spring context')
+    +form-field__text({
+        label: 'Data source bean name:',
+        model: '$checkpointSPI.JDBC.dataSourceBean',
+        name: '"checkpointJdbcDataSourceBean"',
+        required: jdbcCheckpoint,
+        placeholder: 'Input bean name',
+        tip: 'Name of the data source bean in Spring context'
+    })
 .pc-form-grid-col-30
-    +dialect('Dialect:', '$checkpointSPI.JDBC.dialect', '"checkpointJdbcDialect"', jdbcCheckpoint,
-    'Dialect of SQL implemented by a particular RDBMS:', 'Generic JDBC dialect', 'Choose JDBC dialect')
+    +form-field__dialect({
+        label: 'Dialect:',
+        model: '$checkpointSPI.JDBC.dialect',
+        name: '"checkpointJdbcDialect"',
+        required: jdbcCheckpoint,
+        tip: 'Dialect of SQL implemented by a particular RDBMS:',
+        genericDialectName: 'Generic JDBC dialect',
+        placeholder: 'Choose JDBC dialect'
+    })
+
 .pc-form-grid-col-60
-    +java-class('Listener:', '$checkpointSPI.JDBC.checkpointListener', '"checkpointJdbcListener"', 'true', 'false',
-        'Checkpoint listener implementation class name', jdbcCheckpoint)
+    +form-field__java-class({
+        label: 'Listener:',
+        model: '$checkpointSPI.JDBC.checkpointListener',
+        name: '"checkpointJdbcListener"',
+        tip: 'Checkpoint listener implementation class name',
+        validationActive: jdbcCheckpoint
+    })
 .pc-form-grid-col-60
-    +text('User:', '$checkpointSPI.JDBC.user', '"checkpointJdbcUser"', 'false', 'Input user name', 'Checkpoint jdbc user name')
+    +form-field__text({
+        label: 'User:',
+        model: '$checkpointSPI.JDBC.user',
+        name: '"checkpointJdbcUser"',
+        placeholder: 'Input user name',
+        tip: 'Checkpoint jdbc user name'
+    })
 .pc-form-grid-col-30
-    +text('Table name:', '$checkpointSPI.JDBC.checkpointTableName', '"checkpointJdbcCheckpointTableName"', 'false', 'CHECKPOINTS', 'Checkpoint table name')
+    +form-field__text({
+        label: 'Table name:',
+        model: '$checkpointSPI.JDBC.checkpointTableName',
+        name: '"checkpointJdbcCheckpointTableName"',
+        placeholder: 'CHECKPOINTS',
+        tip: 'Checkpoint table name'
+    })
 .pc-form-grid-col-30
-    +number('Number of retries:', '$checkpointSPI.JDBC.numberOfRetries', '"checkpointJdbcNumberOfRetries"', 'true', '2', '0', 'Number of retries in case of DB failure')
+    +form-field__number({
+        label: 'Number of retries:',
+        model: '$checkpointSPI.JDBC.numberOfRetries',
+        name: '"checkpointJdbcNumberOfRetries"',
+        placeholder: '2',
+        min: '0',
+        tip: 'Number of retries in case of DB failure'
+    })
 .pc-form-grid-col-30
-    +text('Key field name:', '$checkpointSPI.JDBC.keyFieldName', '"checkpointJdbcKeyFieldName"', 'false', 'NAME', 'Checkpoint key field name')
+    +form-field__text({
+        label: 'Key field name:',
+        model: '$checkpointSPI.JDBC.keyFieldName',
+        name: '"checkpointJdbcKeyFieldName"',
+        placeholder: 'NAME',
+        tip: 'Checkpoint key field name'
+    })
 .pc-form-grid-col-30
-    +dropdown('Key field type:', '$checkpointSPI.JDBC.keyFieldType', '"checkpointJdbcKeyFieldType"', 'true', 'VARCHAR', '::$ctrl.supportedJdbcTypes', 'Checkpoint key field type')
+    +form-field__dropdown({
+        label: 'Key field type:',
+        model: '$checkpointSPI.JDBC.keyFieldType',
+        name: '"checkpointJdbcKeyFieldType"',
+        placeholder: 'VARCHAR',
+        options: '::$ctrl.supportedJdbcTypes',
+        tip: 'Checkpoint key field type'
+    })
 .pc-form-grid-col-30
-    +text('Value field name:', '$checkpointSPI.JDBC.valueFieldName', '"checkpointJdbcValueFieldName"', 'false', 'VALUE', 'Checkpoint value field name')
+    +form-field__text({
+        label: 'Value field name:',
+        model: '$checkpointSPI.JDBC.valueFieldName',
+        name: '"checkpointJdbcValueFieldName"',
+        placeholder: 'VALUE',
+        tip: 'Checkpoint value field name'
+    })
 .pc-form-grid-col-30
-    +dropdown('Value field type:', '$checkpointSPI.JDBC.valueFieldType', '"checkpointJdbcValueFieldType"', 'true', 'BLOB', '::$ctrl.supportedJdbcTypes', 'Checkpoint value field type')
+    +form-field__dropdown({
+        label: 'Value field type:',
+        model: '$checkpointSPI.JDBC.valueFieldType',
+        name: '"checkpointJdbcValueFieldType"',
+        placeholder: 'BLOB',
+        options: '::$ctrl.supportedJdbcTypes',
+        tip: 'Checkpoint value field type'
+    })
 .pc-form-grid-col-30
-    +text('Expire date field name:', '$checkpointSPI.JDBC.expireDateFieldName', '"checkpointJdbcExpireDateFieldName"', 'false', 'EXPIRE_DATE', 'Checkpoint expire date field name')
+    +form-field__text({
+        label:'Expire date field name:',
+        model: '$checkpointSPI.JDBC.expireDateFieldName',
+        name: '"checkpointJdbcExpireDateFieldName"',
+        placeholder: 'EXPIRE_DATE',
+        tip: 'Checkpoint expire date field name'
+    })
 .pc-form-grid-col-30(ng-if-end)
-    +dropdown('Expire date field type:', '$checkpointSPI.JDBC.expireDateFieldType', '"checkpointJdbcExpireDateFieldType"', 'true', 'DATETIME', '::$ctrl.supportedJdbcTypes', 'Checkpoint expire date field type')
+    +form-field__dropdown({
+        label: 'Expire date field type:',
+        model: '$checkpointSPI.JDBC.expireDateFieldType',
+        name: '"checkpointJdbcExpireDateFieldType"',
+        placeholder: 'DATETIME',
+        options: '::$ctrl.supportedJdbcTypes',
+        tip: 'Checkpoint expire date field type'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/s3.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/s3.pug
index 8e284fc..1f6eef2 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/s3.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/checkpoint/s3.pug
@@ -28,177 +28,416 @@
 -var checkpointS3CustomRetry = checkpointS3 + ' && ' + clientRetryModel + '.kind === "Custom"'
 
 .pc-form-grid-col-60(ng-if-start='$checkpointSPI.kind === "S3"')
-    +dropdown-required('AWS credentials:', '$checkpointSPI.S3.awsCredentials.kind', '"checkpointS3AwsCredentials"', 'true', checkpointS3, 'Custom', '[\
-        {value: "Basic", label: "Basic"},\
-        {value: "Properties", label: "Properties"},\
-        {value: "Anonymous", label: "Anonymous"},\
-        {value: "BasicSession", label: "Basic with session"},\
-        {value: "Custom", label: "Custom"}\
-    ]',
-    'AWS credentials\
-    <ul>\
-        <li>Basic - Allows callers to pass in the AWS access key and secret access in the constructor</li>\
-        <li>Properties - Reads in AWS access keys from a properties file</li>\
-        <li>Anonymous - Allows use of "anonymous" credentials</li>\
-        <li>Database - Session credentials with keys and session token</li>\
-        <li>Custom - Custom AWS credentials provider</li>\
-    </ul>')
+    +form-field__dropdown({
+        label: 'AWS credentials:',
+        model: '$checkpointSPI.S3.awsCredentials.kind',
+        name: '"checkpointS3AwsCredentials"',
+        required: checkpointS3,
+        placeholder: 'Custom',
+        options: '[\
+                {value: "Basic", label: "Basic"},\
+                {value: "Properties", label: "Properties"},\
+                {value: "Anonymous", label: "Anonymous"},\
+                {value: "BasicSession", label: "Basic with session"},\
+                {value: "Custom", label: "Custom"}\
+            ]',
+        tip: 'AWS credentials\
+            <ul>\
+                <li>Basic - Allows callers to pass in the AWS access key and secret access in the constructor</li>\
+                <li>Properties - Reads in AWS access keys from a properties file</li>\
+                <li>Anonymous - Allows use of "anonymous" credentials</li>\
+                <li>Database - Session credentials with keys and session token</li>\
+                <li>Custom - Custom AWS credentials provider</li>\
+            </ul>'
+    })
+
 .pc-form-group.pc-form-grid-row(ng-if=checkpointS3Path)
     .pc-form-grid-col-60
-        +text('Path:', credentialsModel + '.Properties.path', '"checkpointS3PropertiesPath"', checkpointS3Path, 'Input properties file path',
-        'The file from which to read the AWS credentials properties')
+        +form-field__text({
+            label: 'Path:',
+            model: `${credentialsModel}.Properties.path`,
+            name: '"checkpointS3PropertiesPath"',
+            required: checkpointS3Path,
+            placeholder: 'Input properties file path',
+            tip: 'The file from which to read the AWS credentials properties'
+        })
 .pc-form-group.pc-form-grid-row(ng-if=checkpointS3Custom)
     .pc-form-grid-col-60
-        +java-class('Class name:', credentialsModel + '.Custom.className', '"checkpointS3CustomClassName"', 'true', checkpointS3Custom,
-        'Custom AWS credentials provider implementation class', checkpointS3Custom)
+        +form-field__java-class({
+            label: 'Class name:',
+            model: credentialsModel + '.Custom.className',
+            name: '"checkpointS3CustomClassName"',
+            required: checkpointS3Custom,
+            tip: 'Custom AWS credentials provider implementation class',
+            validationActive:checkpointS3Custom
+        })
 .pc-form-grid-col-60
     label Note, AWS credentials will be generated as stub
 .pc-form-grid-col-60
-    +text('Bucket name suffix:', '$checkpointSPI.S3.bucketNameSuffix', '"checkpointS3BucketNameSuffix"', 'false', 'default-bucket')
+    +form-field__text({
+        label: 'Bucket name suffix:',
+        model: '$checkpointSPI.S3.bucketNameSuffix',
+        name: '"checkpointS3BucketNameSuffix"',
+        placeholder: 'default-bucket'
+    })
 .pc-form-grid-col-60(ng-if-start=`$ctrl.available("2.4.0")`)
-    +text('Bucket endpoint:', `$checkpointSPI.S3.bucketEndpoint`, '"checkpointS3BucketEndpoint"', false, 'Input bucket endpoint',
-    'Bucket endpoint for IP finder<br/> \
-    For information about possible endpoint names visit <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">docs.aws.amazon.com</a>')
+    +form-field__text({
+        label: 'Bucket endpoint:',
+        model: `$checkpointSPI.S3.bucketEndpoint`,
+        name: '"checkpointS3BucketEndpoint"',
+        placeholder: 'Input bucket endpoint',
+        tip: 'Bucket endpoint for IP finder<br/> \
+            For information about possible endpoint names visit <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">docs.aws.amazon.com</a>'
+    })
 .pc-form-grid-col-60(ng-if-end)
-    +text('SSE algorithm:', `$checkpointSPI.S3.SSEAlgorithm`, '"checkpointS3SseAlgorithm"', false, 'Input SSE algorithm',
-    'Server-side encryption algorithm for Amazon S3-managed encryption keys<br/> \
-    For information about possible S3-managed encryption keys visit <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">docs.aws.amazon.com</a>')
+    +form-field__text({
+        label: 'SSE algorithm:',
+        model: `$checkpointSPI.S3.SSEAlgorithm`,
+        name: '"checkpointS3SseAlgorithm"',
+        placeholder: 'Input SSE algorithm',
+        tip: 'Server-side encryption algorithm for Amazon S3-managed encryption keys<br/> \
+              For information about possible S3-managed encryption keys visit <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">docs.aws.amazon.com</a>'
+    })
 .pc-form-grid-col-60
-    +java-class('Listener:', '$checkpointSPI.S3.checkpointListener', '"checkpointS3Listener"', 'true', 'false',
-        'Checkpoint listener implementation class name', checkpointS3)
+    +form-field__java-class({
+        label: 'Listener:',
+        model: '$checkpointSPI.S3.checkpointListener',
+        name: '"checkpointS3Listener"',
+        tip: 'Checkpoint listener implementation class name',
+        validationActive: checkpointS3
+    })
 .pc-form-grid-col-60.pc-form-group__text-title
     span Client configuration
 .pc-form-group.pc-form-grid-row(ng-if-end)
     .pc-form-grid-col-30
-        +dropdown('Protocol:', clientCfgModel + '.protocol', '"checkpointS3Protocol"', 'true', 'HTTPS', '[\
-                {value: "HTTP", label: "HTTP"},\
-                {value: "HTTPS", label: "HTTPS"}\
-            ]',
-        'Provides an ability to save an intermediate job state\
-        <ul>\
-            <li>HTTP - Using the HTTP protocol is less secure than HTTPS, but can slightly reduce\
-                the system resources used when communicating with AWS</li>\
-            <li>HTTPS - Using the HTTPS protocol is more secure than using the HTTP protocol, but\
-                may use slightly more system resources. AWS recommends using HTTPS for maximize security</li>\
-        </ul>')
+        +form-field__dropdown({
+            label: 'Protocol:',
+            model: clientCfgModel + '.protocol',
+            name: '"checkpointS3Protocol"',
+            placeholder: 'HTTPS',
+            options: '[\
+                            {value: "HTTP", label: "HTTP"},\
+                            {value: "HTTPS", label: "HTTPS"}\
+                        ]',
+            tip: 'Provides an ability to save an intermediate job state\
+                    <ul>\
+                        <li>HTTP - Using the HTTP protocol is less secure than HTTPS, but can slightly reduce\
+                            the system resources used when communicating with AWS</li>\
+                        <li>HTTPS - Using the HTTPS protocol is more secure than using the HTTP protocol, but\
+                            may use slightly more system resources. AWS recommends using HTTPS for maximize security</li>\
+                    </ul>'
+        })
     .pc-form-grid-col-30
-        +number('Maximum connections:', clientCfgModel + '.maxConnections', '"checkpointS3MaxConnections"',
-        'true', '50', '1', 'Maximum number of allowed open HTTP connections')
+        +form-field__number({
+            label:'Maximum connections:',
+            model:clientCfgModel + '.maxConnections',
+            name: '"checkpointS3MaxConnections"',
+            placeholder: '50',
+            min: '1',
+            tip: 'Maximum number of allowed open HTTP connections'
+        })
     .pc-form-grid-col-60
-        +text('User agent prefix:', clientCfgModel + '.userAgentPrefix', '"checkpointS3UserAgentPrefix"', 'false', 'System specific header',
-        'HTTP user agent prefix to send with all requests')
+        +form-field__text({
+            label: 'User agent prefix:',
+            model: `${clientCfgModel}.userAgentPrefix`,
+            name: '"checkpointS3UserAgentPrefix"',
+            placeholder: 'System specific header',
+            tip: 'HTTP user agent prefix to send with all requests'
+        })
     .pc-form-grid-col-60
-        +text('User agent suffix:', clientCfgModel + '.userAgentSuffix', '"checkpointS3UserAgentSuffix"', 'false', 'System specific header',
-        'HTTP user agent suffix to send with all requests')
+        +form-field__text({
+            label: 'User agent suffix:',
+            model: `${clientCfgModel}.userAgentSuffix`,
+            name: '"checkpointS3UserAgentSuffix"',
+            placeholder: 'System specific header',
+            tip: 'HTTP user agent suffix to send with all requests'
+        })
     .pc-form-grid-col-60
-        +text-ip-address('Local address:', clientCfgModel + '.localAddress', '"checkpointS3LocalAddress"', 'true', 'Not specified',
-        'Optionally specifies the local address to bind to')
+        +form-field__ip-address({
+            label: 'Local address:',
+            model: clientCfgModel + '.localAddress',
+            name: '"checkpointS3LocalAddress"',
+            enabled: 'true',
+            placeholder: 'Not specified',
+            tip: 'Optionally specifies the local address to bind to'
+        })
     .pc-form-grid-col-40
-        +text('Proxy host:', clientCfgModel + '.proxyHost', '"checkpointS3ProxyHost"', 'false', 'Not specified',
-        'Optional proxy host the client will connect through')
+        +form-field__text({
+            label: 'Proxy host:',
+            model: `${clientCfgModel}.proxyHost`,
+            name: '"checkpointS3ProxyHost"',
+            placeholder: 'Not specified',
+            tip: 'Optional proxy host the client will connect through'
+        })
     .pc-form-grid-col-20
-        +number('Proxy port:', clientCfgModel + '.proxyPort', '"checkpointS3ProxyPort"', 'true', 'Not specified', '0',
-        'Optional proxy port the client will connect through')
+        +form-field__number({
+            label: 'Proxy port:',
+            model: clientCfgModel + '.proxyPort',
+            name: '"checkpointS3ProxyPort"',
+            placeholder: 'Not specified',
+            min: '0',
+            tip: 'Optional proxy port the client will connect through'
+        })
     .pc-form-grid-col-30
-        +text('Proxy user:', clientCfgModel + '.proxyUsername', '"checkpointS3ProxyUsername"', 'false', 'Not specified',
-        'Optional proxy user name to use if connecting through a proxy')
+        +form-field__text({
+            label: 'Proxy user:',
+            model: clientCfgModel + '.proxyUsername',
+            name: '"checkpointS3ProxyUsername"',
+            placeholder: 'Not specified',
+            tip: 'Optional proxy user name to use if connecting through a proxy'
+        })
     .pc-form-grid-col-30
-        +text('Proxy domain:', clientCfgModel + '.proxyDomain', '"checkpointS3ProxyDomain"', 'false', 'Not specified',
-        'Optional Windows domain name for configuring an NTLM proxy')
+        +form-field__text({
+            label: 'Proxy domain:',
+            model: `${clientCfgModel}.proxyDomain`,
+            name: '"checkpointS3ProxyDomain"',
+            placeholder: 'Not specified',
+            tip: 'Optional Windows domain name for configuring an NTLM proxy'
+        })
     .pc-form-grid-col-60
-        +text('Proxy workstation:', clientCfgModel + '.proxyWorkstation', '"checkpointS3ProxyWorkstation"', 'false', 'Not specified',
-        'Optional Windows workstation name for configuring NTLM proxy support')
+        +form-field__text({
+            label: 'Proxy workstation:',
+            model: `${clientCfgModel}.proxyWorkstation`,
+            name: '"checkpointS3ProxyWorkstation"',
+            placeholder: 'Not specified',
+            tip: 'Optional Windows workstation name for configuring NTLM proxy support'
+        })
     .pc-form-grid-col-60
-        +text('Non proxy hosts:', clientCfgModel + '.nonProxyHosts', '"checkpointS3NonProxyHosts"', 'false', 'Not specified',
-        'Optional hosts the client will access without going through the proxy')
+        +form-field__text({
+            label: 'Non proxy hosts:',
+            model: `${clientCfgModel}.nonProxyHosts`,
+            name: '"checkpointS3NonProxyHosts"',
+            placeholder: 'Not specified',
+            tip: 'Optional hosts the client will access without going through the proxy'
+        })
     .pc-form-grid-col-60
-        +dropdown('Retry policy:', clientRetryModel + '.kind', '"checkpointS3RetryPolicy"', 'true', 'Default', '[\
-                                            {value: "Default", label: "Default SDK retry policy"},\
-                                            {value: "DefaultMaxRetries", label: "Default with the specified max retry count"},\
-                                            {value: "DynamoDB", label: "Default for DynamoDB client"},\
-                                            {value: "DynamoDBMaxRetries", label: "DynamoDB with the specified max retry count"},\
-                                            {value: "Custom", label: "Custom configured"}\
-                                        ]',
-        'Provides an ability to save an intermediate job state\
-        <ul>\
-            <li>SDK default retry policy - This policy will honor the maxErrorRetry set in ClientConfiguration</li>\
-            <li>Default with the specified max retry count - Default SDK retry policy with the specified max retry count</li>\
-            <li>Default for DynamoDB client - This policy will honor the maxErrorRetry set in ClientConfiguration</li>\
-            <li>DynamoDB with the specified max retry count - This policy will honor the maxErrorRetry set in ClientConfiguration with the specified max retry count</li>\
-            <li>Custom configured - Custom configured SDK retry policy</li>\
-        </ul>')
+        +form-field__dropdown({
+            label: 'Retry policy:',
+            model: `${clientRetryModel}.kind`,
+            name: '"checkpointS3RetryPolicy"',
+            placeholder: 'Default',
+            options: '[\
+                                                        {value: "Default", label: "Default SDK retry policy"},\
+                                                        {value: "DefaultMaxRetries", label: "Default with the specified max retry count"},\
+                                                        {value: "DynamoDB", label: "Default for DynamoDB client"},\
+                                                        {value: "DynamoDBMaxRetries", label: "DynamoDB with the specified max retry count"},\
+                                                        {value: "Custom", label: "Custom configured"}\
+                                                    ]',
+            tip: 'Provides an ability to save an intermediate job state\
+                    <ul>\
+                        <li>SDK default retry policy - This policy will honor the maxErrorRetry set in ClientConfiguration</li>\
+                        <li>Default with the specified max retry count - Default SDK retry policy with the specified max retry count</li>\
+                        <li>Default for DynamoDB client - This policy will honor the maxErrorRetry set in ClientConfiguration</li>\
+                        <li>DynamoDB with the specified max retry count - This policy will honor the maxErrorRetry set in ClientConfiguration with the specified max retry count</li>\
+                        <li>Custom configured - Custom configured SDK retry policy</li>\
+                    </ul>'
+        })
     .pc-form-group.pc-form-grid-row(ng-if=checkpointS3DefaultMaxRetry)
         .pc-form-grid-col-60
-            +number-required('Maximum retry attempts:', clientRetryModel + '.DefaultMaxRetries.maxErrorRetry', '"checkpointS3DefaultMaxErrorRetry"', 'true', checkpointS3DefaultMaxRetry, '-1', '1',
-            'Maximum number of retry attempts for failed requests')
+            +form-field__number({
+                label: 'Maximum retry attempts:',
+                model: clientRetryModel + '.DefaultMaxRetries.maxErrorRetry',
+                name: '"checkpointS3DefaultMaxErrorRetry"',
+                required: checkpointS3DefaultMaxRetry,
+                placeholder: '-1',
+                min: '1',
+                tip: 'Maximum number of retry attempts for failed requests'
+            })
     .pc-form-group.pc-form-grid-row(ng-if=checkpointS3DynamoDbMaxRetry)
         .pc-form-grid-col-60
-            +number-required('Maximum retry attempts:', clientRetryModel + '.DynamoDBMaxRetries.maxErrorRetry', '"checkpointS3DynamoDBMaxErrorRetry"', 'true', checkpointS3DynamoDbMaxRetry, '-1', '1',
-            'Maximum number of retry attempts for failed requests')
+            +form-field__number({
+                label: 'Maximum retry attempts:',
+                model: clientRetryModel + '.DynamoDBMaxRetries.maxErrorRetry',
+                name: '"checkpointS3DynamoDBMaxErrorRetry"',
+                required: checkpointS3DynamoDbMaxRetry,
+                placeholder: '-1',
+                min: '1',
+                tip: 'Maximum number of retry attempts for failed requests'
+            })
     .pc-form-group.pc-form-grid-row(ng-if=checkpointS3CustomRetry)
         .pc-form-grid-col-60
-            +java-class('Retry condition:', clientRetryModel + '.Custom.retryCondition', '"checkpointS3CustomRetryPolicy"', 'true', checkpointS3CustomRetry,
-            'Retry condition on whether a specific request and exception should be retried', checkpointS3CustomRetry)
+            +form-field__java-class({
+                label: 'Retry condition:',
+                model: clientRetryModel + '.Custom.retryCondition',
+                name: '"checkpointS3CustomRetryPolicy"',
+                required: checkpointS3CustomRetry,
+                tip: 'Retry condition on whether a specific request and exception should be retried',
+                validationActive: checkpointS3CustomRetry
+            })
         .pc-form-grid-col-60
-            +java-class('Backoff strategy:', clientRetryModel + '.Custom.backoffStrategy', '"checkpointS3CustomBackoffStrategy"', 'true', checkpointS3CustomRetry,
-            'Back-off strategy for controlling how long the next retry should wait', checkpointS3CustomRetry)
+            +form-field__java-class({
+                label: 'Backoff strategy:',
+                model: clientRetryModel + '.Custom.backoffStrategy',
+                name: '"checkpointS3CustomBackoffStrategy"',
+                required: checkpointS3CustomRetry,
+                tip: 'Back-off strategy for controlling how long the next retry should wait',
+                validationActive: checkpointS3CustomRetry
+            })
         .pc-form-grid-col-60
-            +number-required('Maximum retry attempts:', clientRetryModel + '.Custom.maxErrorRetry', '"checkpointS3CustomMaxErrorRetry"', 'true', checkpointS3CustomRetry, '-1', '1',
-            'Maximum number of retry attempts for failed requests')
+            +form-field__number({
+                label: 'Maximum retry attempts:',
+                model: clientRetryModel + '.Custom.maxErrorRetry',
+                name: '"checkpointS3CustomMaxErrorRetry"',
+                required: checkpointS3CustomRetry,
+                placeholder: '-1',
+                min: '1',
+                tip: 'Maximum number of retry attempts for failed requests'
+            })
         .pc-form-grid-col-60
-            +checkbox('Honor the max error retry set', clientRetryModel + '.Custom.honorMaxErrorRetryInClientConfig', '"checkpointS3CustomHonorMaxErrorRetryInClientConfig"',
-            'Whether this retry policy should honor the max error retry set by ClientConfiguration#setMaxErrorRetry(int)')
+            +form-field__checkbox({
+                label: 'Honor the max error retry set',
+                model: clientRetryModel + '.Custom.honorMaxErrorRetryInClientConfig',
+                name: '"checkpointS3CustomHonorMaxErrorRetryInClientConfig"',
+                tip: 'Whether this retry policy should honor the max error retry set by ClientConfiguration#setMaxErrorRetry(int)'
+            })
     .pc-form-grid-col-60
-        +number('Maximum retry attempts:', clientCfgModel + '.maxErrorRetry', '"checkpointS3MaxErrorRetry"', 'true', '-1', '0',
-        'Maximum number of retry attempts for failed retryable requests<br/>\
-        If -1 the configured RetryPolicy will be used to control the retry count')
+        +form-field__number({
+            label: 'Maximum retry attempts:',
+            model: `${clientCfgModel}.maxErrorRetry`,
+            name: '"checkpointS3MaxErrorRetry"',
+            placeholder: '-1',
+            min: '0',
+            tip: 'Maximum number of retry attempts for failed retryable requests<br/>\
+                  If -1 the configured RetryPolicy will be used to control the retry count'
+        })
     .pc-form-grid-col-30
-        +number('Socket timeout:', clientCfgModel + '.socketTimeout', '"checkpointS3SocketTimeout"', 'true', '50000', '0',
-        'Amount of time in milliseconds to wait for data to be transfered over an established, open connection before the connection times out and is closed<br/>\
-        A value of <b>0</b> means infinity')
+        +form-field__number({
+            label: 'Socket timeout:',
+            model: `${clientCfgModel}.socketTimeout`,
+            name: '"checkpointS3SocketTimeout"',
+            placeholder: '50000',
+            min: '0',
+            tip: 'Amount of time in milliseconds to wait for data to be transfered over an established, open connection before the connection times out and is closed<br/>\
+                  A value of <b>0</b> means infinity'
+        })
     .pc-form-grid-col-30
-        +number('Connection timeout:', clientCfgModel + '.connectionTimeout', '"checkpointS3ConnectionTimeout"', 'true', '50000', '0',
-        'Amount of time in milliseconds to wait when initially establishing a connection before giving up and timing out<br/>\
-        A value of <b>0</b> means infinity')
+        +form-field__number({
+            label: 'Connection timeout:',
+            model: `${clientCfgModel}.connectionTimeout`,
+            name: '"checkpointS3ConnectionTimeout"',
+            placeholder: '50000',
+            min: '0',
+            tip: 'Amount of time in milliseconds to wait when initially establishing a connection before giving up and timing out<br/>\
+                  A value of <b>0</b> means infinity'
+        })
     .pc-form-grid-col-30
-        +number('Request timeout:', clientCfgModel + '.requestTimeout', '"checkpointS3RequestTimeout"', 'true', '0', '-1',
-        'Amount of time in milliseconds to wait for the request to complete before giving up and timing out<br/>\
-        A non - positive value means infinity')
+        +form-field__number({
+            label: 'Request timeout:',
+            model: `${clientCfgModel}.requestTimeout`,
+            name: '"checkpointS3RequestTimeout"',
+            placeholder: '0',
+            min: '-1',
+            tip: 'Amount of time in milliseconds to wait for the request to complete before giving up and timing out<br/>\
+                  A non - positive value means infinity'
+        })
     .pc-form-grid-col-30
-        +number('Idle timeout:', clientCfgModel + '.connectionMaxIdleMillis', '"checkpointS3ConnectionMaxIdleMillis"', 'true', '60000', '0',
-        'Maximum amount of time that an idle connection may sit in the connection pool and still be eligible for reuse')
+        +form-field__number({
+            label: 'Idle timeout:',
+            model: `${clientCfgModel}.connectionMaxIdleMillis`,
+            name: '"checkpointS3ConnectionMaxIdleMillis"',
+            placeholder: '60000',
+            min: '0',
+            tip: 'Maximum amount of time that an idle connection may sit in the connection pool and still be eligible for reuse'
+        })
     .pc-form-grid-col-30
-        +text('Signature algorithm:', clientCfgModel + '.signerOverride', '"checkpointS3SignerOverride"', 'false', 'Not specified',
-        'Name of the signature algorithm to use for signing requests made by this client')
+        +form-field__text({
+            label: 'Signature algorithm:',
+            model: `${clientCfgModel}.signerOverride`,
+            name: '"checkpointS3SignerOverride"',
+            placeholder: 'Not specified',
+            tip: 'Name of the signature algorithm to use for signing requests made by this client'
+        })
     .pc-form-grid-col-30
-        +number('Connection TTL:', clientCfgModel + '.connectionTTL', '"checkpointS3ConnectionTTL"', 'true', '-1', '-1',
-        'Expiration time in milliseconds for a connection in the connection pool<br/>\
-        By default, it is set to <b>-1</b>, i.e. connections do not expire')
+        +form-field__number({
+            label: 'Connection TTL:',
+            model: `${clientCfgModel}.connectionTTL`,
+            name: '"checkpointS3ConnectionTTL"',
+            placeholder: '-1',
+            min: '-1',
+            tip: 'Expiration time in milliseconds for a connection in the connection pool<br/>\
+                  By default, it is set to <b>-1</b>, i.e. connections do not expire'
+        })
     .pc-form-grid-col-60
-        +java-class('DNS resolver:', clientCfgModel + '.dnsResolver', '"checkpointS3DnsResolver"', 'true', 'false',
-        'DNS Resolver that should be used to for resolving AWS IP addresses', checkpointS3)
+        +form-field__java-class({
+            label: 'DNS resolver:',
+            model: clientCfgModel + '.dnsResolver',
+            name: '"checkpointS3DnsResolver"',
+            tip: 'DNS Resolver that should be used to for resolving AWS IP addresses',
+            validationActive: checkpointS3
+        })
     .pc-form-grid-col-60
-        +number('Response metadata cache size:', clientCfgModel + '.responseMetadataCacheSize', '"checkpointS3ResponseMetadataCacheSize"', 'true', '50', '0',
-        'Response metadata cache size')
+        +form-field__number({
+            label: 'Response metadata cache size:',
+            model: `${clientCfgModel}.responseMetadataCacheSize`,
+            name: '"checkpointS3ResponseMetadataCacheSize"',
+            placeholder: '50',
+            min: '0',
+            tip: 'Response metadata cache size'
+        })
     .pc-form-grid-col-60
-        +java-class('SecureRandom class name:', clientCfgModel + '.secureRandom', '"checkpointS3SecureRandom"', 'true', 'false',
-        'SecureRandom to be used by the SDK class name', checkpointS3)
+        +form-field__java-class({
+            label: 'SecureRandom class name:',
+            model: clientCfgModel + '.secureRandom',
+            name: '"checkpointS3SecureRandom"',
+            tip: 'SecureRandom to be used by the SDK class name',
+            validationActive: checkpointS3
+        })
     .pc-form-grid-col-60
-        +number('Client execution timeout:', clientCfgModel + '.clientExecutionTimeout', '"checkpointS3ClientExecutionTimeout"', 'true', '0', '0',
-        'Amount of time in milliseconds to allow the client to complete the execution of an API call<br/>\
-        <b>0</b> value disables that feature')
+        +form-field__number({
+            label: 'Client execution timeout:',
+            model: `${clientCfgModel}.clientExecutionTimeout`,
+            name: '"checkpointS3ClientExecutionTimeout"',
+            placeholder: '0',
+            min: '0',
+            tip: 'Amount of time in milliseconds to allow the client to complete the execution of an API call<br/>\
+                  <b>0</b> value disables that feature'
+        })
     .pc-form-grid-col-60
-        +checkbox('Cache response metadata', clientCfgModel + '.cacheResponseMetadata', '"checkpointS3CacheResponseMetadata"', 'Cache response metadata')
+        +form-field__checkbox({
+            label: 'Cache response metadata',
+            model: clientCfgModel + '.cacheResponseMetadata',
+            name: '"checkpointS3CacheResponseMetadata"',
+            tip: 'Cache response metadata'
+        })
     .pc-form-grid-col-60
-        +checkbox('Use expect continue', clientCfgModel + '.useExpectContinue', '"checkpointS3UseExpectContinue"', 'Optional override to enable/disable support for HTTP/1.1 handshake utilizing EXPECT: 100-Continue')
+        +form-field__checkbox({
+            label: 'Use expect continue',
+            model: clientCfgModel + '.useExpectContinue',
+            name: '"checkpointS3UseExpectContinue"',
+            tip: 'Optional override to enable/disable support for HTTP/1.1 handshake utilizing EXPECT: 100-Continue'
+        })
     .pc-form-grid-col-60
-        +checkbox('Use throttle retries', clientCfgModel + '.useThrottleRetries', '"checkpointS3UseThrottleRetries"', 'Retry throttling will be used')
+        +form-field__checkbox({
+            label: 'Use throttle retries',
+            model: clientCfgModel + '.useThrottleRetries',
+            name: '"checkpointS3UseThrottleRetries"',
+            tip: 'Retry throttling will be used'
+        })
     .pc-form-grid-col-60
-        +checkbox('Use reaper', clientCfgModel + '.useReaper', '"checkpointS3UseReaper"', 'Checks if the IdleConnectionReaper is to be started')
+        +form-field__checkbox({
+            label: 'Use reaper',
+            model: clientCfgModel + '.useReaper',
+            name: '"checkpointS3UseReaper"',
+            tip: 'Checks if the IdleConnectionReaper is to be started'
+        })
     .pc-form-grid-col-60
-        +checkbox('Use GZIP', clientCfgModel + '.useGzip', '"checkpointS3UseGzip"', 'Checks if gzip compression is used')
+        +form-field__checkbox({
+            label: 'Use GZIP',
+            model: clientCfgModel + '.useGzip',
+            name: '"checkpointS3UseGzip"',
+            tip: 'Checks if gzip compression is used'
+        })
     .pc-form-grid-col-60
-        +checkbox('Preemptively basic authentication', clientCfgModel + '.preemptiveBasicProxyAuth', '"checkpointS3PreemptiveBasicProxyAuth"',
-        'Attempt to authenticate preemptively against proxy servers using basic authentication')
+        +form-field__checkbox({
+            label: 'Preemptively basic authentication',
+            model: clientCfgModel + '.preemptiveBasicProxyAuth',
+            name: '"checkpointS3PreemptiveBasicProxyAuth"',
+            tip: 'Attempt to authenticate preemptively against proxy servers using basic authentication'
+        })
     .pc-form-grid-col-60
-        +checkbox('TCP KeepAlive', clientCfgModel + '.useTcpKeepAlive', '"checkpointS3UseTcpKeepAlive"', 'TCP KeepAlive support is enabled')
+        +form-field__checkbox({
+            label: 'TCP KeepAlive',
+            model: clientCfgModel + '.useTcpKeepAlive',
+            name: '"checkpointS3UseTcpKeepAlive"',
+            tip: 'TCP KeepAlive support is enabled'
+        })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/client-connector.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/client-connector.pug
index 620137b..00c3563 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/client-connector.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/client-connector.pug
@@ -28,49 +28,156 @@
     panel-content.pca-form-row(ng-if=`$ctrl.available("2.3.0") && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Enabled', connectionEnabled, '"ClientConnectorEnabled"', 'Flag indicating whether to configure client connector configuration')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: connectionEnabled,
+                    name: '"ClientConnectorEnabled"',
+                    tip: 'Flag indicating whether to configure client connector configuration'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Host:', `${connectionModel}.host`, '"ClientConnectorHost"', connectionEnabled, 'false', 'localhost')
+                +form-field__text({
+                    label: 'Host:',
+                    model: `${connectionModel}.host`,
+                    name: '"ClientConnectorHost"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: 'localhost'
+                })
             .pc-form-grid-col-30
-                +number('Port:', `${connectionModel}.port`, '"ClientConnectorPort"', connectionEnabled, '10800', '1025')
+                +form-field__number({
+                    label: 'Port:',
+                    model: `${connectionModel}.port`,
+                    name: '"ClientConnectorPort"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '10800',
+                    min: '1025'
+                })
             .pc-form-grid-col-30
-                +number('Port range:', `${connectionModel}.portRange`, '"ClientConnectorPortRange"', connectionEnabled, '100', '0')
+                +form-field__number({
+                    label: 'Port range:',
+                    model: `${connectionModel}.portRange`,
+                    name: '"ClientConnectorPortRange"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '100',
+                    min: '0'
+                })
             .pc-form-grid-col-30
-                +number('Socket send buffer size:', `${connectionModel}.socketSendBufferSize`, '"ClientConnectorSocketSendBufferSize"', connectionEnabled, '0', '0',
-                    'Socket send buffer size<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label: 'Socket send buffer size:',
+                    model: `${connectionModel}.socketSendBufferSize`,
+                    name: '"ClientConnectorSocketSendBufferSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket send buffer size<br/>\
+                          When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Socket receive buffer size:', `${connectionModel}.socketReceiveBufferSize`, '"ClientConnectorSocketReceiveBufferSize"', connectionEnabled, '0', '0',
-                    'Socket receive buffer size<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label: 'Socket receive buffer size:',
+                    model: `${connectionModel}.socketReceiveBufferSize`,
+                    name: '"ClientConnectorSocketReceiveBufferSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket receive buffer size<br/>\
+                          When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Max connection cursors:', `${connectionModel}.maxOpenCursorsPerConnection`, '"ClientConnectorMaxOpenCursorsPerConnection"', connectionEnabled, '128', '0',
-                    'Max number of opened cursors per connection')
+                +form-field__number({
+                    label: 'Max connection cursors:',
+                    model: `${connectionModel}.maxOpenCursorsPerConnection`,
+                    name: '"ClientConnectorMaxOpenCursorsPerConnection"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '128',
+                    min: '0',
+                    tip: 'Max number of opened cursors per connection'
+                })
             .pc-form-grid-col-30
-                +number('Pool size:', `${connectionModel}.threadPoolSize`, '"ClientConnectorThreadPoolSize"', connectionEnabled, 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing SQL requests')
+                +form-field__number({
+                    label: 'Pool size:',
+                    model: `${connectionModel}.threadPoolSize`,
+                    name: '"ClientConnectorThreadPoolSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing SQL requests'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('TCP_NODELAY option', `${connectionModel}.tcpNoDelay`, '"ClientConnectorTcpNoDelay"', connectionEnabled)
+                +form-field__checkbox({
+                    label: 'TCP_NODELAY option',
+                    model: `${connectionModel}.tcpNoDelay`,
+                    name: '"ClientConnectorTcpNoDelay"',
+                    disabled: `!${connectionEnabled}`
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.4.0")')
-                +number('Idle timeout:', `${connectionModel}.idleTimeout`, '"ClientConnectorIdleTimeout"', connectionEnabled, '0', '-1',
-                    'Idle timeout for client connections<br/>\
-                    Zero or negative means no timeout')
+                +form-field__number({
+                    label: 'Idle timeout:',
+                    model: `${connectionModel}.idleTimeout`,
+                    name: '"ClientConnectorIdleTimeout"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '0',
+                    min: '-1',
+                    tip: 'Idle timeout for client connections<br/>\
+                         Zero or negative means no timeout'
+                })
             .pc-form-grid-col-60(ng-if-start='$ctrl.available("2.5.0")')
-                +checkbox-enabled('Enable SSL', `${connectionModel}.sslEnabled`, '"ClientConnectorSslEnabled"', connectionEnabled, 'Enable secure socket layer on client connector')
+                +form-field__checkbox({
+                    label: 'Enable SSL',
+                    model: `${connectionModel}.sslEnabled`,
+                    name: '"ClientConnectorSslEnabled"',
+                    disabled: `!${connectionEnabled}`,
+                    tip: 'Enable secure socket layer on client connector'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Enable SSL client auth', `${connectionModel}.sslClientAuth`, '"ClientConnectorSslClientAuth"', sslEnabled, 'Flag indicating whether or not SSL client authentication is required')
+                +form-field__checkbox({
+                    label: 'Enable SSL client auth',
+                    model: `${connectionModel}.sslClientAuth`,
+                    name: '"ClientConnectorSslClientAuth"',
+                    disabled: `!(${sslEnabled})`,
+                    tip: 'Flag indicating whether or not SSL client authentication is required'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Use Ignite SSL', `${connectionModel}.useIgniteSslContextFactory`, '"ClientConnectorUseIgniteSslContextFactory"', sslEnabled, 'Use SSL factory Ignite configuration')
+                +form-field__checkbox({
+                    label: 'Use Ignite SSL',
+                    model: `${connectionModel}.useIgniteSslContextFactory`,
+                    name: '"ClientConnectorUseIgniteSslContextFactory"',
+                    disabled: `!(${sslEnabled})`,
+                    tip: 'Use SSL factory Ignite configuration'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +java-class('SSL factory:', `${connectionModel}.sslContextFactory`, '"ClientConnectorSslContextFactory"', sslFactoryEnabled, sslFactoryEnabled,
-                'If SSL factory specified then replication will be performed through secure SSL channel created with this factory<br/>\
-                If not present <b>isUseIgniteSslContextFactory()</b> flag will be evaluated<br/>\
-                If set to <b>true</b> and <b>IgniteConfiguration#getSslContextFactory()</b> exists, then Ignite SSL context factory will be used to establish secure connection')
+                +form-field__java-class({
+                    label:'SSL factory:',
+                    model: `${connectionModel}.sslContextFactory`,
+                    name: '"ClientConnectorSslContextFactory"',
+                    disabled: `!(${sslFactoryEnabled})`,
+                    required: sslFactoryEnabled,
+                    tip: 'If SSL factory specified then replication will be performed through secure SSL channel created with this factory<br/>\
+                          If not present <b>isUseIgniteSslContextFactory()</b> flag will be evaluated<br/>\
+                          If set to <b>true</b> and <b>IgniteConfiguration#getSslContextFactory()</b> exists, then Ignite SSL context factory will be used to establish secure connection'
+                })
             .pc-form-grid-col-60(ng-if-start='$ctrl.available("2.4.0")')
-                +checkbox-enabled('JDBC Enabled', `${connectionModel}.jdbcEnabled`, '"ClientConnectorJdbcEnabled"', connectionEnabled, 'Access through JDBC is enabled')
+                +form-field__checkbox({
+                    label: 'JDBC Enabled',
+                    model: `${connectionModel}.jdbcEnabled`,
+                    name: '"ClientConnectorJdbcEnabled"',
+                    disabled: `!${connectionEnabled}`,
+                    tip: 'Access through JDBC is enabled'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('ODBC Enabled', `${connectionModel}.odbcEnabled`, '"ClientConnectorOdbcEnabled"', connectionEnabled, 'Access through ODBC is enabled')
+                +form-field__checkbox({
+                    label: 'ODBC Enabled',
+                    model: `${connectionModel}.odbcEnabled`,
+                    name: '"ClientConnectorOdbcEnabled"',
+                    disabled: `!${connectionEnabled}`,
+                    tip: 'Access through ODBC is enabled'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +checkbox-enabled('Thin client enabled', `${connectionModel}.thinClientEnabled`, '"ClientConnectorThinCliEnabled"', connectionEnabled, 'Access through thin client is enabled')
+                +form-field__checkbox({
+                    label: 'Thin client enabled',
+                    model: `${connectionModel}.thinClientEnabled`,
+                    name: '"ClientConnectorThinCliEnabled"',
+                    disabled: `!${connectionEnabled}`,
+                    tip: 'Access through thin client is enabled'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterClientConnector')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision.pug
index c315af1..e3cacd3 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision.pug
@@ -23,27 +23,32 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Collision configuration
     panel-description
-        | Configuration Collision SPI allows to regulate how grid jobs get executed when they arrive on a destination node for execution. 
+        | Configuration Collision SPI allows to regulate how grid jobs get executed when they arrive on a destination node for execution.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/job-scheduling" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('CollisionSpi:', modelCollisionKind, '"collisionKind"', 'true', '',
-                    '[\
+                +form-field__dropdown({
+                    label:'CollisionSpi:',
+                    model: modelCollisionKind,
+                    name: '"collisionKind"',
+                    placeholder: 'Choose discovery',
+                    options: '[\
                         {value: "JobStealing", label: "Job stealing"},\
                         {value: "FifoQueue", label: "FIFO queue"},\
                         {value: "PriorityQueue", label: "Priority queue"},\
                         {value: "Custom", label: "Custom"},\
                         {value: "Noop", label: "Default"}\
                     ]',
-                    'Regulate how grid jobs get executed when they arrive on a destination node for execution\
-                    <ul>\
-                        <li>Job stealing - supports job stealing from over-utilized nodes to under-utilized nodes</li>\
-                        <li>FIFO queue - jobs are ordered as they arrived</li>\
-                        <li>Priority queue - jobs are first ordered by their priority</li>\
-                        <li>Custom - custom CollisionSpi implementation</li>\
-                        <li>Default - jobs are activated immediately on arrival to mapped node</li>\
-                    </ul>')
+                    tip: 'Regulate how grid jobs get executed when they arrive on a destination node for execution\
+                       <ul>\
+                           <li>Job stealing - supports job stealing from over-utilized nodes to under-utilized nodes</li>\
+                           <li>FIFO queue - jobs are ordered as they arrived</li>\
+                           <li>Priority queue - jobs are first ordered by their priority</li>\
+                           <li>Custom - custom CollisionSpi implementation</li>\
+                           <li>Default - jobs are activated immediately on arrival to mapped node</li>\
+                       </ul>'
+                })
             .pc-form-group(ng-show=`${modelCollisionKind} !== 'Noop'`)
                 .pc-form-grid-row(ng-show=`${modelCollisionKind} === 'JobStealing'`)
                     include ./collision/job-stealing
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/custom.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/custom.pug
index c1d11d5..64bd5e4 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/custom.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/custom.pug
@@ -20,4 +20,11 @@
 -var required = '$ctrl.clonedCluster.collision.kind === "Custom"'
 
 .pc-form-grid-col-60
-    +java-class('Class:', `${model}.class`, '"collisionCustom"', 'true', required, 'CollisionSpi implementation class', required)
+    +form-field__java-class({
+        label: 'Class:',
+        model: `${model}.class`,
+        name: '"collisionCustom"',
+        required: required,
+        tip: 'CollisionSpi implementation class',
+        validationActive: required
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/fifo-queue.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/fifo-queue.pug
index c009386..de795b7 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/fifo-queue.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/fifo-queue.pug
@@ -19,8 +19,20 @@
 -var model = '$ctrl.clonedCluster.collision.FifoQueue'
 
 .pc-form-grid-col-30
-    +number('Parallel jobs number:', `${model}.parallelJobsNumber`, '"fifoParallelJobsNumber"', 'true', 'availableProcessors * 2', '1',
-        'Number of jobs that can be executed in parallel')
+    +form-field__number({
+        label: 'Parallel jobs number:',
+        model: `${model}.parallelJobsNumber`,
+        name: '"fifoParallelJobsNumber"',
+        placeholder: 'availableProcessors * 2',
+        min: '1',
+        tip: 'Number of jobs that can be executed in parallel'
+    })
 .pc-form-grid-col-30
-    +number('Wait jobs number:', `${model}.waitingJobsNumber`, '"fifoWaitingJobsNumber"', 'true', 'Integer.MAX_VALUE', '0',
-        'Maximum number of jobs that are allowed to wait in waiting queue')
+    +form-field__number({
+        label: 'Wait jobs number:',
+        model: `${model}.waitingJobsNumber`,
+        name: '"fifoWaitingJobsNumber"',
+        placeholder: 'Integer.MAX_VALUE',
+        min: '0',
+        tip: 'Maximum number of jobs that are allowed to wait in waiting queue'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/job-stealing.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/job-stealing.pug
index f1b0a56..8722544 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/job-stealing.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/job-stealing.pug
@@ -20,32 +20,64 @@
 -var stealingAttributes = `${model}.stealingAttributes`
 
 .pc-form-grid-col-30
-    +number('Active jobs threshold:', `${model}.activeJobsThreshold`, '"jsActiveJobsThreshold"', 'true', '95', '0',
-        'Number of jobs that can be executed in parallel')
+    +form-field__number({
+        label: 'Active jobs threshold:',
+        model: `${model}.activeJobsThreshold`,
+        name: '"jsActiveJobsThreshold"',
+        placeholder: '95',
+        min: '0',
+        tip: 'Number of jobs that can be executed in parallel'
+    })
 .pc-form-grid-col-30
-    +number('Wait jobs threshold:', `${model}.waitJobsThreshold`, '"jsWaitJobsThreshold"', 'true', '0', '0',
-        'Job count threshold at which this node will start stealing jobs from other nodes')
+    +form-field__number({
+        label: 'Wait jobs threshold:',
+        model: `${model}.waitJobsThreshold`,
+        name: '"jsWaitJobsThreshold"',
+        placeholder: '0',
+        min: '0',
+        tip: 'Job count threshold at which this node will start stealing jobs from other nodes'
+    })
 .pc-form-grid-col-30
-    +number('Message expire time:', `${model}.messageExpireTime`, '"jsMessageExpireTime"', 'true', '1000', '1',
-        'Message expire time in ms')
+    +form-field__number({
+        label: 'Message expire time:',
+        model: `${model}.messageExpireTime`,
+        name: '"jsMessageExpireTime"',
+        placeholder: '1000',
+        min: '1',
+        tip: 'Message expire time in ms'
+    })
 .pc-form-grid-col-30
-    +number('Maximum stealing attempts:', `${model}.maximumStealingAttempts`, '"jsMaximumStealingAttempts"', 'true', '5', '1',
-        'Maximum number of attempts to steal job by another node')
+    +form-field__number({
+        label: 'Maximum stealing attempts:',
+        model: `${model}.maximumStealingAttempts`,
+        name: '"jsMaximumStealingAttempts"',
+        placeholder: '5',
+        min: '1',
+        tip: 'Maximum number of attempts to steal job by another node'
+    })
 .pc-form-grid-col-60
-    +checkbox('Stealing enabled', `${model}.stealingEnabled`, '"jsStealingEnabled"',
-        'Node should attempt to steal jobs from other nodes')
+    +form-field__checkbox({
+        label: 'Stealing enabled',
+        model: `${model}.stealingEnabled`,
+        name: '"jsStealingEnabled"',
+        tip: 'Node should attempt to steal jobs from other nodes'
+    })
 .pc-form-grid-col-60
-    +java-class('External listener:', `${model}.externalCollisionListener`, '"jsExternalCollisionListener"', 'true', 'false',
-        'Listener to be set for notification of external collision events', '$ctrl.clonedCluster.collision.kind === "JobStealing"')
+    +form-field__java-class({
+        label: 'External listener:',
+        model: `${model}.externalCollisionListener`,
+        name: '"jsExternalCollisionListener"',
+        tip: 'Listener to be set for notification of external collision events',
+        validationActive: '$ctrl.clonedCluster.collision.kind === "JobStealing"'
+    })
 .pc-form-grid-col-60
     .ignite-form-field
-        +ignite-form-field__label('Stealing attributes:', '"stealingAttributes"')
-            +tooltip(`Configuration parameter to enable stealing to/from only nodes that have these attributes set`)
-        .ignite-form-field__control
-            +list-pair-edit({
-                items: stealingAttributes,
-                keyLbl: 'Attribute name', 
-                valLbl: 'Attribute value',
-                itemName: 'stealing attribute',
-                itemsName: 'stealing attributes'
-            })
+        +form-field__label({ label: 'Stealing attributes:', name: '"stealingAttributes"' })
+            +form-field__tooltip(`Configuration parameter to enable stealing to/from only nodes that have these attributes set`)
+        +list-pair-edit({
+            items: stealingAttributes,
+            keyLbl: 'Attribute name',
+            valLbl: 'Attribute value',
+            itemName: 'stealing attribute',
+            itemsName: 'stealing attributes'
+        })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/priority-queue.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/priority-queue.pug
index fd198ce..c8ae733 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/priority-queue.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/collision/priority-queue.pug
@@ -19,23 +19,61 @@
 -var model = '$ctrl.clonedCluster.collision.PriorityQueue'
 
 .pc-form-grid-col-30
-    +number('Parallel jobs number:', `${model}.parallelJobsNumber`, '"priorityParallelJobsNumber"', 'true', 'availableProcessors * 2', '1',
-        'Number of jobs that can be executed in parallel')
+    +form-field__number({
+        label: 'Parallel jobs number:',
+        model: `${model}.parallelJobsNumber`,
+        name: '"priorityParallelJobsNumber"',
+        placeholder: 'availableProcessors * 2',
+        min: '1',
+        tip: 'Number of jobs that can be executed in parallel'
+    })
 .pc-form-grid-col-30
-    +number('Waiting jobs number:', `${model}.waitingJobsNumber`, '"priorityWaitingJobsNumber"', 'true', 'Integer.MAX_VALUE', '0',
-        'Maximum number of jobs that are allowed to wait in waiting queue')
+    +form-field__number({
+        label: 'Waiting jobs number:',
+        model: `${model}.waitingJobsNumber`,
+        name: '"priorityWaitingJobsNumber"',
+        placeholder: 'Integer.MAX_VALUE',
+        min: '0',
+        tip: 'Maximum number of jobs that are allowed to wait in waiting queue'
+    })
 .pc-form-grid-col-30
-    +text('Priority attribute key:', `${model}.priorityAttributeKey`, '"priorityPriorityAttributeKey"', 'false', 'grid.task.priority',
-        'Task priority attribute key')
+    +form-field__text({
+        label: 'Priority attribute key:',
+        model: `${model}.priorityAttributeKey`,
+        name: '"priorityPriorityAttributeKey"',
+        placeholder: 'grid.task.priority',
+        tip: 'Task priority attribute key'
+    })
 .pc-form-grid-col-30
-    +text('Job priority attribute key:', `${model}.jobPriorityAttributeKey`, '"priorityJobPriorityAttributeKey"', 'false', 'grid.job.priority',
-        'Job priority attribute key')
+    +form-field__text({
+        label: 'Job priority attribute key:',
+        model: `${model}.jobPriorityAttributeKey`,
+        name: '"priorityJobPriorityAttributeKey"',
+        placeholder: 'grid.job.priority',
+        tip: 'Job priority attribute key'
+    })
 .pc-form-grid-col-30
-    +number('Default priority:', `${model}.defaultPriority`, '"priorityDefaultPriority"', 'true', '0', '0',
-        'Default priority to use if a job does not have priority attribute set')
+    +form-field__number({
+        label: 'Default priority:',
+        model: `${model}.defaultPriority`,
+        name: '"priorityDefaultPriority"',
+        placeholder: '0',
+        min: '0',
+        tip: 'Default priority to use if a job does not have priority attribute set'
+    })
 .pc-form-grid-col-30
-    +number('Starvation increment:', `${model}.starvationIncrement`, '"priorityStarvationIncrement"', 'true', '1', '0',
-        'Value to increment job priority by every time a lower priority job gets behind a higher priority job')
+    +form-field__number({
+        label: 'Starvation increment:',
+        model: `${model}.starvationIncrement`,
+        name: '"priorityStarvationIncrement"',
+        placeholder: '1',
+        min: '0',
+        tip: 'Value to increment job priority by every time a lower priority job gets behind a higher priority job'
+    })
 .pc-form-grid-col-60
-    +checkbox('Starvation prevention enabled', `${model}.starvationPreventionEnabled`, '"priorityStarvationPreventionEnabled"',
-        'Job starvation prevention is enabled')
+    +form-field__checkbox({
+        label: 'Starvation prevention enabled',
+        model: `${model}.starvationPreventionEnabled`,
+        name: '"priorityStarvationPreventionEnabled"',
+        tip: 'Job starvation prevention is enabled'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/communication.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/communication.pug
index 8b43521..cdf473a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/communication.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/communication.pug
@@ -24,30 +24,84 @@
     panel-title Communication
     panel-description
         | Configuration of communication with other nodes by TCP/IP.
-        | Provide basic plumbing to send and receive grid messages and is utilized for all distributed grid operations. 
+        | Provide basic plumbing to send and receive grid messages and is utilized for all distributed grid operations.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/network-config" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +number('Timeout:', `${model}.networkTimeout`, '"commNetworkTimeout"', 'true', '5000', '1', 'Maximum timeout in milliseconds for network requests')
+                +form-field__number({
+                    label: 'Timeout:',
+                    model: `${model}.networkTimeout`,
+                    name: '"commNetworkTimeout"',
+                    placeholder: '5000',
+                    min: '1',
+                    tip: 'Maximum timeout in milliseconds for network requests'
+                })
             .pc-form-grid-col-30
-                +number('Send retry delay:', `${model}.networkSendRetryDelay`, '"networkSendRetryDelay"', 'true', '1000', '1', 'Interval in milliseconds between message send retries')
+                +form-field__number({
+                    label: 'Send retry delay:',
+                    model: `${model}.networkSendRetryDelay`,
+                    name: '"networkSendRetryDelay"',
+                    placeholder: '1000',
+                    min: '1',
+                    tip: 'Interval in milliseconds between message send retries'
+                })
             .pc-form-grid-col-30
-                +number('Send retry count:', `${model}.networkSendRetryCount`, '"networkSendRetryCount"', 'true', '3', '1', 'Message send retries count')
+                +form-field__number({
+                    label: 'Send retry count:',
+                    model: `${model}.networkSendRetryCount`,
+                    name: '"networkSendRetryCount"',
+                    placeholder: '3',
+                    min: '1',
+                    tip: 'Message send retries count'
+                })
             .pc-form-grid-col-30(ng-if='$ctrl.available(["1.0.0", "2.3.0"])')
-                +number('Discovery startup delay:', `${model}.discoveryStartupDelay`, '"discoveryStartupDelay"', 'true', '60000', '1', 'This value is used to expire messages from waiting list whenever node discovery discrepancies happen')
+                +form-field__number({
+                    label: 'Discovery startup delay:',
+                    model: `${model}.discoveryStartupDelay`,
+                    name: '"discoveryStartupDelay"',
+                    placeholder: '60000',
+                    min: '1',
+                    tip: 'This value is used to expire messages from waiting list whenever node discovery discrepancies happen'
+                })
             .pc-form-grid-col-60
-                +java-class('Communication listener:', `${communication}.listener`, '"comListener"', 'true', 'false', 'Listener of communication events')
+                +form-field__java-class({
+                    label: 'Communication listener:',
+                    model: `${communication}.listener`,
+                    name: '"comListener"',
+                    tip: 'Listener of communication events'
+                })
             .pc-form-grid-col-30
-                +text-ip-address('Local IP address:', `${communication}.localAddress`, '"comLocalAddress"', 'true', '0.0.0.0',
-                    'Local host address for socket binding<br/>\
-                    If not specified use all available addres on local host')
+                +form-field__ip-address({
+                    label: 'Local IP address:',
+                    model: `${communication}.localAddress`,
+                    name: '"comLocalAddress"',
+                    enabled: 'true',
+                    placeholder: '0.0.0.0',
+                    tip: 'Local host address for socket binding<br/>\
+                         If not specified use all available addres on local host'
+                })
             .pc-form-grid-col-30
-                +number-min-max('Local port:', `${communication}.localPort`, '"comLocalPort"', 'true', '47100', '1024', '65535', 'Local port for socket binding')
+                +form-field__number({
+                    label: 'Local port:',
+                    model: `${communication}.localPort`,
+                    name: '"comLocalPort"',
+                    placeholder: '47100',
+                    min: '1024',
+                    max: '65535',
+                    tip: 'Local port for socket binding'
+                })
             .pc-form-grid-col-30
-                +number('Local port range:', `${communication}.localPortRange`, '"comLocalPortRange"', 'true', '100', '1', 'Local port range for local host ports')
+                +form-field__number({
+                    label: 'Local port range:',
+                    model: `${communication}.localPortRange`,
+                    name: '"comLocalPortRange"',
+                    placeholder: '100',
+                    min: '1',
+                    tip: 'Local port range for local host ports'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Shared memory port:',
                     model: `${communication}.sharedMemoryPort`,
                     name: '"sharedMemoryPort"',
@@ -58,25 +112,72 @@
                 })(
                     pc-not-in-collection='::$ctrl.Clusters.sharedMemoryPort.invalidValues'
                 )
-                    +form-field-feedback('"sharedMemoryPort"', 'notInCollection', 'Shared memory port should be more than "{{ ::$ctrl.Clusters.sharedMemoryPort.invalidValues[0] }}" or equal to "{{ ::$ctrl.Clusters.sharedMemoryPort.min }}"')
+                    +form-field__error({ error: 'notInCollection', message: 'Shared memory port should be more than "{{ ::$ctrl.Clusters.sharedMemoryPort.invalidValues[0] }}" or equal to "{{ ::$ctrl.Clusters.sharedMemoryPort.min }}"' })
             .pc-form-grid-col-30
-                +number('Idle connection timeout:', `${communication}.idleConnectionTimeout`, '"idleConnectionTimeout"', 'true', '30000', '1',
-                    'Maximum idle connection timeout upon which a connection to client will be closed')
+                +form-field__number({
+                    label: 'Idle connection timeout:',
+                    model: `${communication}.idleConnectionTimeout`,
+                    name: '"idleConnectionTimeout"',
+                    placeholder: '30000',
+                    min: '1',
+                    tip: 'Maximum idle connection timeout upon which a connection to client will be closed'
+                })
             .pc-form-grid-col-30
-                +number('Connect timeout:', `${communication}.connectTimeout`, '"connectTimeout"', 'true', '5000', '0', 'Connect timeout used when establishing connection with remote nodes')
+                +form-field__number({
+                    label: 'Connect timeout:',
+                    model: `${communication}.connectTimeout`,
+                    name: '"connectTimeout"',
+                    placeholder: '5000',
+                    min: '0',
+                    tip: 'Connect timeout used when establishing connection with remote nodes'
+                })
             .pc-form-grid-col-30
-                +number('Max. connect timeout:', `${communication}.maxConnectTimeout`, '"maxConnectTimeout"', 'true', '600000', '0', 'Maximum connect timeout')
+                +form-field__number({
+                    label: 'Max. connect timeout:',
+                    model: `${communication}.maxConnectTimeout`,
+                    name: '"maxConnectTimeout"',
+                    placeholder: '600000',
+                    min: '0',
+                    tip: 'Maximum connect timeout'
+                })
             .pc-form-grid-col-30
-                +number('Reconnect count:', `${communication}.reconnectCount`, '"comReconnectCount"', 'true', '10', '1',
-                    'Maximum number of reconnect attempts used when establishing connection with remote nodes')
+                +form-field__number({
+                    label: 'Reconnect count:',
+                    model: `${communication}.reconnectCount`,
+                    name: '"comReconnectCount"',
+                    placeholder: '10',
+                    min: '1',
+                    tip: 'Maximum number of reconnect attempts used when establishing connection with remote nodes'
+                })
             .pc-form-grid-col-30
-                +number('Socket send buffer:', `${communication}.socketSendBuffer`, '"socketSendBuffer"', 'true', '32768', '0', 'Send buffer size for sockets created or accepted by this SPI')
+                +form-field__number({
+                    label: 'Socket send buffer:',
+                    model: `${communication}.socketSendBuffer`,
+                    name: '"socketSendBuffer"',
+                    placeholder: '32768',
+                    min: '0',
+                    tip: 'Send buffer size for sockets created or accepted by this SPI'
+                })
             .pc-form-grid-col-30
-                +number('Socket receive buffer:', `${communication}.socketReceiveBuffer`, '"socketReceiveBuffer"', 'true', '32768', '0', 'Receive buffer size for sockets created or accepted by this SPI')
+                +form-field__number({
+                    label: 'Socket receive buffer:',
+                    model: `${communication}.socketReceiveBuffer`,
+                    name: '"socketReceiveBuffer"',
+                    placeholder: '32768',
+                    min: '0',
+                    tip: 'Receive buffer size for sockets created or accepted by this SPI'
+                })
             .pc-form-grid-col-30
-                +number('Slow client queue limit:', `${communication}.slowClientQueueLimit`, '"slowClientQueueLimit"', 'true', '0', '0', 'Slow client queue limit')
+                +form-field__number({
+                    label: 'Slow client queue limit:',
+                    model: `${communication}.slowClientQueueLimit`,
+                    name: '"slowClientQueueLimit"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Slow client queue limit'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Ack send threshold:',
                     model: `${communication}.ackSendThreshold`,
                     name: '"ackSendThreshold"',
@@ -85,7 +186,7 @@
                     tip: 'Number of received messages per connection to node after which acknowledgment message is sent'
                 })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Message queue limit:',
                     model: `${communication}.messageQueueLimit`,
                     name: '"messageQueueLimit"',
@@ -95,7 +196,7 @@
                 })
             .pc-form-grid-col-30
                 //- allowInvalid: true prevents from infinite digest loop when old value was 0 and becomes less than allowed minimum
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Unacknowledged messages:',
                     model: `${communication}.unacknowledgedMessagesBufferSize`,
                     name: '"unacknowledgedMessagesBufferSize"',
@@ -117,18 +218,51 @@
                     }`
                 )
             .pc-form-grid-col-30
-                +number('Socket write timeout:', `${communication}.socketWriteTimeout`, '"socketWriteTimeout"', 'true', '2000', '0', 'Socket write timeout')
+                +form-field__number({
+                    label: 'Socket write timeout:',
+                    model: `${communication}.socketWriteTimeout`,
+                    name: '"socketWriteTimeout"',
+                    placeholder: '2000',
+                    min: '0',
+                    tip: 'Socket write timeout'
+                })
             .pc-form-grid-col-30
-                +number('Selectors count:', `${communication}.selectorsCount`, '"selectorsCount"', 'true', 'min(4, availableProcessors)', '1', 'Count of selectors te be used in TCP server')
+                +form-field__number({
+                    label: 'Selectors count:',
+                    model: `${communication}.selectorsCount`,
+                    name: '"selectorsCount"',
+                    placeholder: 'min(4, availableProcessors)',
+                    min: '1',
+                    tip: 'Count of selectors te be used in TCP server'
+                })
             .pc-form-grid-col-60
-                +java-class('Address resolver:', `${communication}.addressResolver`, '"comAddressResolver"', 'true', 'false', 'Provides resolution between external and internal addresses')
+                +form-field__java-class({
+                    label: 'Address resolver:',
+                    model: `${communication}.addressResolver`,
+                    name: '"comAddressResolver"',
+                    tip: 'Provides resolution between external and internal addresses'
+                })
             .pc-form-grid-col-60
-                +checkbox('Direct buffer', `${communication}.directBuffer`, '"directBuffer"',
-                'If value is true, then SPI will use ByteBuffer.allocateDirect(int) call<br/>\
-                Otherwise, SPI will use ByteBuffer.allocate(int) call')
+                +form-field__checkbox({
+                    label: 'Direct buffer',
+                    model: `${communication}.directBuffer`,
+                    name: '"directBuffer"',
+                    tip: 'If value is true, then SPI will use ByteBuffer.allocateDirect(int) call<br/>\
+                          Otherwise, SPI will use ByteBuffer.allocate(int) call'
+                })
             .pc-form-grid-col-60
-                +checkbox('Direct send buffer', `${communication}.directSendBuffer`, '"directSendBuffer"', 'Flag defining whether direct send buffer should be used')
+                +form-field__checkbox({
+                    label: 'Direct send buffer',
+                    model: `${communication}.directSendBuffer`,
+                    name: '"directSendBuffer"',
+                    tip: 'Flag defining whether direct send buffer should be used'
+                })
             .pc-form-grid-col-60
-                +checkbox('TCP_NODELAY option', `${communication}.tcpNoDelay`, '"tcpNoDelay"', 'Value for TCP_NODELAY socket option')
+                +form-field__checkbox({
+                    label: 'TCP_NODELAY option',
+                    model: `${communication}.tcpNoDelay`,
+                    name: '"tcpNoDelay"',
+                    tip: 'Value for TCP_NODELAY socket option'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterCommunication')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/connector.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/connector.pug
index 76c5016..fd52e5c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/connector.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/connector.pug
@@ -24,77 +24,209 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Connector configuration
     panel-description
-        | Configure HTTP REST configuration to enable HTTP server features. 
+        | Configure HTTP REST configuration to enable HTTP server features.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/rest-api#general-configuration" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"restEnabled"', 'Flag indicating whether to configure connector configuration')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"restEnabled"',
+                    tip: 'Flag indicating whether to configure connector configuration'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Jetty configuration path:', `${model}.jettyPath`, '"connectorJettyPath"', enabled, 'false', 'Input path to Jetty configuration',
-                    'Path, either absolute or relative to IGNITE_HOME, to Jetty XML configuration file<br/>\
-                    Jetty is used to support REST over HTTP protocol for accessing Ignite APIs remotely<br/>\
-                    If not provided, Jetty instance with default configuration will be started picking IgniteSystemProperties.IGNITE_JETTY_HOST and IgniteSystemProperties.IGNITE_JETTY_PORT as host and port respectively')
+                +form-field__text({
+                    label: 'Jetty configuration path:',
+                    model: `${model}.jettyPath`,
+                    name: '"connectorJettyPath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Input path to Jetty configuration',
+                    tip: 'Path, either absolute or relative to IGNITE_HOME, to Jetty XML configuration file<br/>\
+                          Jetty is used to support REST over HTTP protocol for accessing Ignite APIs remotely<br/>\
+                          If not provided, Jetty instance with default configuration will be started picking IgniteSystemProperties.IGNITE_JETTY_HOST and IgniteSystemProperties.IGNITE_JETTY_PORT as host and port respectively'
+                })
             .pc-form-grid-col-20
-                +text-ip-address('TCP host:', `${model}.host`, '"connectorHost"', enabled, 'IgniteConfiguration#getLocalHost()',
-                    'Host for TCP binary protocol server<br/>\
-                    This can be either an IP address or a domain name<br/>\
-                    If not defined, system - wide local address will be used IgniteConfiguration#getLocalHost()<br/>\
-                    You can also use "0.0.0.0" value to bind to all locally - available IP addresses')
+                +form-field__ip-address({
+                    label:'TCP host:',
+                    model: `${model}.host`,
+                    name: '"connectorHost"',
+                    enabled: enabled,
+                    placeholder: 'IgniteConfiguration#getLocalHost()',
+                    tip: 'Host for TCP binary protocol server<br/>\
+                         This can be either an IP address or a domain name<br/>\
+                         If not defined, system - wide local address will be used IgniteConfiguration#getLocalHost()<br/>\
+                         You can also use "0.0.0.0" value to bind to all locally - available IP addresses'
+                })
             .pc-form-grid-col-20
-                +number-min-max('TCP port:', `${model}.port`, '"connectorPort"', enabled, '11211', '1024', '65535', 'Port for TCP binary protocol server')
+                +form-field__number({
+                    label: 'TCP port:',
+                    model: `${model}.port`,
+                    name: '"connectorPort"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '11211',
+                    min: '1024',
+                    max: '65535',
+                    tip: 'Port for TCP binary protocol server'
+                })
             .pc-form-grid-col-20
-                +number('TCP port range:', `${model}.portRange`, '"connectorPortRange"', enabled, '100', '1', 'Number of ports for TCP binary protocol server to try if configured port is already in use')
+                +form-field__number({
+                    label: 'TCP port range:',
+                    model: `${model}.portRange`,
+                    name: '"connectorPortRange"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '100',
+                    min: '1',
+                    tip: 'Number of ports for TCP binary protocol server to try if configured port is already in use'
+                })
             .pc-form-grid-col-60
-                +number('Idle query cursor timeout:', `${model}.idleQueryCursorTimeout`, '"connectorIdleQueryCursorTimeout"', enabled, '600000', '0',
-                    'Reject open query cursors that is not used timeout<br/>\
-                    If no fetch query request come within idle timeout, it will be removed on next check for old query cursors')
+                +form-field__number({
+                    label: 'Idle query cursor timeout:',
+                    model: `${model}.idleQueryCursorTimeout`,
+                    name: '"connectorIdleQueryCursorTimeout"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '600000',
+                    min: '0',
+                    tip: 'Reject open query cursors that is not used timeout<br/>\
+                          If no fetch query request come within idle timeout, it will be removed on next check for old query cursors'
+                })
             .pc-form-grid-col-60
-                +number('Idle query cursor check frequency:', `${model}.idleQueryCursorCheckFrequency`, '"connectorIdleQueryCursorCheckFrequency"', enabled, '60000', '0',
-                    'Idle query cursors check frequency<br/>\
-                    This setting is used to reject open query cursors that is not used')
+                +form-field__number({
+                    label: 'Idle query cursor check frequency:',
+                    model: `${model}.idleQueryCursorCheckFrequency`,
+                    name: '"connectorIdleQueryCursorCheckFrequency"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '60000',
+                    min: '0',
+                    tip: 'Idle query cursors check frequency<br/>\
+                          This setting is used to reject open query cursors that is not used'
+                })
             .pc-form-grid-col-30
-                +number('Idle timeout:', `${model}.idleTimeout`, '"connectorIdleTimeout"', enabled, '7000', '0',
-                    'Idle timeout for REST server<br/>\
-                    This setting is used to reject half - opened sockets<br/>\
-                    If no packets come within idle timeout, the connection is closed')
+                +form-field__number({
+                    label: 'Idle timeout:',
+                    model: `${model}.idleTimeout`,
+                    name: '"connectorIdleTimeout"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '7000',
+                    min: '0',
+                    tip: 'Idle timeout for REST server<br/>\
+                          This setting is used to reject half - opened sockets<br/>\
+                          If no packets come within idle timeout, the connection is closed'
+                })
             .pc-form-grid-col-30
-                +number('Receive buffer size:', `${model}.receiveBufferSize`, '"connectorReceiveBufferSize"', enabled, '32768', '0', 'REST TCP server receive buffer size')
+                +form-field__number({
+                    label: 'Receive buffer size:',
+                    model: `${model}.receiveBufferSize`,
+                    name: '"connectorReceiveBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '32768',
+                    min: '0',
+                    tip: 'REST TCP server receive buffer size'
+                })
             .pc-form-grid-col-30
-                +number('Send buffer size:', `${model}.sendBufferSize`, '"connectorSendBufferSize"', enabled, '32768', '0', 'REST TCP server send buffer size')
+                +form-field__number({
+                    label: 'Send buffer size:',
+                    model: `${model}.sendBufferSize`,
+                    name: '"connectorSendBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '32768',
+                    min: '0',
+                    tip: 'REST TCP server send buffer size'
+                })
             .pc-form-grid-col-30
-                +number('Send queue limit:', `${model}.sendQueueLimit`, '"connectorSendQueueLimit"', enabled, 'unlimited', '0',
-                    'REST TCP server send queue limit<br/>\
-                    If the limit exceeds, all successive writes will block until the queue has enough capacity')
+                +form-field__number({
+                    label: 'Send queue limit:',
+                    model: `${model}.sendQueueLimit`,
+                    name: '"connectorSendQueueLimit"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'unlimited',
+                    min: '0',
+                    tip: 'REST TCP server send queue limit<br/>\
+                         If the limit exceeds, all successive writes will block until the queue has enough capacity'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Direct buffer', `${model}.directBuffer`, '"connectorDirectBuffer"', enabled,
-                    'Flag indicating whether REST TCP server should use direct buffers<br/>\
-                    A direct buffer is a buffer that is allocated and accessed using native system calls, without using JVM heap<br/>\
-                    Enabling direct buffer may improve performance and avoid memory issues(long GC pauses due to huge buffer size)')
+                +form-field__checkbox({
+                    label: 'Direct buffer',
+                    model: `${model}.directBuffer`,
+                    name: '"connectorDirectBuffer"',
+                    disabled: `!${enabled}`,
+                    tip: 'Flag indicating whether REST TCP server should use direct buffers<br/>\
+                          A direct buffer is a buffer that is allocated and accessed using native system calls, without using JVM heap<br/>\
+                          Enabling direct buffer may improve performance and avoid memory issues(long GC pauses due to huge buffer size)'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('TCP_NODELAY option', `${model}.noDelay`, '"connectorNoDelay"', enabled,
-                    'Flag indicating whether TCP_NODELAY option should be set for accepted client connections<br/>\
-                    Setting this option reduces network latency and should be enabled in majority of cases<br/>\
-                    For more information, see Socket#setTcpNoDelay(boolean)')
+                +form-field__checkbox({
+                    label: 'TCP_NODELAY option',
+                    model: `${model}.noDelay`,
+                    name: '"connectorNoDelay"',
+                    disabled: `!${enabled}`,
+                    tip: 'Flag indicating whether TCP_NODELAY option should be set for accepted client connections<br/>\
+                          Setting this option reduces network latency and should be enabled in majority of cases<br/>\
+                          For more information, see Socket#setTcpNoDelay(boolean)'
+                })
             .pc-form-grid-col-30
-                +number('Selector count:', `${model}.selectorCount`, '"connectorSelectorCount"', enabled, 'min(4, availableProcessors)', '1',
-                    'Number of selector threads in REST TCP server<br/>\
-                    Higher value for this parameter may increase throughput, but also increases context switching')
+                +form-field__number({
+                    label: 'Selector count:',
+                    model: `${model}.selectorCount`,
+                    name: '"connectorSelectorCount"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'min(4, availableProcessors)',
+                    min: '1',
+                    tip: 'Number of selector threads in REST TCP server<br/>\
+                          Higher value for this parameter may increase throughput, but also increases context switching'
+                })
             .pc-form-grid-col-30
-                +number('Thread pool size:', `${model}.threadPoolSize`, '"connectorThreadPoolSize"', enabled, 'max(8, availableProcessors) * 2', '1',
-                    'Thread pool size to use for processing of client messages (REST requests)')
+                +form-field__number({
+                    label: 'Thread pool size:',
+                    model: `${model}.threadPoolSize`,
+                    name: '"connectorThreadPoolSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'max(8, availableProcessors) * 2',
+                    min: '1',
+                    tip: 'Thread pool size to use for processing of client messages (REST requests)'
+                })
             .pc-form-grid-col-60
-                +java-class('Message interceptor:', `${model}.messageInterceptor`, '"connectorMessageInterceptor"', enabled, 'false',
-                    'Interceptor allows to transform all objects exchanged via REST protocol<br/>\
-                    For example if you use custom serialisation on client you can write interceptor to transform binary representations received from client to Java objects and later access them from java code directly')
+                +form-field__java-class({
+                    label: 'Message interceptor:',
+                    model: `${model}.messageInterceptor`,
+                    name: '"connectorMessageInterceptor"',
+                    disabled: `!(${enabled})`,
+                    tip: 'Interceptor allows to transform all objects exchanged via REST protocol<br/>\
+                         For example if you use custom serialisation on client you can write interceptor to transform binary representations received from client to Java objects and later access them from java code directly'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Secret key:', `${model}.secretKey`, '"connectorSecretKey"', enabled, 'false', 'Specify to enable authentication', 'Secret key to authenticate REST requests')
+                +form-field__text({
+                    label: 'Secret key:',
+                    model: `${model}.secretKey`,
+                    name: '"connectorSecretKey"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Specify to enable authentication',
+                    tip: 'Secret key to authenticate REST requests'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Enable SSL', `${model}.sslEnabled`, '"connectorSslEnabled"', enabled, 'Enables/disables SSL for REST TCP binary protocol')
+                +form-field__checkbox({
+                    label: 'Enable SSL',
+                    model: `${model}.sslEnabled`,
+                    name: '"connectorSslEnabled"',
+                    disabled: `!${enabled}`,
+                    tip: 'Enables/disables SSL for REST TCP binary protocol'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Enable SSL client auth', `${model}.sslClientAuth`, '"connectorSslClientAuth"', sslEnabled, 'Flag indicating whether or not SSL client authentication is required')
+                +form-field__checkbox({
+                    label: 'Enable SSL client auth',
+                    model: `${model}.sslClientAuth`,
+                    name: '"connectorSslClientAuth"',
+                    disabled: `!(${sslEnabled})`,
+                    tip: 'Flag indicating whether or not SSL client authentication is required'
+                })
             .pc-form-grid-col-60
-                +java-class('SSL factory:', `${model}.sslFactory`, '"connectorSslFactory"', sslEnabled, sslEnabled,
-                    'Instance of Factory that will be used to create an instance of SSLContext for Secure Socket Layer on TCP binary protocol')
+                +form-field__java-class({
+                    label: 'SSL factory:',
+                    model: `${model}.sslFactory`,
+                    name: '"connectorSslFactory"',
+                    disabled: `!(${sslEnabled})`,
+                    required: sslEnabled,
+                    tip: 'Instance of Factory that will be used to create an instance of SSLContext for Secure Socket Layer on TCP binary protocol'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterConnector')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/data-storage.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/data-storage.pug
index ea27c3c..e18b0cb 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/data-storage.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/data-storage.pug
@@ -23,7 +23,7 @@
 
 mixin data-region-form({modelAt, namePlaceholder, dataRegionsAt})
     .pc-form-grid-col-60
-        +sane-ignite-form-field-text({
+        +form-field__text({
             label: 'Name:',
             model: `${modelAt}.name`,
             name: '"name"',
@@ -36,11 +36,11 @@
             ignite-unique-property='name'
             ignite-unique-skip=`["_id", ${modelAt}]`
         )
-            +form-field-feedback(_, 'notInCollection', '{{::$ctrl.Clusters.dataRegion.name.invalidValues[0]}} is reserved for internal use')
-            +form-field-feedback(_, 'igniteUnique', 'Name should be unique')
+            +form-field__error({ error: 'notInCollection', message: '{{::$ctrl.Clusters.dataRegion.name.invalidValues[0]}} is reserved for internal use' })
+            +form-field__error({ error: 'igniteUnique', message: 'Name should be unique' })
 
     .pc-form-grid-col-30
-        pc-form-field-size(
+        form-field-size(
             label='Initial size:'
             ng-model=`${modelAt}.initialSize`
             name='initialSize'
@@ -50,7 +50,7 @@
         )
 
     .pc-form-grid-col-30
-        pc-form-field-size(
+        form-field-size(
             ng-model=`${modelAt}.maxSize`
             ng-model-options='{allowInvalid: true}'
             name='maxSize'
@@ -60,27 +60,45 @@
         )
 
     .pc-form-grid-col-60(ng-if=`!${modelAt}.persistenceEnabled || ${modelAt}.swapPath`)
-        +text('Swap file path:', `${modelAt}.swapPath`, '"swapPath"', 'false', 'Input swap file path', 'An optional path to a memory mapped file for this data region')
-        
-    .pc-form-grid-col-60
-        +number('Checkpoint page buffer:', `${modelAt}.checkpointPageBufferSize`, '"checkpointPageBufferSize"', 'true', '0', '0', 'Amount of memory allocated for a checkpoint temporary buffer in bytes')
+        +form-field__text({
+            label: 'Swap file path:',
+            model: `${modelAt}.swapPath`,
+            name: '"swapPath"',
+            placeholder: 'Input swap file path',
+            tip: 'An optional path to a memory mapped file for this data region'
+        })
 
     .pc-form-grid-col-60
-        +dropdown('Eviction mode:', `${modelAt}.pageEvictionMode`, '"pageEvictionMode"', 'true', 'DISABLED',
-            '[\
+        +form-field__number({
+            label: 'Checkpoint page buffer:',
+            model: `${modelAt}.checkpointPageBufferSize`,
+            name: '"checkpointPageBufferSize"',
+            placeholder: '0',
+            min: '0',
+            tip: 'Amount of memory allocated for a checkpoint temporary buffer in bytes'
+        })
+
+    .pc-form-grid-col-60
+        +form-field__dropdown({
+            label: 'Entry versioning:',
+            model: `${modelAt}.pageEvictionMode`,
+            name: '"pageEvictionMode"',
+            placeholder: 'DISABLED',
+            options: '[\
                 {value: "DISABLED", label: "DISABLED"},\
                 {value: "RANDOM_LRU", label: "RANDOM_LRU"},\
                 {value: "RANDOM_2_LRU", label: "RANDOM_2_LRU"}\
             ]',
-            `An algorithm for memory pages eviction
-            <ul>
-                <li>DISABLED - Eviction is disabled</li>
-                <li>RANDOM_LRU - Once a memory region defined by a data region is configured, an off-heap array is allocated to track last usage timestamp for every individual data page</li>
-                <li>RANDOM_2_LRU - Differs from Random - LRU only in a way that two latest access timestamps are stored for every data page</li>
-            </ul>`)
+            tip: `An algorithm for memory pages eviction
+                <ul>
+                    <li>DISABLED - Eviction is disabled</li>
+                    <li>RANDOM_LRU - Once a memory region defined by a data region is configured, an off-heap array is allocated to track last usage timestamp for every individual data page</li>
+                    <li>RANDOM_2_LRU - Differs from Random - LRU only in a way that two latest access timestamps are stored for every data page</li>
+                </ul>`
+        })
 
     .pc-form-grid-col-30
-        +sane-ignite-form-field-number({
+        +form-field__number({
             label: 'Eviction threshold:',
             model: `${modelAt}.evictionThreshold`,
             name: '"evictionThreshold"',
@@ -92,7 +110,7 @@
         })
 
     .pc-form-grid-col-30
-        +sane-ignite-form-field-number({
+        +form-field__number({
             label: 'Empty pages pool size:',
             model: `${modelAt}.emptyPagesPoolSize`,
             name: '"emptyPagesPoolSize"',
@@ -103,7 +121,7 @@
         })
 
     .pc-form-grid-col-30
-        +sane-ignite-form-field-number({
+        +form-field__number({
             label: 'Metrics sub interval count:',
             model: `${modelAt}.metricsSubIntervalCount`,
             name: '"metricsSubIntervalCount"',
@@ -114,7 +132,7 @@
         })
 
     .pc-form-grid-col-30
-        pc-form-field-size(
+        form-field-size(
             ng-model=`${modelAt}.metricsRateTimeInterval`
             ng-model-options='{allowInvalid: true}'
             name='metricsRateTimeInterval'
@@ -126,24 +144,32 @@
             on-scale-change='_metricsRateTimeIntervalScale = $event'
             size-scale-label='s'
         )
-            
+
     .pc-form-grid-col-60
-        +checkbox('Metrics enabled', `${modelAt}.metricsEnabled`, '"MemoryPolicyMetricsEnabled"',
-            'Whether memory metrics are enabled by default on node startup')
+        +form-field__checkbox({
+            label: 'Metrics enabled',
+            model: `${modelAt}.metricsEnabled`,
+            name: '"MemoryPolicyMetricsEnabled"',
+            tip: 'Whether memory metrics are enabled by default on node startup'
+        })
 
     .pc-form-grid-col-60(ng-if=`!${modelAt}.swapPath`)
-        +checkbox('Persistence enabled', `${modelAt}.persistenceEnabled`, '"RegionPersistenceEnabled" + $index',
-            'Enable Ignite Native Persistence')
+        +form-field__checkbox({
+            label: 'Persistence enabled',
+            model: `${modelAt}.persistenceEnabled`,
+            name: '"RegionPersistenceEnabled" + $index',
+            tip: 'Enable Ignite Native Persistence'
+        })
 
 panel-collapsible(ng-show='$ctrl.available("2.3.0")' ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Data storage configuration
     panel-description
-        | Page memory is a manageable off-heap based memory architecture that is split into pages of fixed size. 
+        | Page memory is a manageable off-heap based memory architecture that is split into pages of fixed size.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/distributed-persistent-store" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`$ctrl.available("2.3.0") && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Page size:',
                     model: `${model}.pageSize`,
                     name: '"DataStorageConfigurationPageSize"',
@@ -151,13 +177,19 @@
                     tip: 'Every memory region is split on pages of fixed size'
                 })
             .pc-form-grid-col-30
-                +number('Concurrency level:', model + '.concurrencyLevel', '"DataStorageConfigurationConcurrencyLevel"',
-                    'true', 'availableProcessors', '2', 'The number of concurrent segments in Ignite internal page mapping tables')
+                +form-field__number({
+                    label: 'Concurrency level:',
+                    model: model + '.concurrencyLevel',
+                    name: '"DataStorageConfigurationConcurrencyLevel"',
+                    placeholder: 'availableProcessors',
+                    min: '2',
+                    tip: 'The number of concurrent segments in Ignite internal page mapping tables'
+                })
             .pc-form-grid-col-60.pc-form-group__text-title
                 span System region
             .pc-form-group.pc-form-grid-row
                 .pc-form-grid-col-30
-                    pc-form-field-size(
+                    form-field-size(
                         label='Initial size:'
                         ng-model=`${model}.systemRegionInitialSize`
                         name='DataStorageSystemRegionInitialSize'
@@ -167,7 +199,7 @@
                         on-scale-change='systemRegionInitialSizeScale = $event'
                     )
                 .pc-form-grid-col-30
-                    pc-form-field-size(
+                    form-field-size(
                         label='Max size:'
                         ng-model=`${model}.systemRegionMaxSize`
                         name='DataStorageSystemRegionMaxSize'
@@ -186,119 +218,259 @@
                 })
             .pc-form-grid-col-60
                 .ignite-form-field
-                    .ignite-form-field__label Data region configurations
-                    .ignite-form-field__control
-                        list-editable.pc-list-editable-with-form-grid(
-                            name='dataRegionConfigurations'
-                            ng-model=dataRegionConfigurations
-                        )
-                            list-editable-item-edit.pc-form-grid-row
-                                - form = '$parent.form'
-                                +data-region-form({
-                                    modelAt: '$item',
-                                    namePlaceholder: 'Data region name',
-                                    dataRegionsAt: dataRegionConfigurations
-                                })
-                                - form = 'dataStorageConfiguration'
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$ctrl.Clusters.addDataRegionConfiguration($ctrl.clonedCluster)`
-                                    label-single='data region configuration'
-                                    label-multiple='data region configurations'
-                                )
+                    +form-field__label({ label: 'Data region configurations' })
+
+                    list-editable.pc-list-editable-with-form-grid(
+                        name='dataRegionConfigurations'
+                        ng-model=dataRegionConfigurations
+                    )
+                        list-editable-item-edit.pc-form-grid-row
+                            - form = '$parent.form'
+                            +data-region-form({
+                                modelAt: '$item',
+                                namePlaceholder: 'Data region name',
+                                dataRegionsAt: dataRegionConfigurations
+                            })
+                            - form = 'dataStorageConfiguration'
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$ctrl.Clusters.addDataRegionConfiguration($ctrl.clonedCluster)`
+                                label-single='data region configuration'
+                                label-multiple='data region configurations'
+                            )
 
             .pc-form-grid-col-60
-                +text-enabled('Storage path:', `${model}.storagePath`, '"DataStoragePath"', 'true', 'false', 'db',
-                    'Directory where index and partition files are stored')
+                +form-field__text({
+                    label: 'Storage path:',
+                    model: `${model}.storagePath`,
+                    name: '"DataStoragePath"',
+                    placeholder: 'db',
+                    tip: 'Directory where index and partition files are stored'
+                })
             .pc-form-grid-col-60
-                +number('Checkpoint frequency:', `${model}.checkpointFrequency`, '"DataStorageCheckpointFrequency"', 'true', '180000', '1',
-                    'Frequency which is a minimal interval when the dirty pages will be written to the Persistent Store')
+                +form-field__number({
+                    label: 'Checkpoint frequency:',
+                    model: `${model}.checkpointFrequency`,
+                    name: '"DataStorageCheckpointFrequency"',
+                    placeholder: '180000',
+                    min: '1',
+                    tip: 'Frequency which is a minimal interval when the dirty pages will be written to the Persistent Store'
+                })
             .pc-form-grid-col-20
-                +number('Checkpoint threads:', `${model}.checkpointThreads`, '"DataStorageCheckpointThreads"', 'true', '4', '1', 'A number of threads to use for the checkpoint purposes')
+                +form-field__number({
+                    label: 'Checkpoint threads:',
+                    model: `${model}.checkpointThreads`,
+                    name: '"DataStorageCheckpointThreads"',
+                    placeholder: '4',
+                    min: '1',
+                    tip: 'A number of threads to use for the checkpoint purposes'
+                })
             .pc-form-grid-col-20
-                +dropdown('Checkpoint write order:', `${model}.checkpointWriteOrder`, '"DataStorageCheckpointWriteOrder"', 'true', 'SEQUENTIAL',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Checkpoint write order:',
+                    model: `${model}.checkpointWriteOrder`,
+                    name: '"DataStorageCheckpointWriteOrder"',
+                    placeholder: 'SEQUENTIAL',
+                    options: '[\
                         {value: "RANDOM", label: "RANDOM"},\
                         {value: "SEQUENTIAL", label: "SEQUENTIAL"}\
                     ]',
-                    'Order of writing pages to disk storage during checkpoint.\
-                    <ul>\
-                        <li>RANDOM - Pages are written in order provided by checkpoint pages collection iterator</li>\
-                        <li>SEQUENTIAL - All checkpoint pages are collected into single list and sorted by page index</li>\
-                    </ul>')
+                    tip: 'Order of writing pages to disk storage during checkpoint.\
+                        <ul>\
+                            <li>RANDOM - Pages are written in order provided by checkpoint pages collection iterator</li>\
+                            <li>SEQUENTIAL - All checkpoint pages are collected into single list and sorted by page index</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-20
-                +dropdown('WAL mode:', `${model}.walMode`, '"DataStorageWalMode"', 'true', 'DEFAULT',
-                    '[\
+                +form-field__dropdown({
+                    label: 'WAL mode:',
+                    model: `${model}.walMode`,
+                    name: '"DataStorageWalMode"',
+                    placeholder: 'DEFAULT',
+                    options: '[\
                         {value: "DEFAULT", label: "DEFAULT"},\
                         {value: "LOG_ONLY", label: "LOG_ONLY"},\
                         {value: "BACKGROUND", label: "BACKGROUND"},\
                         {value: "NONE", label: "NONE"}\
                     ]',
-                    'Type define behavior wal fsync.\
-                    <ul>\
-                        <li>DEFAULT - full-sync disk writes</li>\
-                        <li>LOG_ONLY - flushes application buffers</li>\
-                        <li>BACKGROUND - does not force application&#39;s buffer flush</li>\
-                        <li>NONE - WAL is disabled</li>\
-                    </ul>')
+                    tip: 'Type define behavior wal fsync.\
+                        <ul>\
+                            <li>DEFAULT - full-sync disk writes</li>\
+                            <li>LOG_ONLY - flushes application buffers</li>\
+                            <li>BACKGROUND - does not force application&#39;s buffer flush</li>\
+                            <li>NONE - WAL is disabled</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-60
-                +text-enabled('WAL path:', `${model}.walPath`, '"DataStorageWalPath"', 'true', 'false', 'db/wal', 'A path to the directory where WAL is stored')
+                +form-field__text({
+                    label: 'WAL path:',
+                    model: `${model}.walPath`,
+                    name: '"DataStorageWalPath"',
+                    placeholder: 'db/wal',
+                    tip: 'A path to the directory where WAL is stored'
+                })
             .pc-form-grid-col-60
-                +text-enabled('WAL archive path:', `${model}.walArchivePath`, '"DataStorageWalArchivePath"', 'true', 'false', 'db/wal/archive', 'A path to the WAL archive directory')
+                +form-field__text({
+                    label: 'WAL archive path:',
+                    model: `${model}.walArchivePath`,
+                    name: '"DataStorageWalArchivePath"',
+                    placeholder: 'db/wal/archive',
+                    tip: 'A path to the WAL archive directory'
+                })
             .pc-form-grid-col-20
-                +number('WAL segments:', `${model}.walSegments`, '"DataStorageWalSegments"', 'true', '10', '1', 'A number of WAL segments to work with')
+                +form-field__number({
+                    label: 'WAL segments:',
+                    model: `${model}.walSegments`,
+                    name: '"DataStorageWalSegments"',
+                    placeholder: '10',
+                    min: '1',
+                    tip: 'A number of WAL segments to work with'
+                })
             .pc-form-grid-col-20
-                +number('WAL segment size:', `${model}.walSegmentSize`, '"DataStorageWalSegmentSize"', 'true', '67108864', '0', 'Size of a WAL segment')
+                +form-field__number({
+                    label: 'WAL segment size:',
+                    model: `${model}.walSegmentSize`,
+                    name: '"DataStorageWalSegmentSize"',
+                    placeholder: '67108864',
+                    min: '0',
+                    tip: 'Size of a WAL segment'
+                })
             .pc-form-grid-col-20
-                +number('WAL history size:', `${model}.walHistorySize`, '"DataStorageWalHistorySize"', 'true', '20', '1', 'A total number of checkpoints to keep in the WAL history')
+                +form-field__number({
+                    label: 'WAL history size:',
+                    model: `${model}.walHistorySize`,
+                    name: '"DataStorageWalHistorySize"',
+                    placeholder: '20',
+                    min: '1',
+                    tip: 'A total number of checkpoints to keep in the WAL history'
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.4.0")')
-                +number('WAL buffer size:', `${model}.walBufferSize`, '"DataStorageWalBufferSize"', 'true', 'WAL segment size / 4', '1',
-                    'Size of WAL buffer')
+                +form-field__number({
+                    label: 'WAL buffer size:',
+                    model: `${model}.walBufferSize`,
+                    name: '"DataStorageWalBufferSize"',
+                    placeholder: 'WAL segment size / 4',
+                    min: '1',
+                    tip: 'Size of WAL buffer'
+                })
             .pc-form-grid-col-30
-                +number('WAL flush frequency:', `${model}.walFlushFrequency`, '"DataStorageWalFlushFrequency"', 'true', '2000', '1',
-                    'How often will be fsync, in milliseconds. In background mode, exist thread which do fsync by timeout')
+                +form-field__number({
+                    label: 'WAL flush frequency:',
+                    model: `${model}.walFlushFrequency`,
+                    name: '"DataStorageWalFlushFrequency"',
+                    placeholder: '2000',
+                    min: '1',
+                    tip: 'How often will be fsync, in milliseconds. In background mode, exist thread which do fsync by timeout'
+                })
             .pc-form-grid-col-30
-                +number('WAL fsync delay:', `${model}.walFsyncDelayNanos`, '"DataStorageWalFsyncDelay"', 'true', '1000', '1', 'WAL fsync delay, in nanoseconds')
+                +form-field__number({
+                    label: 'WAL fsync delay:',
+                    model: `${model}.walFsyncDelayNanos`,
+                    name: '"DataStorageWalFsyncDelay"',
+                    placeholder: '1000',
+                    min: '1',
+                    tip: 'WAL fsync delay, in nanoseconds'
+                })
             .pc-form-grid-col-60
-                +number('WAL record iterator buffer size:', `${model}.walRecordIteratorBufferSize`, '"DataStorageWalRecordIteratorBufferSize"', 'true', '67108864', '1',
-                    'How many bytes iterator read from disk(for one reading), during go ahead WAL')
+                +form-field__number({
+                    label: 'WAL record iterator buffer size:',
+                    model: `${model}.walRecordIteratorBufferSize`,
+                    name: '"DataStorageWalRecordIteratorBufferSize"',
+                    placeholder: '67108864',
+                    min: '1',
+                    tip: 'How many bytes iterator read from disk(for one reading), during go ahead WAL'
+                })
             .pc-form-grid-col-30
-                +number('Lock wait time:', `${model}.lockWaitTime`, '"DataStorageLockWaitTime"', 'true', '10000', '1',
-                    'Time out in milliseconds, while wait and try get file lock for start persist manager')
+                +form-field__number({
+                    label: 'Lock wait time:',
+                    model: `${model}.lockWaitTime`,
+                    name: '"DataStorageLockWaitTime"',
+                    placeholder: '10000',
+                    min: '1',
+                    tip: 'Time out in milliseconds, while wait and try get file lock for start persist manager'
+                })
             .pc-form-grid-col-30
-                +number('WAL thread local buffer size:', `${model}.walThreadLocalBufferSize`, '"DataStorageWalThreadLocalBufferSize"', 'true', '131072', '1',
-                    'Define size thread local buffer. Each thread which write to WAL have thread local buffer for serialize recode before write in WAL')
+                +form-field__number({
+                    label: 'WAL thread local buffer size:',
+                    model: `${model}.walThreadLocalBufferSize`,
+                    name: '"DataStorageWalThreadLocalBufferSize"',
+                    placeholder: '131072',
+                    min: '1',
+                    tip: 'Define size thread local buffer. Each thread which write to WAL have thread local buffer for serialize recode before write in WAL'
+                })
             .pc-form-grid-col-30
-                +number('Metrics sub interval count:', `${model}.metricsSubIntervalCount`, '"DataStorageMetricsSubIntervalCount"', 'true', '5', '1',
-                    'Number of sub - intervals the whole rate time interval will be split into to calculate rate - based metrics')
+                +form-field__number({
+                    label: 'Metrics sub interval count:',
+                    model: `${model}.metricsSubIntervalCount`,
+                    name: '"DataStorageMetricsSubIntervalCount"',
+                    placeholder: '5',
+                    min: '1',
+                    tip: 'Number of sub - intervals the whole rate time interval will be split into to calculate rate - based metrics'
+                })
             .pc-form-grid-col-30
-                +number('Metrics rate time interval:', `${model}.metricsRateTimeInterval`, '"DataStorageMetricsRateTimeInterval"', 'true', '60000', '1000',
-                    'The length of the time interval for rate - based metrics. This interval defines a window over which hits will be tracked')
+                +form-field__number({
+                    label: 'Metrics rate time interval:',
+                    model: `${model}.metricsRateTimeInterval`,
+                    name: '"DataStorageMetricsRateTimeInterval"',
+                    placeholder: '60000',
+                    min: '1000',
+                    tip: 'The length of the time interval for rate - based metrics. This interval defines a window over which hits will be tracked'
+                })
             .pc-form-grid-col-30
-                +dropdown('File IO factory:', `${model}.fileIOFactory`, '"DataStorageFileIOFactory"', 'true', 'Default',
-                    '[\
+                +form-field__dropdown({
+                    label: 'File IO factory:',
+                    model: `${model}.fileIOFactory`,
+                    name: '"DataStorageFileIOFactory"',
+                    placeholder: 'Default',
+                    options: '[\
                         {value: "RANDOM", label: "RANDOM"},\
                         {value: "ASYNC", label: "ASYNC"},\
                         {value: null, label: "Default"},\
                     ]',
-                    'Order of writing pages to disk storage during checkpoint.\
-                    <ul>\
-                        <li>RANDOM - Pages are written in order provided by checkpoint pages collection iterator</li>\
-                        <li>SEQUENTIAL - All checkpoint pages are collected into single list and sorted by page index</li>\
-                    </ul>')
+                    tip: 'Order of writing pages to disk storage during checkpoint.\
+                        <ul>\
+                            <li>RANDOM - Pages are written in order provided by checkpoint pages collection iterator</li>\
+                            <li>SEQUENTIAL - All checkpoint pages are collected into single list and sorted by page index</li>\
+                        </ul>'
+                })
+
             .pc-form-grid-col-30
-                +number('WAL auto archive after inactivity:', `${model}.walAutoArchiveAfterInactivity`, '"DataStorageWalAutoArchiveAfterInactivity"', 'true', '-1', '-1',
-                    'Time in millis to run auto archiving segment after last record logging')
+                +form-field__number({
+                    label: 'WAL auto archive after inactivity:',
+                    model: `${model}.walAutoArchiveAfterInactivity`,
+                    name: '"DataStorageWalAutoArchiveAfterInactivity"',
+                    placeholder: '-1',
+                    min: '-1',
+                    tip: 'Time in millis to run auto archiving segment after last record logging'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Metrics enabled', `${model}.metricsEnabled`, '"DataStorageMetricsEnabled"', 'true', 'Flag indicating whether persistence metrics collection is enabled')
+                +form-field__checkbox({
+                    label: 'Metrics enabled',
+                    model: `${model}.metricsEnabled`,
+                    name: '"DataStorageMetricsEnabled"',
+                    tip: 'Flag indicating whether persistence metrics collection is enabled'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Always write full pages', `${model}.alwaysWriteFullPages`, '"DataStorageAlwaysWriteFullPages"', 'true', 'Flag indicating whether always write full pages')
+                +form-field__checkbox({
+                    label: 'Always write full pages',
+                    model: `${model}.alwaysWriteFullPages`,
+                    name: '"DataStorageAlwaysWriteFullPages"',
+                    tip: 'Flag indicating whether always write full pages'
+                })
             .pc-form-grid-col-60
-                +checkbox('Write throttling enabled', `${model}.writeThrottlingEnabled`, '"DataStorageWriteThrottlingEnabled"',
-                    'Throttle threads that generate dirty pages too fast during ongoing checkpoint')
+                +form-field__checkbox({
+                    label: 'Write throttling enabled',
+                    model: `${model}.writeThrottlingEnabled`,
+                    name: '"DataStorageWriteThrottlingEnabled"',
+                    tip: 'Throttle threads that generate dirty pages too fast during ongoing checkpoint'
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.4.0")')
-                +checkbox('Enable WAL compaction', `${model}.walCompactionEnabled`, '"DataStorageWalCompactionEnabled"',
-                    'If true, system filters and compresses WAL archive in background')
+                +form-field__checkbox({
+                    label: 'Enable WAL compaction',
+                    model: `${model}.walCompactionEnabled`,
+                    name: '"DataStorageWalCompactionEnabled"',
+                    tip: 'If true, system filters and compresses WAL archive in background'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterDataStorageConfiguration')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/deployment.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/deployment.pug
index 1f0b615..1d93886 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/deployment.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/deployment.pug
@@ -35,34 +35,58 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('Deployment mode:', `${model}.deploymentMode`, '"deploymentMode"', 'true', 'SHARED',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Deployment mode:',
+                    model: `${model}.deploymentMode`,
+                    name: '"deploymentMode"',
+                    placeholder: 'SHARED',
+                    options: '[\
                         {value: "PRIVATE", label: "PRIVATE"},\
                         {value: "ISOLATED", label: "ISOLATED"}, \
                         {value: "SHARED", label: "SHARED"},\
                         {value: "CONTINUOUS", label: "CONTINUOUS"}\
                     ]',
-                    'Task classes and resources sharing mode<br/>\
+                    tip: 'Task classes and resources sharing mode<br/>\
                     The following deployment modes are supported:\
-                    <ul>\
-                        <li>PRIVATE - in this mode deployed classes do not share resources</li>\
-                        <li>ISOLATED - in this mode tasks or classes deployed within the same class loader will share the same instances of resources</li>\
-                        <li>SHARED - same as ISOLATED, but now tasks from different master nodes with the same user version and same class loader will share the same class loader on remote nodes</li>\
-                        <li>CONTINUOUS - same as SHARED deployment mode, but resources will not be undeployed even after all master nodes left grid</li>\
-                    </ul>')
+                        <ul>\
+                            <li>PRIVATE - in this mode deployed classes do not share resources</li>\
+                            <li>ISOLATED - in this mode tasks or classes deployed within the same class loader will share the same instances of resources</li>\
+                            <li>SHARED - same as ISOLATED, but now tasks from different master nodes with the same user version and same class loader will share the same class loader on remote nodes</li>\
+                            <li>CONTINUOUS - same as SHARED deployment mode, but resources will not be undeployed even after all master nodes left grid</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-60
-                +checkbox('Enable peer class loading', `${model}.peerClassLoadingEnabled`, '"peerClassLoadingEnabled"', 'Enables/disables peer class loading')
+                +form-field__checkbox({
+                    label: 'Enable peer class loading',
+                    model: `${model}.peerClassLoadingEnabled`,
+                    name: '"peerClassLoadingEnabled"',
+                    tip: 'Enables/disables peer class loading'
+                })
             .pc-form-grid-col-60
-                +number('Missed resources cache size:', `${model}.peerClassLoadingMissedResourcesCacheSize`, '"peerClassLoadingMissedResourcesCacheSize"', enabled, '100', '0',
-                    'If size greater than 0, missed resources will be cached and next resource request ignored<br/>\
-                    If size is 0, then request for the resource will be sent to the remote node every time this resource is requested')
+                +form-field__number({
+                    label: 'Missed resources cache size:',
+                    model: `${model}.peerClassLoadingMissedResourcesCacheSize`,
+                    name: '"peerClassLoadingMissedResourcesCacheSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '100',
+                    min: '0',
+                    tip: 'If size greater than 0, missed resources will be cached and next resource request ignored<br/>\
+                          If size is 0, then request for the resource will be sent to the remote node every time this resource is requested'
+                })
             .pc-form-grid-col-60
-                +number('Pool size:', `${model}.peerClassLoadingThreadPoolSize`, '"peerClassLoadingThreadPoolSize"', enabled, '2', '1', 'Thread pool size to use for peer class loading')
+                +form-field__number({
+                    label: 'Pool size:',
+                    model: `${model}.peerClassLoadingThreadPoolSize`,
+                    name: '"peerClassLoadingThreadPoolSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '2',
+                    min: '1',
+                    tip: 'Thread pool size to use for peer class loading'
+                })
             .pc-form-grid-col-60
                 mixin clusters-deployment-packages
                     .ignite-form-field
                         -let items = exclude
-                        -var uniqueTip = 'Such package already exists!'
 
                         list-editable(
                             ng-model=items
@@ -77,10 +101,17 @@
                             list-editable-item-view {{ $item }}
 
                             list-editable-item-edit
-                                +list-java-package-field('Package name', '$item', '"packageName"', items)(
-                                    ignite-auto-focus
+                                +form-field__java-package({
+                                    label: 'Package name',
+                                    model: '$item',
+                                    name: '"packageName"',
+                                    placeholder: 'Enter package name',
+                                    required: enabled
+                                })(
+                                    ignite-unique=items
+                                    ignite-form-field-input-autofocus='true'
                                 )
-                                    +unique-feedback('"packageName"', uniqueTip)
+                                    +form-field__error({ error: 'igniteUnique', message: 'Such package already exists!' })
 
                             list-editable-no-items
                                 list-editable-add-item-button(
@@ -95,30 +126,38 @@
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +java-class('Class loader:', model + '.classLoader', '"classLoader"', 'true', 'false',
-                    'Loader which will be used for instantiating execution context')
+                +form-field__java-class({
+                    label: 'Class loader:',
+                    model: `${model}.classLoader`,
+                    name: '"classLoader"',
+                    tip: 'Loader which will be used for instantiating execution context'
+                })
 
             .pc-form-grid-col-60
-                +dropdown('Deployment variant:', modelDeployment + '.kind', '"deploymentKind"', 'true', 'Default',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Deployment variant:',
+                    model: `${modelDeployment}.kind`,
+                    name: '"deploymentKind"',
+                    placeholder: 'Default',
+                    options: '[\
                         {value: "URI", label: "URI"},\
                         {value: "Local", label: "Local"}, \
                         {value: "Custom", label: "Custom"},\
                         {value: null, label: "Default"}\
                     ]',
-                    'Grid deployment SPI is in charge of deploying tasks and classes from different sources:\
-                    <ul>\
-                        <li>URI - Deploy tasks from different sources like file system folders, email and HTTP</li>\
-                        <li>Local - Only within VM deployment on local node</li>\
-                        <li>Custom - Custom implementation of DeploymentSpi</li>\
-                        <li>Default - Default configuration of LocalDeploymentSpi will be used</li>\
-                    </ul>')
+                    tip: 'Grid deployment SPI is in charge of deploying tasks and classes from different sources:\
+                        <ul>\
+                            <li>URI - Deploy tasks from different sources like file system folders, email and HTTP</li>\
+                            <li>Local - Only within VM deployment on local node</li>\
+                            <li>Custom - Custom implementation of DeploymentSpi</li>\
+                            <li>Default - Default configuration of LocalDeploymentSpi will be used</li>\
+                        </ul>'
+                })
             .pc-form-group(ng-show=uriDeployment).pc-form-grid-row
                 .pc-form-grid-col-60
                     mixin clusters-deployment-uri
                         .ignite-form-field
                             -let items = uriListModel
-                            -var uniqueTip = 'Such URI already configured!'
 
                             list-editable(
                                 ng-model=items
@@ -132,7 +171,7 @@
 
                                 list-editable-item-edit
                                     +list-url-field('URL', '$item', '"url"', items)
-                                        +unique-feedback('"url"', uniqueTip)
+                                        +form-field__error({ error: 'igniteUnique', message: 'Such URI already configured!' })
 
                                 list-editable-no-items
                                     list-editable-add-item-button(
@@ -146,13 +185,17 @@
                     - var form = 'deployment'
 
                 .pc-form-grid-col-60
-                    +text('Temporary directory path:', modelDeployment + '.URI.temporaryDirectoryPath', '"DeploymentURITemporaryDirectoryPath"', 'false', 'Temporary directory path',
-                    'Absolute path to temporary directory which will be used by deployment SPI to keep all deployed classes in')
+                    +form-field__text({
+                        label: 'Temporary directory path:',
+                        model: modelDeployment + '.URI.temporaryDirectoryPath',
+                        name: '"DeploymentURITemporaryDirectoryPath"',
+                        placeholder: 'Temporary directory path',
+                        tip: 'Absolute path to temporary directory which will be used by deployment SPI to keep all deployed classes in'
+                    })
                 .pc-form-grid-col-60
                     mixin clusters-deployment-scanner
                         .ignite-form-field
                             -let items = scannerModel
-                            -var uniqueTip = 'Such scanner already configured!'
 
                             list-editable(
                                 ng-model=items
@@ -163,7 +206,7 @@
 
                                 list-editable-item-edit
                                     +list-java-class-field('Scanner', '$item', '"scanner"', items)
-                                        +unique-feedback('"scanner"', uniqueTip)
+                                        +form-field__error({ error: 'igniteUnique', message: 'Such scanner already configured!' })
 
                                 list-editable-no-items
                                     list-editable-add-item-button(
@@ -177,16 +220,45 @@
                     - var form = 'deployment'
 
                 .pc-form-grid-col-60
-                    +java-class('Listener:', `${modelDeployment}.URI.listener`, '"DeploymentURIListener"', 'true', 'false', 'Deployment event listener', uriDeployment)
+                    +form-field__java-class({
+                        label: 'Listener:',
+                        model: `${modelDeployment}.URI.listener`,
+                        name: '"DeploymentURIListener"',
+                        tip: 'Deployment event listener',
+                        validationActive: uriDeployment
+                    })
                 .pc-form-grid-col-60
-                    +checkbox('Check MD5', `${modelDeployment}.URI.checkMd5`, '"DeploymentURICheckMd5"', 'Exclude files with same md5s from deployment')
+                    +form-field__checkbox({
+                        label: 'Check MD5',
+                        model: `${modelDeployment}.URI.checkMd5`,
+                        name: '"DeploymentURICheckMd5"',
+                        tip: 'Exclude files with same md5s from deployment'
+                    })
                 .pc-form-grid-col-60
-                    +checkbox('Encode URI', `${modelDeployment}.URI.encodeUri`, '"DeploymentURIEncodeUri"', 'URI must be encoded before usage')
+                    +form-field__checkbox({
+                        label: 'Encode URI',
+                        model: `${modelDeployment}.URI.encodeUri`,
+                        name: '"DeploymentURIEncodeUri"',
+                        tip: 'URI must be encoded before usage'
+                    })
             .pc-form-group(ng-show=localDeployment).pc-form-grid-row
                 .pc-form-grid-col-60
-                    +java-class('Listener:', `${modelDeployment}.Local.listener`, '"DeploymentLocalListener"', 'true', 'false', 'Deployment event listener', localDeployment)
+                    +form-field__java-class({
+                        label: 'Listener:',
+                        model: `${modelDeployment}.Local.listener`,
+                        name: '"DeploymentLocalListener"',
+                        tip: 'Deployment event listener',
+                        validationActive: localDeployment
+                    })
             .pc-form-group(ng-show=customDeployment).pc-form-grid-row
                 .pc-form-grid-col-60
-                    +java-class('Class:', `${modelDeployment}.Custom.className`, '"DeploymentCustom"', 'true', customDeployment, 'DeploymentSpi implementation class', customDeployment)
+                    +form-field__java-class({
+                        label: 'Class:',
+                        model: `${modelDeployment}.Custom.className`,
+                        name: '"DeploymentCustom"',
+                        required: customDeployment,
+                        tip: 'DeploymentSpi implementation class',
+                        validationActive: customDeployment
+                    })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterDeployment')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/discovery.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/discovery.pug
index d0a9102..0f777f9 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/discovery.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/discovery.pug
@@ -27,71 +27,205 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-20
-                +text-ip-address('Local address:', `${model}.localAddress`, '"discoLocalAddress"', 'true', '228.1.2.4',
-                    'Local host IP address that discovery SPI uses<br/>\
-                    If not provided a first found non-loopback address will be used')
+                +form-field__ip-address({
+                    label: 'Local address:',
+                    model: `${model}.localAddress`,
+                    name: '"discoLocalAddress"',
+                    enabled: 'true',
+                    placeholder: '228.1.2.4',
+                    tip: 'Local host IP address that discovery SPI uses<br/>\
+                         If not provided a first found non-loopback address will be used'
+                })
             .pc-form-grid-col-20
-                +number-min-max('Local port:', `${model}.localPort`, '"discoLocalPort"', 'true', '47500', '1024', '65535', 'Local port which node uses')
+                +form-field__number({
+                    label: 'Local port:',
+                    model: `${model}.localPort`,
+                    name: '"discoLocalPort"',
+                    placeholder: '47500',
+                    min: '1024',
+                    max: '65535',
+                    tip: 'Local port which node uses'
+                })
             .pc-form-grid-col-20
-                +number('Local port range:', `${model}.localPortRange`, '"discoLocalPortRange"', 'true', '100', '1', 'Local port range')
+                +form-field__number({
+                    label: 'Local port range:',
+                    model: `${model}.localPortRange`,
+                    name: '"discoLocalPortRange"',
+                    placeholder: '100',
+                    min: '1',
+                    tip: 'Local port range'
+                })
             .pc-form-grid-col-60
-                +java-class('Address resolver:', `${model}.addressResolver`, '"discoAddressResolver"', 'true', 'false',
-                    'Provides resolution between external and internal addresses')
+                +form-field__java-class({
+                    label:'Address resolver:',
+                    model: `${model}.addressResolver`,
+                    name: '"discoAddressResolver"',
+                    tip: 'Provides resolution between external and internal addresses'
+                })
             .pc-form-grid-col-30
-                +number('Socket timeout:', `${model}.socketTimeout`, '"socketTimeout"', 'true', '5000', '0', 'Socket operations timeout')
+                +form-field__number({
+                    label: 'Socket timeout:',
+                    model: `${model}.socketTimeout`,
+                    name: '"socketTimeout"',
+                    placeholder: '5000',
+                    min: '0',
+                    tip: 'Socket operations timeout'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Acknowledgement timeout:',
                     model: `${model}.ackTimeout`,
                     name: '"ackTimeout"',
-                    disabled: 'false',
                     placeholder: '5000',
                     min: '0',
                     max: `{{ ${model}.maxAckTimeout || 600000 }}`,
                     tip: 'Message acknowledgement timeout'
                 })
-                    +form-field-feedback('"ackTimeout"', 'max', `Acknowledgement timeout should be less than max acknowledgement timeout ({{ ${model}.maxAckTimeout || 60000 }}).`)
+                    +form-field__error({ error: 'max', message: `Acknowledgement timeout should be less than max acknowledgement timeout ({{ ${model}.maxAckTimeout || 60000 }}).` })
             .pc-form-grid-col-30
-                +number('Max acknowledgement timeout:', `${model}.maxAckTimeout`, '"maxAckTimeout"', 'true', '600000', '0', 'Maximum message acknowledgement timeout')
+                +form-field__number({
+                    label: 'Max acknowledgement timeout:',
+                    model: `${model}.maxAckTimeout`,
+                    name: '"maxAckTimeout"',
+                    placeholder: '600000',
+                    min: '0',
+                    tip: 'Maximum message acknowledgement timeout'
+                })
             .pc-form-grid-col-30
-                +number('Network timeout:', `${model}.networkTimeout`, '"discoNetworkTimeout"', 'true', '5000', '1', 'Timeout to use for network operations')
+                +form-field__number({
+                    label: 'Network timeout:',
+                    model: `${model}.networkTimeout`,
+                    name: '"discoNetworkTimeout"',
+                    placeholder: '5000',
+                    min: '1',
+                    tip: 'Timeout to use for network operations'
+                })
             .pc-form-grid-col-30
-                +number('Join timeout:', `${model}.joinTimeout`, '"joinTimeout"', 'true', '0', '0',
-                    'Join timeout<br/>' +
-                    '0 means wait forever')
+                +form-field__number({
+                    label: 'Join timeout:',
+                    model: `${model}.joinTimeout`,
+                    name: '"joinTimeout"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Join timeout<br/>' +
+                          '0 means wait forever'
+                })
             .pc-form-grid-col-30
-                +number('Thread priority:', `${model}.threadPriority`, '"threadPriority"', 'true', '10', '1', 'Thread priority for all threads started by SPI')
+                +form-field__number({
+                    label: 'Thread priority:',
+                    model: `${model}.threadPriority`,
+                    name: '"threadPriority"',
+                    placeholder: '10',
+                    min: '1',
+                    tip: 'Thread priority for all threads started by SPI'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Heartbeat frequency:', `${model}.heartbeatFrequency`, '"heartbeatFrequency"', 'true', '2000', '1', 'Heartbeat messages issuing frequency')
+                +form-field__number({
+                    label: 'Heartbeat frequency:',
+                    model: `${model}.heartbeatFrequency`,
+                    name: '"heartbeatFrequency"',
+                    placeholder: '2000',
+                    min: '1',
+                    tip: 'Heartbeat messages issuing frequency'
+                })
             .pc-form-grid-col-30
-                +number('Max heartbeats miss w/o init:', `${model}.maxMissedHeartbeats`, '"maxMissedHeartbeats"', 'true', '1', '1',
-                    'Max heartbeats count node can miss without initiating status check')
+                +form-field__number({
+                    label: 'Max heartbeats miss w/o init:',
+                    model: `${model}.maxMissedHeartbeats`,
+                    name: '"maxMissedHeartbeats"',
+                    placeholder: '1',
+                    min: '1',
+                    tip: 'Max heartbeats count node can miss without initiating status check'
+                })
             .pc-form-grid-col-30(ng-if-end)
-                +number('Max missed client heartbeats:', `${model}.maxMissedClientHeartbeats`, '"maxMissedClientHeartbeats"', 'true', '5', '1',
-                    'Max heartbeats count node can miss without failing client node')
+                +form-field__number({
+                    label: 'Max missed client heartbeats:',
+                    model: `${model}.maxMissedClientHeartbeats`,
+                    name: '"maxMissedClientHeartbeats"',
+                    placeholder: '5',
+                    min: '1',
+                    tip: 'Max heartbeats count node can miss without failing client node'
+                })
 
             .pc-form-grid-col-60
-                +number('Topology history:', `${model}.topHistorySize`, '"topHistorySize"', 'true', '1000', '0', 'Size of topology snapshots history')
+                +form-field__number({
+                    label: 'Topology history:',
+                    model: `${model}.topHistorySize`,
+                    name: '"topHistorySize"',
+                    placeholder: '1000',
+                    min: '0',
+                    tip: 'Size of topology snapshots history'
+                })
             .pc-form-grid-col-60
-                +java-class('Discovery listener:', `${model}.listener`, '"discoListener"', 'true', 'false', 'Listener for grid node discovery events')
+                +form-field__java-class({
+                    label: 'Discovery listener:',
+                    model: `${model}.listener`,
+                    name: '"discoListener"',
+                    tip: 'Listener for grid node discovery events'
+                })
             .pc-form-grid-col-60
-                +java-class('Data exchange:', `${model}.dataExchange`, '"dataExchange"', 'true', 'false', 'Class name of handler for initial data exchange between Ignite nodes')
+                +form-field__java-class({
+                    label: 'Data exchange:',
+                    model: `${model}.dataExchange`,
+                    name: '"dataExchange"',
+                    tip: 'Class name of handler for initial data exchange between Ignite nodes'
+                })
             .pc-form-grid-col-60
-                +java-class('Metrics provider:', `${model}.metricsProvider`, '"metricsProvider"', 'true', 'false', 'Class name of metric provider to discovery SPI')
+                +form-field__java-class({
+                    label: 'Metrics provider:',
+                    model: `${model}.metricsProvider`,
+                    name: '"metricsProvider"',
+                    tip: 'Class name of metric provider to discovery SPI'
+                })
             .pc-form-grid-col-30
-                +number('Reconnect count:', `${model}.reconnectCount`, '"discoReconnectCount"', 'true', '10', '1', 'Reconnect attempts count')
+                +form-field__number({
+                    label: 'Reconnect count:',
+                    model: `${model}.reconnectCount`,
+                    name: '"discoReconnectCount"',
+                    placeholder: '10',
+                    min: '1',
+                    tip: 'Reconnect attempts count'
+                })
             .pc-form-grid-col-30
-                +number('Statistics frequency:', `${model}.statisticsPrintFrequency`, '"statisticsPrintFrequency"', 'true', '0', '1', 'Statistics print frequency')
+                +form-field__number({
+                    label: 'Statistics frequency:',
+                    model: `${model}.statisticsPrintFrequency`,
+                    name: '"statisticsPrintFrequency"',
+                    placeholder: '0',
+                    min: '1',
+                    tip: 'Statistics print frequency'
+                })
             .pc-form-grid-col-60
-                +number('IP finder clean frequency:', `${model}.ipFinderCleanFrequency`, '"ipFinderCleanFrequency"', 'true', '60000', '1', 'IP finder clean frequency')
+                +form-field__number({
+                    label: 'IP finder clean frequency:',
+                    model: `${model}.ipFinderCleanFrequency`,
+                    name: '"ipFinderCleanFrequency"',
+                    placeholder: '60000',
+                    min: '1',
+                    tip: 'IP finder clean frequency'
+                })
             .pc-form-grid-col-60
-                +java-class('Node authenticator:', `${model}.authenticator`, '"authenticator"', 'true', 'false', 'Class name of node authenticator implementation')
+                +form-field__java-class({
+                    label: 'Node authenticator:',
+                    model: `${model}.authenticator`,
+                    name: '"authenticator"',
+                    tip: 'Class name of node authenticator implementation'
+                })
             .pc-form-grid-col-60
-                +checkbox('Force server mode', `${model}.forceServerMode`, '"forceServerMode"', 'Force start TCP/IP discovery in server mode')
+                +form-field__checkbox({
+                    label: 'Force server mode',
+                    model: `${model}.forceServerMode`,
+                    name: '"forceServerMode"',
+                    tip: 'Force start TCP/IP discovery in server mode'
+                })
             .pc-form-grid-col-60
-                +checkbox('Client reconnect disabled', `${model}.clientReconnectDisabled`, '"clientReconnectDisabled"',
-                    'Disable try of client to reconnect after server detected client node failure')
+                +form-field__checkbox({
+                    label: 'Client reconnect disabled',
+                    model: `${model}.clientReconnectDisabled`,
+                    name: '"clientReconnectDisabled"',
+                    tip: 'Disable try of client to reconnect after server detected client node failure'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterDiscovery')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/events.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/events.pug
index a41999e..9967af4 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/events.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/events.pug
@@ -26,41 +26,87 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Events
     panel-description
-        | Grid events are used for notification about what happens within the grid. 
+        | Grid events are used for notification about what happens within the grid.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/events" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +dropdown('Event storage:', modelEventStorageKind, '"eventStorageKind"', 'true', 'Disabled', '$ctrl.eventStorage',
-                'Regulate how grid store events locally on node\
-                <ul>\
-                    <li>Memory - All events are kept in the FIFO queue in-memory</li>\
-                    <li>Custom - Custom implementation of event storage SPI</li>\
-                </ul>')
+                +form-field__dropdown({
+                    label: 'Event storage:',
+                    model: modelEventStorageKind,
+                    name: '"eventStorageKind"',
+                    placeholder: 'Disabled',
+                    options: '$ctrl.eventStorage',
+                    tip: 'Regulate how grid store events locally on node\
+                        <ul>\
+                            <li>Memory - All events are kept in the FIFO queue in-memory</li>\
+                            <li>Custom - Custom implementation of event storage SPI</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +dropdown('Event storage:', modelEventStorageKind, '"eventStorageKind"', 'true', 'Disabled', '$ctrl.eventStorage',
-                'Regulate how grid store events locally on node\
-                <ul>\
-                    <li>Memory - All events are kept in the FIFO queue in-memory</li>\
-                    <li>Custom - Custom implementation of event storage SPI</li>\
-                    <li>Disabled - Events are not collected</li>\
-                </ul>')
+                +form-field__dropdown({
+                    label: 'Event storage:',
+                    model: modelEventStorageKind,
+                    name: '"eventStorageKind"',
+                    placeholder: 'Disabled',
+                    options: '$ctrl.eventStorage',
+                    tip: 'Regulate how grid store events locally on node\
+                        <ul>\
+                            <li>Memory - All events are kept in the FIFO queue in-memory</li>\
+                            <li>Custom - Custom implementation of event storage SPI</li>\
+                            <li>Disabled - Events are not collected</li>\
+                        </ul>'
+                })
             .pc-form-group.pc-form-grid-row(ng-if=modelEventStorageKind)
                 .pc-form-grid-col-30(ng-if-start=eventStorageMemory)
-                    +number('Events expiration time:', `${modelEventStorage}.Memory.expireAgeMs`, '"EventStorageExpireAgeMs"', 'true', 'Long.MAX_VALUE', '1', 'All events that exceed this value will be removed from the queue when next event comes')
+                    +form-field__number({
+                        label: 'Events expiration time:',
+                        model: `${modelEventStorage}.Memory.expireAgeMs`,
+                        name: '"writeBehindBatchSize"',
+                        placeholder: 'Long.MAX_VALUE',
+                        min: '1',
+                        tip: 'All events that exceed this value will be removed from the queue when next event comes'
+                    })
                 .pc-form-grid-col-30
-                    +number('Events queue size:', `${modelEventStorage}.Memory.expireCount`, '"EventStorageExpireCount"', 'true', '10000', '1', 'Events will be filtered out when new request comes')
+                    +form-field__number({
+                        label: 'Events queue size:',
+                        model: `${modelEventStorage}.Memory.expireCount`,
+                        name: '"EventStorageExpireCount"',
+                        placeholder: '10000',
+                        min: '1',
+                        tip: 'Events will be filtered out when new request comes'
+                    })
                 .pc-form-grid-col-60(ng-if-end)
-                    +java-class('Filter:', `${modelEventStorage}.Memory.filter`, '"EventStorageFilter"', 'true', 'false',
-                    'Filter for events to be recorded<br/>\
-                    Should be implementation of o.a.i.lang.IgnitePredicate&lt;o.a.i.events.Event&gt;', eventStorageMemory)
+                    +form-field__java-class({
+                        label: 'Filter:',
+                        model: `${modelEventStorage}.Memory.filter`,
+                        name: '"EventStorageFilter"',
+                        tip: 'Filter for events to be recorded<br/>\
+                             Should be implementation of o.a.i.lang.IgnitePredicate&lt;o.a.i.events.Event&gt;',
+                        validationActive: eventStorageMemory
+                    })
 
                 .pc-form-grid-col-60(ng-if=eventStorageCustom)
-                    +java-class('Class:', `${modelEventStorage}.Custom.className`, '"EventStorageCustom"', 'true', eventStorageCustom, 'Event storage implementation class name', eventStorageCustom)
+                    +form-field__java-class({
+                        label: 'Class:',
+                        model: `${modelEventStorage}.Custom.className`,
+                        name: '"EventStorageCustom"',
+                        required: eventStorageCustom,
+                        tip: 'Event storage implementation class name',
+                        validationActive: eventStorageCustom
+                    })
 
                 .pc-form-grid-col-60
-                    +dropdown-multiple('Include type:', `${model}.includeEventTypes`, '"includeEventTypes"', true, 'Choose recorded event types', '', '$ctrl.eventGroups',
-                    'Array of event types, which will be recorded by GridEventStorageManager#record(Event)<br/>\
-                    Note, that either the include event types or the exclude event types can be established')
+                    +form-field__dropdown({
+                        label: 'Include type:',
+                        model: `${model}.includeEventTypes`,
+                        name: '"includeEventTypes"',
+                        multiple: true,
+                        placeholder: 'Choose recorded event types',
+                        placeholderEmpty: '',
+                        options: '$ctrl.eventGroups',
+                        tip: 'Array of event types, which will be recorded by GridEventStorageManager#record(Event)<br/>\
+                             Note, that either the include event types or the exclude event types can be established'
+                    })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterEvents')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/failover.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/failover.pug
index 85c441e..3fafe9f 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/failover.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/failover.pug
@@ -24,63 +24,92 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Failover configuration
     panel-description
-        | Failover SPI provides ability to supply custom logic for handling failed execution of a grid job. 
+        | Failover SPI provides ability to supply custom logic for handling failed execution of a grid job.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/fault-tolerance" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available("2.0.0")')
-                +number('Failure detection timeout:', model + '.failureDetectionTimeout', '"failureDetectionTimeout"', 'true',
-                    '10000', '1', 'Failure detection timeout is used to determine how long the communication or discovery SPIs should wait before considering a remote connection failed')
+                +form-field__number({
+                    label: 'Failure detection timeout:',
+                    model: model + '.failureDetectionTimeout',
+                    name: '"failureDetectionTimeout"',
+                    placeholder: '10000',
+                    min: '1',
+                    tip: 'Failure detection timeout is used to determine how long the communication or discovery SPIs should wait before considering a remote connection failed'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +number('Client failure detection timeout:', model + '.clientFailureDetectionTimeout', '"clientFailureDetectionTimeout"', 'true',
-                    '30000', '1', 'Failure detection timeout is used to determine how long the communication or discovery SPIs should wait before considering a remote connection failed')
+                +form-field__number({
+                    label: 'Client failure detection timeout:',
+                    model: model + '.clientFailureDetectionTimeout',
+                    name: '"clientFailureDetectionTimeout"',
+                    placeholder: '30000',
+                    min: '1',
+                    tip: 'Failure detection timeout is used to determine how long the communication or discovery SPIs should wait before considering a remote connection failed'
+                })
 
             .pc-form-grid-col-60
                 mixin clusters-failover-spi
                     .ignite-form-field
-                        +ignite-form-field__label('Failover SPI configurations:', '"failoverSpi"')
-                            +tooltip(`Failover SPI configurations`)
-                        .ignite-form-field__control
-                            -let items = failoverSpi
+                        +form-field__label({ label: 'Failover SPI configurations:', name: '"failoverSpi"' })
+                            +form-field__tooltip({ title: `Failover SPI configurations` })
+                        -let items = failoverSpi
 
-                            list-editable.pc-list-editable-with-form-grid(ng-model=items name='failoverSpi')
-                                list-editable-item-edit.pc-form-grid-row
-                                    .pc-form-grid-col-60
-                                        +sane-ignite-form-field-dropdown({
-                                            required: true,
-                                            label: 'Failover SPI:',
-                                            model: '$item.kind',
-                                            name: '"failoverKind"',
-                                            placeholder: 'Choose Failover SPI',
-                                            options: '::$ctrl.Clusters.failoverSpis',
-                                            tip: `
-                                            Provides ability to supply custom logic for handling failed execution of a grid job
-                                            <ul>
-                                                <li>Job stealing - Supports job stealing from over-utilized nodes to under-utilized nodes</li>
-                                                <li>Never - Jobs are ordered as they arrived</li>
-                                                <li>Always - Jobs are first ordered by their priority</li>
-                                                <li>Custom - Jobs are activated immediately on arrival to mapped node</li>
-                                                <li>Default - Default FailoverSpi implementation</li>
-                                            </ul>`
-                                        })
+                        list-editable.pc-list-editable-with-form-grid(ng-model=items name='failoverSpi')
+                            list-editable-item-edit.pc-form-grid-row
+                                .pc-form-grid-col-60
+                                    +form-field__dropdown({
+                                        required: true,
+                                        label: 'Failover SPI:',
+                                        model: '$item.kind',
+                                        name: '"failoverKind"',
+                                        placeholder: 'Choose Failover SPI',
+                                        options: '::$ctrl.Clusters.failoverSpis',
+                                        tip: `
+                                        Provides ability to supply custom logic for handling failed execution of a grid job
+                                        <ul>
+                                            <li>Job stealing - Supports job stealing from over-utilized nodes to under-utilized nodes</li>
+                                            <li>Never - Jobs are ordered as they arrived</li>
+                                            <li>Always - Jobs are first ordered by their priority</li>
+                                            <li>Custom - Jobs are activated immediately on arrival to mapped node</li>
+                                            <li>Default - Default FailoverSpi implementation</li>
+                                        </ul>`
+                                    })
 
-                                    .pc-form-grid-col-60(ng-show='$item.kind === "JobStealing"')
-                                        +number('Maximum failover attempts:', '$item.JobStealing.maximumFailoverAttempts', '"jsMaximumFailoverAttempts"', 'true', '5', '0',
-                                            'Maximum number of attempts to execute a failed job on another node')
-                                    .pc-form-grid-col-60(ng-show='$item.kind === "Always"')
-                                        +number('Maximum failover attempts:', '$item.Always.maximumFailoverAttempts', '"alwaysMaximumFailoverAttempts"', 'true', '5', '0',
-                                            'Maximum number of attempts to execute a failed job on another node')
-                                    .pc-form-grid-col-60(ng-show=failoverCustom)
-                                        +java-class('SPI implementation', '$item.Custom.class', '"failoverSpiClass"', 'true', failoverCustom,
-                                            'Custom FailoverSpi implementation class name.', failoverCustom)
+                                .pc-form-grid-col-60(ng-show='$item.kind === "JobStealing"')
+                                    +form-field__number({
+                                        label: 'Maximum failover attempts:',
+                                        model: '$item.JobStealing.maximumFailoverAttempts',
+                                        name: '"jsMaximumFailoverAttempts"',
+                                        placeholder: '5',
+                                        min: '0',
+                                        tip: 'Maximum number of attempts to execute a failed job on another node'
+                                    })
+                                .pc-form-grid-col-60(ng-show='$item.kind === "Always"')
+                                    +form-field__number({
+                                        label: 'Maximum failover attempts:',
+                                        model: '$item.Always.maximumFailoverAttempts',
+                                        name: '"alwaysMaximumFailoverAttempts"',
+                                        placeholder: '5',
+                                        min: '0',
+                                        tip: 'Maximum number of attempts to execute a failed job on another node'
+                                    })
+                                .pc-form-grid-col-60(ng-show=failoverCustom)
+                                    +form-field__java-class({
+                                        label: 'SPI implementation',
+                                        model: '$item.Custom.class',
+                                        name: '"failoverSpiClass"',
+                                        required: failoverCustom,
+                                        tip: 'Custom FailoverSpi implementation class name.',
+                                        validationActive: failoverCustom
+                                    })
 
-                                list-editable-no-items
-                                    list-editable-add-item-button(
-                                        add-item=`(${items} = ${items} || []).push({})`
-                                        label-single='failover SPI'
-                                        label-multiple='failover SPIs'
-                                    )
+                            list-editable-no-items
+                                list-editable-add-item-button(
+                                    add-item=`(${items} = ${items} || []).push({})`
+                                    label-single='failover SPI'
+                                    label-multiple='failover SPIs'
+                                )
 
                 +clusters-failover-spi
 
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general.pug
index 86f6384..26a949a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general.pug
@@ -38,11 +38,10 @@
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +sane-ignite-form-field-text({
+                +form-field__text({
                     label: 'Name:',
                     model: `${model}.name`,
                     name: '"clusterName"',
-                    disabled: 'false',
                     placeholder: 'Input name',
                     required: true,
                     tip: 'Instance name allows to indicate to what grid this particular grid instance belongs to'
@@ -51,28 +50,40 @@
                     ignite-unique-property='name'
                     ignite-unique-skip=`["_id", ${model}]`
                 )
-                    +unique-feedback(`${model}.name`, 'Cluster name should be unique.')
+                    +form-field__error({ error: 'igniteUnique', message: 'Cluster name should be unique.' })
 
             .pc-form-grid-col-30
-                +text-ip-address('Local host:', `${model}.localHost`, '"localHost"', 'true', '0.0.0.0',
-                    'System-wide local address or host for all Ignite components to bind to<br/>\
-                    If not defined then Ignite tries to use local wildcard address<br/>\
-                    That means that all services will be available on all network interfaces of the host machine')
+                +form-field__ip-address({
+                    label: 'Local host:',
+                    model: `${model}.localHost`,
+                    name: '"localHost"',
+                    enabled: 'true',
+                    placeholder: '0.0.0.0',
+                    tip: 'System-wide local address or host for all Ignite components to bind to<br/>\
+                          If not defined then Ignite tries to use local wildcard address<br/>\
+                          That means that all services will be available on all network interfaces of the host machine'
+                })
 
             .pc-form-grid-col-60
-                +dropdown('Discovery:', `${model}.discovery.kind`, '"discovery"', 'true', 'Choose discovery', '$ctrl.Clusters.discoveries',
-                'Discovery allows to discover remote nodes in grid\
-                <ul>\
-                    <li>Static IPs - IP Finder which works only with pre configured list of IP addresses specified</li>\
-                    <li>Multicast - Multicast based IP finder</li>\
-                    <li>AWS S3 - AWS S3 based IP finder that automatically discover cluster nodes on Amazon EC2 cloud</li>\
-                    <li>Apache jclouds - Apache jclouds multi cloud toolkit based IP finder for cloud platforms with unstable IP addresses</li>\
-                    <li>Google cloud storage - Google Cloud Storage based IP finder that automatically discover cluster nodes on Google Compute Engine cluster</li>\
-                    <li>JDBC - JDBC based IP finder that use database to store node IP address</li>\
-                    <li>Shared filesystem - Shared filesystem based IP finder that use file to store node IP address</li>\
-                    <li>Apache ZooKeeper - Apache ZooKeeper based IP finder when you use ZooKeeper to coordinate your distributed environment</li>\
-                    <li>Kubernetes - IP finder for automatic lookup of Ignite nodes running in Kubernetes environment</li>\
-                </ul>')
+                +form-field__dropdown({
+                    label: 'Discovery:',
+                    model: `${model}.discovery.kind`,
+                    name: '"discovery"',
+                    placeholder: 'Choose discovery',
+                    options: '$ctrl.Clusters.discoveries',
+                    tip: 'Discovery allows to discover remote nodes in grid\
+                        <ul>\
+                            <li>Static IPs - IP Finder which works only with pre configured list of IP addresses specified</li>\
+                            <li>Multicast - Multicast based IP finder</li>\
+                            <li>AWS S3 - AWS S3 based IP finder that automatically discover cluster nodes on Amazon EC2 cloud</li>\
+                            <li>Apache jclouds - Apache jclouds multi cloud toolkit based IP finder for cloud platforms with unstable IP addresses</li>\
+                            <li>Google cloud storage - Google Cloud Storage based IP finder that automatically discover cluster nodes on Google Compute Engine cluster</li>\
+                            <li>JDBC - JDBC based IP finder that use database to store node IP address</li>\
+                            <li>Shared filesystem - Shared filesystem based IP finder that use file to store node IP address</li>\
+                            <li>Apache ZooKeeper - Apache ZooKeeper based IP finder when you use ZooKeeper to coordinate your distributed environment</li>\
+                            <li>Kubernetes - IP finder for automatic lookup of Ignite nodes running in Kubernetes environment</li>\
+                        </ul>'
+                })
             .pc-form-group
                 +discovery-cloud()(ng-if=`${modelDiscoveryKind} === 'Cloud'`)
                 +discovery-google()(ng-if=`${modelDiscoveryKind} === 'GoogleStorage'`)
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/cloud.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/cloud.pug
index 074756e..800302a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/cloud.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/cloud.pug
@@ -28,19 +28,42 @@
 
     div.pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text('Credential:', `${model}.credential`, '"credential"', 'false', 'Input cloud credential',
-                'Credential that is used during authentication on the cloud<br/>\
-                Depending on a cloud platform it can be a password or access key')
+            +form-field__text({
+                label: 'Credential:',
+                model: `${model}.credential`,
+                name: '"credential"',
+                placeholder: 'Input cloud credential',
+                tip: 'Credential that is used during authentication on the cloud<br/>\
+                      Depending on a cloud platform it can be a password or access key'
+            })
         .pc-form-grid-col-30
-            +text('Path to credential:', `${model}.credentialPath`, '"credentialPath"', 'false', 'Input path to credential',
-                'Path to a credential that is used during authentication on the cloud<br/>\
-                Access key or private key should be stored in a plain or PEM file without a passphrase')
+            +form-field__text({
+                label: 'Path to credential:',
+                model: `${model}.credentialPath`,
+                name: '"credentialPath"',
+                placeholder: 'Input path to credential',
+                tip: 'Path to a credential that is used during authentication on the cloud<br/>\
+                     Access key or private key should be stored in a plain or PEM file without a passphrase'
+            })
         .pc-form-grid-col-30
-            +text('Identity:', `${model}.identity`, '"' + discoveryKind + 'Identity"', required, 'Input identity',
-                'Identity that is used as a user name during a connection to the cloud<br/>\
-                Depending on a cloud platform it can be an email address, user name, etc')
+            +form-field__text({
+                label: 'Identity:',
+                model: `${model}.identity`,
+                name: '"' + discoveryKind + 'Identity"',
+                required: required,
+                placeholder: 'Input identity',
+                tip: 'Identity that is used as a user name during a connection to the cloud<br/>\
+                     Depending on a cloud platform it can be an email address, user name, etc'
+            })
         .pc-form-grid-col-30
-            +text('Provider:', `${model}.provider`, '"' + discoveryKind + 'Provider"', required, 'Input provider', 'Cloud provider to use')
+            +form-field__text({
+                label:'Provider:',
+                model: `${model}.provider`,
+                name: '"' + discoveryKind + 'Provider"',
+                required: required,
+                placeholder: 'Input provider',
+                tip: 'Cloud provider to use'
+            })
         .pc-form-grid-col-60
             .ignite-form-field
                 +list-text-field({
@@ -57,8 +80,7 @@
                         Note, that some cloud providers, like Google Compute Engine, doesn't have a notion of a region. For such providers regions are redundant"
                     }]`
                 )
-                    +unique-feedback(_, 'Such region already exists!')
-
+                    +form-field__error({ error: 'igniteUnique', message: 'Such region already exists!' })
         .pc-form-grid-col-60
             .ignite-form-field
                 +list-text-field({
@@ -75,4 +97,4 @@
                         Note, that some cloud providers, like Rackspace, doesn't have a notion of a zone. For such providers zones are redundant"
                     }]`
                 )
-                    +unique-feedback(_, 'Such zone already exists!')
+                    +form-field__error({ error: 'igniteUnique', message: 'Such zone already exists!' })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/google.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/google.pug
index 7de3843..01996ac 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/google.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/google.pug
@@ -23,16 +23,41 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text('Project name:', `${model}.projectName`, `'${discoveryKind}ProjectName'`, required, 'Input project name', '' +
-                'Google Cloud Platforms project name<br/>\
-                Usually this is an auto generated project number(ex. 208709979073) that can be found in "Overview" section of Google Developer Console')
+            +form-field__text({
+                label: 'Project name:',
+                model: `${model}.projectName`,
+                name: `'${discoveryKind}ProjectName'`,
+                required: required,
+                placeholder: 'Input project name',
+                tip: 'Google Cloud Platforms project name<br/>\
+                     Usually this is an auto generated project number(ex. 208709979073) that can be found in "Overview" section of Google Developer Console'
+            })
         .pc-form-grid-col-30
-            +text('Bucket name:', `${model}.bucketName`, `'${discoveryKind}BucketName'`, required, 'Input bucket name',
-                'Google Cloud Storage bucket name<br/>\
-                If the bucket does not exist Ignite will automatically create it<br/>\
-                However the name must be unique across whole Google Cloud Storage and Service Account Id must be authorized to perform this operation')
+            +form-field__text({
+                label: 'Bucket name:',
+                model: `${model}.bucketName`,
+                name: `'${discoveryKind}BucketName'`,
+                required: required,
+                placeholder: 'Input bucket name',
+                tip: 'Google Cloud Storage bucket name<br/>\
+                     If the bucket does not exist Ignite will automatically create it<br/>\
+                     However the name must be unique across whole Google Cloud Storage and Service Account Id must be authorized to perform this operation'
+            })
         .pc-form-grid-col-30
-            +text('Private key path:', `${model}.serviceAccountP12FilePath`, `'${discoveryKind}ServiceAccountP12FilePath'`, required, 'Input private key path',
-                'Full path to the private key in PKCS12 format of the Service Account')
+            +form-field__text({
+                label: 'Private key path:',
+                model: `${model}.serviceAccountP12FilePath`,
+                name: `'${discoveryKind}ServiceAccountP12FilePath'`,
+                required: required,
+                placeholder: 'Input private key path',
+                tip: 'Full path to the private key in PKCS12 format of the Service Account'
+            })
         .pc-form-grid-col-30
-            +text('Account id:', `${model}.serviceAccountId`, `'${discoveryKind}ServiceAccountId'`, required, 'Input account id', 'Service account ID (typically an e-mail address)')
+            +form-field__text({
+                label: 'Account id:',
+                model: `${model}.serviceAccountId`,
+                name: `'${discoveryKind}ServiceAccountId'`,
+                required: required,
+                placeholder: 'Input account id',
+                tip: 'Service account ID (typically an e-mail address)'
+            })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/jdbc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/jdbc.pug
index 7b23a22..eb9f0aa 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/jdbc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/jdbc.pug
@@ -22,14 +22,31 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text('Data source bean name:', `${model}.dataSourceBean`,
-                '"dataSourceBean"', required, 'Input bean name', 'Name of the data source bean in Spring context')
+            +form-field__text({
+                label: 'Data source bean name:',
+                model: `${model}.dataSourceBean`,
+                name: '"dataSourceBean"',
+                required: required,
+                placeholder:'Input bean name',
+                tip: 'Name of the data source bean in Spring context'
+            })
         .pc-form-grid-col-30
-            +dialect('Dialect:', `${model}.dialect`, '"dialect"', required,
-                'Dialect of SQL implemented by a particular RDBMS:', 'Generic JDBC dialect', 'Choose JDBC dialect')
+            +form-field__dialect({
+                label: 'Dialect:',
+                model: `${model}.dialect`,
+                name: '"dialect"',
+                required,
+                tip: 'Dialect of SQL implemented by a particular RDBMS:',
+                genericDialectName: 'Generic JDBC dialect',
+                placeholder: 'Choose JDBC dialect'
+            })
         .pc-form-grid-col-60
-            +checkbox('DB schema should be initialized by Ignite', `${model}.initSchema`, '"initSchema"',
-                'Flag indicating whether DB schema should be initialized by Ignite or was explicitly created by user')
+            +form-field__checkbox({
+                label: 'DB schema should be initialized by Ignite',
+                model: `${model}.initSchema`,
+                name: '"initSchema"',
+                tip: 'Flag indicating whether DB schema should be initialized by Ignite or was explicitly created by user'
+            })
         .pc-form-grid-col-30(ng-if=`$ctrl.Clusters.requiresProprietaryDrivers(${modelAt})`)
             a.link-success(ng-href=`{{ $ctrl.Clusters.JDBCDriverURL(${modelAt}) }}` target='_blank')
                 | Download JDBC drivers?
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/kubernetes.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/kubernetes.pug
index 9232022..32d94fc 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/kubernetes.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/kubernetes.pug
@@ -22,17 +22,38 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text('Service name:', `${model}.serviceName`, `'${discoveryKind}ServiceName'`, 'false', 'ignite',
-                "The name of Kubernetes service for Ignite pods' IP addresses lookup.<br/>\
-                The name of the service must be equal to the name set in service's Kubernetes configuration.<br/>\
-                If this parameter is not changed then the name of the service has to be set to 'ignite' in the corresponding Kubernetes configuration.")
+            +form-field__text({
+                label: 'Service name:',
+                model: `${model}.serviceName`,
+                name: `'${discoveryKind}ServiceName'`,
+                placeholder: 'ignite',
+                tip: "The name of Kubernetes service for Ignite pods' IP addresses lookup.<br/>\
+                     The name of the service must be equal to the name set in service's Kubernetes configuration.<br/>\
+                     If this parameter is not changed then the name of the service has to be set to 'ignite' in the corresponding Kubernetes configuration."
+            })
         .pc-form-grid-col-30
-            +text('Namespace:', `${model}.namespace`, `'${discoveryKind}Namespace'`, 'false', 'default',
-                "The namespace the Kubernetes service belongs to.<br/>\
-                By default, it's supposed that the service is running under Kubernetes `default` namespace.")
+            +form-field__text({
+                label: 'Namespace:',
+                model: `${model}.namespace`,
+                name: `'${discoveryKind}Namespace'`,
+                placeholder: 'default',
+                tip: "The namespace the Kubernetes service belongs to.<br/>\
+                      By default, it's supposed that the service is running under Kubernetes `default` namespace."
+            })
         .pc-form-grid-col-60
-            +url('Kubernetes server:', `${model}.masterUrl`, `'${discoveryKind}MasterUrl'`, 'true', 'false', 'https://kubernetes.default.svc.cluster.local:443',
-                'The host name of the Kubernetes API server')
+            +form-field__url({
+                label: 'Kubernetes server:',
+                model: `${model}.masterUrl`,
+                name: `'${discoveryKind}MasterUrl'`,
+                enabled: 'true',
+                placeholder: 'https://kubernetes.default.svc.cluster.local:443',
+                tip: 'The host name of the Kubernetes API server'
+            })
         .pc-form-grid-col-60
-            +text('Service token file:', `${model}.accountToken`, `'${discoveryKind}AccountToken'`, 'false', '/var/run/secrets/kubernetes.io/serviceaccount/token',
-                'The path to the service token file')
+            +form-field__text({
+                label: 'Service token file:',
+                model: `${model}.accountToken`,
+                name: `'${discoveryKind}AccountToken'`,
+                placeholder: '/var/run/secrets/kubernetes.io/serviceaccount/token',
+                tip: 'The path to the service token file'
+            })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/multicast.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/multicast.pug
index 2d7aa4b..b767e9c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/multicast.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/multicast.pug
@@ -22,42 +22,73 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text-ip-address('IP address:', `${model}.multicastGroup`, '"multicastGroup"', 'true', '228.1.2.4', 'IP address of multicast group')
+            +form-field__ip-address({
+                label: 'IP address:',
+                model: `${model}.multicastGroup`,
+                name: '"multicastGroup"',
+                enabled: 'true',
+                placeholder: '228.1.2.4',
+                tip: 'IP address of multicast group'
+            })
         .pc-form-grid-col-30
-            +number-min-max('Port number:', `${model}.multicastPort`, '"multicastPort"', 'true', '47400', '0', '65535', 'Port number which multicast messages are sent to')
+            +form-field__number({
+                label: 'Port number:',
+                model: `${model}.multicastPort`,
+                name: '"multicastPort"',
+                placeholder: '47400',
+                min: '0',
+                max: '65535',
+                tip: 'Port number which multicast messages are sent to'
+            })
         .pc-form-grid-col-20
-            +number('Waits for reply:', `${model}.responseWaitTime`, '"responseWaitTime"', 'true', '500', '0',
-                'Time in milliseconds IP finder waits for reply to multicast address request')
+            +form-field__number({
+                label: 'Waits for reply:',
+                model: `${model}.responseWaitTime`,
+                name: '"responseWaitTime"',
+                placeholder: '500',
+                min: '0',
+                tip: 'Time in milliseconds IP finder waits for reply to multicast address request'
+            })
         .pc-form-grid-col-20
-            +number('Attempts count:', `${model}.addressRequestAttempts`, '"addressRequestAttempts"', 'true', '2', '0',
-                'Number of attempts to send multicast address request<br/>\
-                IP finder re - sends request only in case if no reply for previous request is received')
+            +form-field__number({
+                label: 'Attempts count:',
+                model: `${model}.addressRequestAttempts`,
+                name: '"addressRequestAttempts"',
+                placeholder: '2',
+                min: '0',
+                tip: 'Number of attempts to send multicast address request<br/>\
+                     IP finder re - sends request only in case if no reply for previous request is received'
+            })
         .pc-form-grid-col-20.pc-form-grid-col-free
-            +text-ip-address('Local address:', `${model}.localAddress`, '"localAddress"', 'true', '0.0.0.0',
-                'Local host address used by this IP finder<br/>\
-                If provided address is non - loopback then multicast socket is bound to this interface<br/>\
-                If local address is not set or is any local address then IP finder creates multicast sockets for all found non - loopback addresses')
+            +form-field__ip-address({
+                label: 'Local address:',
+                model: `${model}.localAddress`,
+                name: '"localAddress"',
+                enabled: 'true',
+                placeholder: '0.0.0.0',
+                tip: 'Local host address used by this IP finder<br/>\
+                     If provided address is non - loopback then multicast socket is bound to this interface<br/>\
+                     If local address is not set or is any local address then IP finder creates multicast sockets for all found non - loopback addresses'
+            })
         .pc-form-grid-col-60
             .ignite-form-field
-                .ignite-form-field__control
-                    +list-addresses({
-                        items: addresses,
-                        name: 'multicastAddresses',
-                        tip: `Addresses may be represented as follows:
-                        <ul>
-                            <li>IP address (e.g. 127.0.0.1, 9.9.9.9, etc)</li>
-                            <li>IP address and port (e.g. 127.0.0.1:47500, 9.9.9.9:47501, etc)</li>
-                            <li>IP address and port range (e.g. 127.0.0.1:47500..47510, 9.9.9.9:47501..47504, etc)</li>
-                            <li>Hostname (e.g. host1.com, host2, etc)</li>
-                            <li>Hostname and port (e.g. host1.com:47500, host2:47502, etc)</li>
-                            <li>Hostname and port range (e.g. host1.com:47500..47510, host2:47502..47508, etc)</li>
-                        </ul>
-                        If port is 0 or not provided then default port will be used (depends on discovery SPI configuration)<br />
-                        If port range is provided (e.g. host:port1..port2) the following should be considered:
-                        </ul>
-                        <ul>
-                            <li> port1 &lt; port2 should be true</li>
-                            <li> Both port1 and port2 should be greater than 0</li>
-                        </ul>`
-                    })
-                    
+                +list-addresses({
+                    items: addresses,
+                    name: 'multicastAddresses',
+                    tip: `Addresses may be represented as follows:
+                    <ul>
+                        <li>IP address (e.g. 127.0.0.1, 9.9.9.9, etc)</li>
+                        <li>IP address and port (e.g. 127.0.0.1:47500, 9.9.9.9:47501, etc)</li>
+                        <li>IP address and port range (e.g. 127.0.0.1:47500..47510, 9.9.9.9:47501..47504, etc)</li>
+                        <li>Hostname (e.g. host1.com, host2, etc)</li>
+                        <li>Hostname and port (e.g. host1.com:47500, host2:47502, etc)</li>
+                        <li>Hostname and port range (e.g. host1.com:47500..47510, host2:47502..47508, etc)</li>
+                    </ul>
+                    If port is 0 or not provided then default port will be used (depends on discovery SPI configuration)<br />
+                    If port range is provided (e.g. host:port1..port2) the following should be considered:
+                    </ul>
+                    <ul>
+                        <li> port1 &lt; port2 should be true</li>
+                        <li> Both port1 and port2 should be greater than 0</li>
+                    </ul>`
+                })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/s3.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/s3.pug
index 41d45ac..dc18824 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/s3.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/s3.pug
@@ -24,15 +24,32 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-30
-            +text('Bucket name:', `${model}.bucketName`, `'${discoveryKind}BucketName'`, required, 'Input bucket name', 'Bucket name for IP finder')
+            +form-field__text({
+                label: 'Bucket name:',
+                model: `${model}.bucketName`,
+                name: `'${discoveryKind}BucketName'`,
+                required: required,
+                placeholder: 'Input bucket name',
+                tip: 'Bucket name for IP finder'
+            })
         .pc-form-grid-col-30
             .pc-form-grid__text-only-item(style='font-style: italic;color: #424242;')
                 | AWS credentials will be generated as stub
         .pc-form-grid-col-40(ng-if-start=`$ctrl.available("2.4.0")`)
-            +text('Bucket endpoint:', `${model}.bucketEndpoint`, `'${discoveryKind}BucketEndpoint'`, false, 'Input bucket endpoint',
-            'Bucket endpoint for IP finder<br/> \
-            For information about possible endpoint names visit <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">docs.aws.amazon.com</a>')
+            +form-field__text({
+                label: 'Bucket endpoint:',
+                model: `${model}.bucketEndpoint`,
+                name: `'${discoveryKind}BucketEndpoint'`,
+                placeholder: 'Input bucket endpoint',
+                tip: 'Bucket endpoint for IP finder<br/> \
+                      For information about possible endpoint names visit <a href="http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">docs.aws.amazon.com</a>'
+            })
         .pc-form-grid-col-20(ng-if-end)
-            +text('SSE algorithm:', `${model}.SSEAlgorithm`, `'${discoveryKind}SSEAlgorithm'`, false, 'Input SSE algorithm',
-            'Server-side encryption algorithm for Amazon S3-managed encryption keys<br/> \
-            For information about possible S3-managed encryption keys visit <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">docs.aws.amazon.com</a>')
\ No newline at end of file
+            +form-field__text({
+                label: 'SSE algorithm:',
+                model: `${model}.SSEAlgorithm`,
+                name: `'${discoveryKind}SSEAlgorithm'`,
+                placeholder: 'Input SSE algorithm',
+                tip: 'Server-side encryption algorithm for Amazon S3-managed encryption keys<br/> \
+                      For information about possible S3-managed encryption keys visit <a href="http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">docs.aws.amazon.com</a>'
+            })
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/shared.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/shared.pug
index 83e8f2a..e5b86c3 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/shared.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/shared.pug
@@ -21,4 +21,10 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-60
-            +text('File path:', `${model}.path`, '"path"', 'false', 'disco/tcp', 'Shared path')
+            +form-field__text({
+                label: 'File path:',
+                model: `${model}.path`,
+                name: '"path"',
+                placeholder: 'disco/tcp',
+                tip: 'Shared path'
+            })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/vm.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/vm.pug
index 1266f86..aee3a1b 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/vm.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/vm.pug
@@ -23,33 +23,33 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-60
-            .ignite-form-field
-                .ignite-form-field__control
-                    +list-addresses({
-                        items: addresses,
-                        name: 'vmAddresses',
-                        tip: `Addresses may be represented as follows:
-                            <ul>
-                                <li>IP address (e.g. 127.0.0.1, 9.9.9.9, etc)</li>
-                                <li>IP address and port (e.g. 127.0.0.1:47500, 9.9.9.9:47501, etc)</li>
-                                <li>IP address and port range (e.g. 127.0.0.1:47500..47510, 9.9.9.9:47501..47504, etc)</li>
-                                <li>Hostname (e.g. host1.com, host2, etc)</li>
-                                <li>Hostname and port (e.g. host1.com:47500, host2:47502, etc)</li>
-                                <li>Hostname and port range (e.g. host1.com:47500..47510, host2:47502..47508, etc)</li>
-                            </ul>
-                            If port is 0 or not provided then default port will be used (depends on discovery SPI configuration)<br />
-                            If port range is provided (e.g. host:port1..port2) the following should be considered:
-                            </ul>
-                            <ul>
-                                <li> port1 &lt; port2 should be true</li>
-                                <li> Both port1 and port2 should be greater than 0</li>
-                            </ul>`
-                    })(
-                        ng-required='true'
-                        expose-ignite-form-field-control='$vmAddresses'
-                    )
-                .ignite-form-field__errors(
+            .form-field.ignite-form-field
+                +list-addresses({
+                    items: addresses,
+                    name: 'vmAddresses',
+                    tip: `Addresses may be represented as follows:
+                        <ul>
+                            <li>IP address (e.g. 127.0.0.1, 9.9.9.9, etc)</li>
+                            <li>IP address and port (e.g. 127.0.0.1:47500, 9.9.9.9:47501, etc)</li>
+                            <li>IP address and port range (e.g. 127.0.0.1:47500..47510, 9.9.9.9:47501..47504, etc)</li>
+                            <li>Hostname (e.g. host1.com, host2, etc)</li>
+                            <li>Hostname and port (e.g. host1.com:47500, host2:47502, etc)</li>
+                            <li>Hostname and port range (e.g. host1.com:47500..47510, host2:47502..47508, etc)</li>
+                        </ul>
+                        If port is 0 or not provided then default port will be used (depends on discovery SPI configuration)<br />
+                        If port range is provided (e.g. host:port1..port2) the following should be considered:
+                        </ul>
+                        <ul>
+                            <li> port1 &lt; port2 should be true</li>
+                            <li> Both port1 and port2 should be greater than 0</li>
+                        </ul>`
+                })(
+                    ng-required='true'
+                    ng-ref='$vmAddresses'
+                    ng-ref-read='ngModel'
+                )
+                .form-field__errors(
                     ng-messages=`$vmAddresses.$error`
                     ng-show=`$vmAddresses.$invalid`
                 )
-                    +form-field-feedback(_, 'required', 'Addresses should be configured')
+                    +form-field__error({ error: 'required', message: 'Addresses should be configured' })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper.pug
index 826e09b..53d704f 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper.pug
@@ -25,36 +25,52 @@
 
     .pc-form-grid-row&attributes(attributes=attributes)
         .pc-form-grid-col-60
-            +java-class('Curator:', `${model}.curator`, '"curator"', 'true', 'false',
-                'The Curator framework in use<br/>\
-                By default generates curator of org.apache.curator. framework.imps.CuratorFrameworkImpl\
-                class with configured connect string, retry policy, and default session and connection timeouts', required)
+            +form-field__java-class({
+                label: 'Curator:',
+                model: `${model}.curator`,
+                name: '"curator"',
+                tip: 'The Curator framework in use<br/>\
+                     By default generates curator of org.apache.curator. framework.imps.CuratorFrameworkImpl\
+                     class with configured connect string, retry policy, and default session and connection timeouts',
+                validationActive: required
+            })
         .pc-form-grid-col-60
-            +text('Connect string:', `${model}.zkConnectionString`, `'${discoveryKind}ConnectionString'`, required, 'host:port[chroot][,host:port[chroot]]',
-                'When <b>IGNITE_ZK_CONNECTION_STRING</b> system property is not configured this property will be used.<br><br>This should be a comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001".<br>If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3002/app/a".<br><br>Where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc... "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).<br><br><a href="https://zookeeper.apache.org/doc/r3.2.2/api/org/apache/zookeeper/ZooKeeper.html#ZooKeeper(java.lang.String,%20int,%20org.apache.zookeeper.Watcher)">Zookeeper docs</a>')
+            +form-field__text({
+                label: 'Connect string:',
+                model: `${model}.zkConnectionString`,
+                name: `'${discoveryKind}ConnectionString'`,
+                required: required,
+                placeholder: 'host:port[chroot][,host:port[chroot]]',
+                tip: 'When <b>IGNITE_ZK_CONNECTION_STRING</b> system property is not configured this property will be used.<br><br>This should be a comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001".<br>If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3002/app/a".<br><br>Where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc... "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective).<br><br><a href="https://zookeeper.apache.org/doc/r3.2.2/api/org/apache/zookeeper/ZooKeeper.html#ZooKeeper(java.lang.String,%20int,%20org.apache.zookeeper.Watcher)">Zookeeper docs</a>'
+            })
         .pc-form-grid-col-60
-            +dropdown('Retry policy:', `${model}.retryPolicy.kind`, '"retryPolicy"', 'true', 'Default',
-            '[\
-                {value: "ExponentialBackoff", label: "Exponential backoff"},\
-                {value: "BoundedExponentialBackoff", label: "Bounded exponential backoff"},\
-                {value: "UntilElapsed", label: "Until elapsed"},\
-                {value: "NTimes", label: "Max number of times"},\
-                {value: "OneTime", label: "Only once"},\
-                {value: "Forever", label: "Always allow retry"},\
-                {value: "Custom", label: "Custom"},\
-                {value: null, label: "Default"}\
-            ]',
-            'Available retry policies:\
-            <ul>\
-                <li>Exponential backoff - retries a set number of times with increasing sleep time between retries</li>\
-                <li>Bounded exponential backoff - retries a set number of times with an increasing (up to a maximum bound) sleep time between retries</li>\
-                <li>Until elapsed - retries until a given amount of time elapses</li>\
-                <li>Max number of times - retries a max number of times</li>\
-                <li>Only once - retries only once</li>\
-                <li>Always allow retry - retries infinitely</li>\
-                <li>Custom - custom retry policy implementation</li>\
-                <li>Default - exponential backoff retry policy with configured base sleep time equal to 1000ms and max retry count equal to 10</li>\
-            </ul>')
+            +form-field__dropdown({
+                label: 'Retry policy:',
+                model: `${model}.retryPolicy.kind`,
+                name: '"retryPolicy"',
+                placeholder: 'Default',
+                options: '[\
+                                {value: "ExponentialBackoff", label: "Exponential backoff"},\
+                                {value: "BoundedExponentialBackoff", label: "Bounded exponential backoff"},\
+                                {value: "UntilElapsed", label: "Until elapsed"},\
+                                {value: "NTimes", label: "Max number of times"},\
+                                {value: "OneTime", label: "Only once"},\
+                                {value: "Forever", label: "Always allow retry"},\
+                                {value: "Custom", label: "Custom"},\
+                                {value: null, label: "Default"}\
+                            ]',
+                tip: 'Available retry policies:\
+                            <ul>\
+                                <li>Exponential backoff - retries a set number of times with increasing sleep time between retries</li>\
+                                <li>Bounded exponential backoff - retries a set number of times with an increasing (up to a maximum bound) sleep time between retries</li>\
+                                <li>Until elapsed - retries until a given amount of time elapses</li>\
+                                <li>Max number of times - retries a max number of times</li>\
+                                <li>Only once - retries only once</li>\
+                                <li>Always allow retry - retries infinitely</li>\
+                                <li>Custom - custom retry policy implementation</li>\
+                                <li>Default - exponential backoff retry policy with configured base sleep time equal to 1000ms and max retry count equal to 10</li>\
+                            </ul>'
+            })
 
         .pc-form-grid__break
 
@@ -69,16 +85,31 @@
         .pc-form-grid-col-30
             -var model = `${modelAt}.discovery.ZooKeeper`
 
-            +text('Base path:', `${model}.basePath`, '"basePath"', 'false', '/services', 'Base path for service registration')
+            +form-field__text({
+                label: 'Base path:',
+                model: `${model}.basePath`,
+                name: '"basePath"',
+                placeholder: '/services',
+                tip: 'Base path for service registration'
+            })
         .pc-form-grid-col-30
-            +text('Service name:', `${model}.serviceName`, '"serviceName"', 'false', 'ignite',
-                'Service name to use, as defined by Curator&#39;s ServiceDiscovery recipe<br/>\
-                In physical ZooKeeper terms, it represents the node under basePath, under which services will be registered')
+            +form-field__text({
+                label:'Service name:',
+                model: `${model}.serviceName`,
+                name: '"serviceName"',
+                placeholder: 'ignite',
+                tip: 'Service name to use, as defined by Curator&#39;s ServiceDiscovery recipe<br/>\
+                      In physical ZooKeeper terms, it represents the node under basePath, under which services will be registered'
+            })
 
         .pc-form-grid__break
 
         .pc-form-grid-col-60
-            +checkbox('Allow duplicate registrations', `${model}.allowDuplicateRegistrations`, '"allowDuplicateRegistrations"',
-                'Whether to register each node only once, or if duplicate registrations are allowed<br/>\
-                Nodes will attempt to register themselves, plus those they know about<br/>\
-                By default, duplicate registrations are not allowed, but you might want to set this property to <b>true</b> if you have multiple network interfaces or if you are facing troubles')
+            +form-field__checkbox({
+                label: 'Allow duplicate registrations',
+                model: `${model}.allowDuplicateRegistrations`,
+                name: '"allowDuplicateRegistrations"',
+                tip: 'Whether to register each node only once, or if duplicate registrations are allowed<br/>\
+                     Nodes will attempt to register themselves, plus those they know about<br/>\
+                     By default, duplicate registrations are not allowed, but you might want to set this property to <b>true</b> if you have multiple network interfaces or if you are facing troubles'
+            })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/bounded-exponential-backoff.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/bounded-exponential-backoff.pug
index 0ddc1e9..84f7f2d 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/bounded-exponential-backoff.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/bounded-exponential-backoff.pug
@@ -19,8 +19,30 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.BoundedExponentialBackoff`
 
 .pc-form-grid-col-20(ng-if-start=`${modelRetryPolicyKind} === 'BoundedExponentialBackoff'`)
-    +number('Base interval:', `${model}.baseSleepTimeMs`, '"beBaseSleepTimeMs"', 'true', '1000', '0', 'Initial amount of time in ms to wait between retries')
+    +form-field__number({
+        label: 'Base interval:',
+        model: `${model}.baseSleepTimeMs`,
+        name: '"beBaseSleepTimeMs"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Initial amount of time in ms to wait between retries'
+    })
 .pc-form-grid-col-20
-    +number('Max interval:', `${model}.maxSleepTimeMs`, '"beMaxSleepTimeMs"', 'true', 'Integer.MAX_VALUE', '0', 'Max time in ms to sleep on each retry')
+    +form-field__number({
+        label: 'Max interval:',
+        model: `${model}.maxSleepTimeMs`,
+        name: '"beMaxSleepTimeMs"',
+        placeholder: 'Integer.MAX_VALUE',
+        min: '0',
+        tip: 'Max time in ms to sleep on each retry'
+    })
 .pc-form-grid-col-20(ng-if-end)
-    +number-min-max('Max retries:', `${model}.maxRetries`, '"beMaxRetries"', 'true', '10', '0', '29', 'Max number of times to retry')
+    +form-field__number({
+        label: 'Max retries:',
+        model: `${model}.maxRetries`,
+        name: '"beMaxRetries"',
+        placeholder: '10',
+        min: '0',
+        max: '29',
+        tip: 'Max number of times to retry'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/custom.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/custom.pug
index 6a1bcfb..1cac6b8 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/custom.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/custom.pug
@@ -21,5 +21,12 @@
 -var required = `${modelAt}.discovery.kind === "ZooKeeper" && ${modelAt}.discovery.ZooKeeper.retryPolicy.kind === "Custom"`
 
 .pc-form-grid-col-60(ng-if-start=`${modelRetryPolicyKind} === 'Custom'`)
-    +java-class('Class name:', `${retry}.className`, '"customClassName"', 'true', required, 'Custom retry policy implementation class name', required)
-.pc-form-grid__break(ng-if-end)
\ No newline at end of file
+    +form-field__java-class({
+        label: 'Class name:',
+        model: `${retry}.className`,
+        name: '"customClassName"',
+        required: required,
+        tip: 'Custom retry policy implementation class name',
+        validationActive: required
+    })
+.pc-form-grid__break(ng-if-end)
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/exponential-backoff.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/exponential-backoff.pug
index bfc3c02..ed0b5ff 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/exponential-backoff.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/exponential-backoff.pug
@@ -19,8 +19,30 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.ExponentialBackoff`
 
 .pc-form-grid-col-20(ng-if-start=`${modelRetryPolicyKind} === 'ExponentialBackoff'`)
-    +number('Base interval:', `${model}.baseSleepTimeMs`, '"expBaseSleepTimeMs"', 'true', '1000', '0', 'Initial amount of time in ms to wait between retries')
+    +form-field__number({
+        label: 'Base interval:',
+        model: `${model}.baseSleepTimeMs`,
+        name: '"expBaseSleepTimeMs"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Initial amount of time in ms to wait between retries'
+    })
 .pc-form-grid-col-20
-    +number-min-max('Max retries:', `${model}.maxRetries`, '"expMaxRetries"', 'true', '10', '0', '29', 'Max number of times to retry')
+    +form-field__number({
+        label: 'Max retries:',
+        model: `${model}.maxRetries`,
+        name: '"expMaxRetries"',
+        placeholder: '10',
+        min: '0',
+        max: '29',
+        tip: 'Max number of times to retry'
+    })
 .pc-form-grid-col-20(ng-if-end)
-    +number('Max interval:', `${model}.maxSleepMs`, '"expMaxSleepMs"', 'true', 'Integer.MAX_VALUE', '0', 'Max time in ms to sleep on each retry')
+    +form-field__number({
+        label: 'Max interval:',
+        model: `${model}.maxSleepMs`,
+        name: '"expMaxSleepMs"',
+        placeholder: 'Integer.MAX_VALUE',
+        min: '0',
+        tip: 'Max time in ms to sleep on each retry'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/forever.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/forever.pug
index 575106b..e61a3c6 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/forever.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/forever.pug
@@ -19,5 +19,12 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.Forever`
 
 .pc-form-grid-col-30(ng-if-start=`${modelRetryPolicyKind} === 'Forever'`)
-    +number('Interval:', `${model}.retryIntervalMs`, '"feRetryIntervalMs"', 'true', '1000', '0', 'Time in ms between retry attempts')
+    +form-field__number({
+        label: 'Interval:',
+        model: `${model}.retryIntervalMs`,
+        name: '"feRetryIntervalMs"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Time in ms between retry attempts'
+    })
 .pc-form-grid__break(ng-if-end)
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/n-times.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/n-times.pug
index dbb54e5..e44d030 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/n-times.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/n-times.pug
@@ -19,6 +19,20 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.NTimes`
 
 .pc-form-grid-col-30(ng-if-start=`${modelRetryPolicyKind} === 'NTimes'`)
-    +number('Retries:', `${model}.n`, '"n"', 'true', '10', '0', 'Number of times to retry')
+    +form-field__number({
+        label: 'Retries:',
+        model: `${model}.n`,
+        name: '"n"',
+        placeholder: '10',
+        min: '0',
+        tip: 'Number of times to retry'
+    })
 .pc-form-grid-col-30(ng-if-end)
-    +number('Interval:', `${model}.sleepMsBetweenRetries`, '"ntSleepMsBetweenRetries"', 'true', '1000', '0', 'Time in ms between retry attempts')
+    +form-field__number({
+        label: 'Interval:',
+        model: `${model}.sleepMsBetweenRetries`,
+        name: '"ntSleepMsBetweenRetries"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Time in ms between retry attempts'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/one-time.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/one-time.pug
index 4ff1644..4d86f5c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/one-time.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/one-time.pug
@@ -19,5 +19,12 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.OneTime`
 
 .pc-form-grid-col-30(ng-if-start=`${modelRetryPolicyKind} === 'OneTime'`)
-    +number('Interval:', `${model}.sleepMsBetweenRetry`, '"oneSleepMsBetweenRetry"', 'true', '1000', '0', 'Time in ms to retry attempt')
-.pc-form-grid__break(ng-if-end)
\ No newline at end of file
+    +form-field__number({
+        label: 'Interval:',
+        model: `${model}.sleepMsBetweenRetry`,
+        name: '"oneSleepMsBetweenRetry"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Time in ms to retry attempt'
+    })
+.pc-form-grid__break(ng-if-end)
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/until-elapsed.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/until-elapsed.pug
index ebde01c..acb1dff 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/until-elapsed.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/general/discovery/zookeeper/retrypolicy/until-elapsed.pug
@@ -19,6 +19,20 @@
 -var model = `${modelAt}.discovery.ZooKeeper.retryPolicy.UntilElapsed`
 
 .pc-form-grid-col-30(ng-if-start=`${modelRetryPolicyKind} === 'UntilElapsed'`)
-    +number('Total time:', `${model}.maxElapsedTimeMs`, '"ueMaxElapsedTimeMs"', 'true', '60000', '0', 'Total time in ms for execution of retry attempt')
+    +form-field__number({
+        label: 'Total time:',
+        model: `${model}.maxElapsedTimeMs`,
+        name: '"ueMaxElapsedTimeMs"',
+        placeholder: '60000',
+        min: '0',
+        tip: 'Total time in ms for execution of retry attempt'
+    })
 .pc-form-grid-col-30(ng-if-end)
-    +number('Interval:', `${model}.sleepMsBetweenRetries`, '"ueSleepMsBetweenRetries"', 'true', '1000', '0', 'Time in ms between retry attempts')
+    +form-field__number({
+        label: 'Interval:',
+        model: `${model}.sleepMsBetweenRetries`,
+        name: '"ueSleepMsBetweenRetries"',
+        placeholder: '1000',
+        min: '0',
+        tip: 'Time in ms between retry attempts'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/hadoop.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/hadoop.pug
index 16a072c..082f7bd 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/hadoop.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/hadoop.pug
@@ -25,51 +25,110 @@
 -var libs = model + '.nativeLibraryNames'
 
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
-    -var uniqueTip = 'Such native library already exists!'
-
     panel-title Hadoop configuration
     panel-description Hadoop Accelerator configuration.
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('Map reduce planner:', plannerModel + '.kind', '"MapReducePlanner"', 'true', 'Default', '[\
-                    {value: "Weighted", label: "Weighted"},\
-                    {value: "Custom", label: "Custom"},\
-                    {value: null, label: "Default"}\
-                ]', 'Implementation of map reduce planner\
-                <ul>\
-                    <li>Weighted - Planner which assigns mappers and reducers based on their "weights"</li>\
-                    <li>Custom - Custom planner implementation</li>\
-                    <li>Default - Default planner implementation</li>\
-                </ul>')
+                +form-field__dropdown({
+                    label: 'Map reduce planner:',
+                    model: `${plannerModel}.kind`,
+                    name: '"MapReducePlanner"',
+                    placeholder: 'Default',
+                    options: '[\
+                        {value: "Weighted", label: "Weighted"},\
+                        {value: "Custom", label: "Custom"},\
+                        {value: null, label: "Default"}\
+                    ]',
+                    tip: 'Implementation of map reduce planner\
+                        <ul>\
+                            <li>Weighted - Planner which assigns mappers and reducers based on their "weights"</li>\
+                            <li>Custom - Custom planner implementation</li>\
+                            <li>Default - Default planner implementation</li>\
+                        </ul>'
+                })
             .pc-form-group.pc-form-grid-row(ng-show=weightedPlanner)
                 .pc-form-grid-col-20
-                    +number('Local mapper weight:', weightedModel + '.localMapperWeight', '"LocalMapperWeight"', 'true', 100, '0',
-                        'This weight is added to a node when a mapper is assigned and it is input split data is located on this node')
+                    +form-field__number({
+                        label: 'Local mapper weight:',
+                        model: `${weightedModel}.localMapperWeight`,
+                        name: '"LocalMapperWeight"',
+                        placeholder: '100',
+                        min: '0',
+                        tip: 'This weight is added to a node when a mapper is assigned and it is input split data is located on this node'
+                    })
                 .pc-form-grid-col-20
-                    +number('Remote mapper weight:', weightedModel + '.remoteMapperWeight', '"remoteMapperWeight"', 'true', 100, '0',
-                        'This weight is added to a node when a mapper is assigned, but it is input split data is not located on this node')
+                    +form-field__number({
+                        label: 'Remote mapper weight:',
+                        model: `${weightedModel}.remoteMapperWeight`,
+                        name: '"remoteMapperWeight"',
+                        placeholder: '100',
+                        min: '0',
+                        tip: 'This weight is added to a node when a mapper is assigned, but it is input split data is not located on this node'
+                    })
                 .pc-form-grid-col-20
-                    +number('Local reducer weight:', weightedModel + '.localReducerWeight', '"localReducerWeight"', 'true', 100, '0',
-                        'This weight is added to a node when a reducer is assigned and the node have at least one assigned mapper')
+                    +form-field__number({
+                        label: 'Local reducer weight:',
+                        model: `${weightedModel}.localReducerWeight`,
+                        name: '"localReducerWeight"',
+                        placeholder: '100',
+                        min: '0',
+                        tip: 'This weight is added to a node when a reducer is assigned and the node have at least one assigned mapper'
+                    })
                 .pc-form-grid-col-30
-                    +number('Remote reducer weight:', weightedModel + '.remoteReducerWeight', '"remoteReducerWeight"', 'true', 100, '0',
-                        'This weight is added to a node when a reducer is assigned, but the node does not have any assigned mappers')
+                    +form-field__number({
+                        label: 'Remote reducer weight:',
+                        model: `${weightedModel}.remoteReducerWeight`,
+                        name: '"remoteReducerWeight"',
+                        placeholder: '100',
+                        min: '0',
+                        tip: 'This weight is added to a node when a reducer is assigned, but the node does not have any assigned mappers'
+                    })
                 .pc-form-grid-col-30
-                    +number('Local mapper weight:', weightedModel + '.preferLocalReducerThresholdWeight', '"preferLocalReducerThresholdWeight"', 'true', 200, '0',
-                        "When threshold is reached, a node with mappers is no longer considered as preferred for further reducer assignments")
+                    +form-field__number({
+                        label: 'Local mapper weight:',
+                        model: `${weightedModel}.preferLocalReducerThresholdWeight`,
+                        name: '"preferLocalReducerThresholdWeight"',
+                        placeholder: '200',
+                        min: '0',
+                        tip: 'When threshold is reached, a node with mappers is no longer considered as preferred for further reducer assignments'
+                    })
             .pc-form-group.pc-form-grid-row(ng-show=customPlanner)
                 .pc-form-grid-col-60
-                    +java-class('Class name:', plannerModel + '.Custom.className', '"MapReducePlannerCustomClass"', 'true', customPlanner,
-                        'Custom planner implementation')
+                    +form-field__java-class({
+                        label: 'Class name:',
+                        model: `${plannerModel}.Custom.className`,
+                        name: '"MapReducePlannerCustomClass"',
+                        required: customPlanner,
+                        tip: 'Custom planner implementation'
+                    })
             .pc-form-grid-col-30
-                +number('Finished job info TTL:', model + '.finishedJobInfoTtl', '"finishedJobInfoTtl"', 'true', '30000', '0',
-                    'Finished job info time-to-live in milliseconds')
+                +form-field__number({
+                    label: 'Finished job info TTL:',
+                    model: `${model}.finishedJobInfoTtl`,
+                    name: '"finishedJobInfoTtl"',
+                    placeholder: '30000',
+                    min: '0',
+                    tip: 'Finished job info time-to-live in milliseconds'
+                })
             .pc-form-grid-col-30
-                +number('Max parallel tasks:', model + '.maxParallelTasks', '"maxParallelTasks"', 'true', 'availableProcessors * 2', '1',
-                    'Max number of local tasks that may be executed in parallel')
+                +form-field__number({
+                    label: 'Max parallel tasks:',
+                    model: `${model}.maxParallelTasks`,
+                    name: '"maxParallelTasks"',
+                    placeholder: 'availableProcessors * 2',
+                    min: '1',
+                    tip: 'Max number of local tasks that may be executed in parallel'
+                })
             .pc-form-grid-col-30
-                +number('Max task queue size:', model + '.maxTaskQueueSize', '"maxTaskQueueSize"', 'true', '8192', '1', 'Max task queue size')
+                +form-field__number({
+                    label: 'Max task queue size:',
+                    model: `${model}.maxTaskQueueSize`,
+                    name: '"maxTaskQueueSize"',
+                    placeholder: '8192',
+                    min: '1',
+                    tip: 'Max task queue size'
+                })
             .pc-form-grid-col-60
                 .ignite-form-field
                     +list-text-field({
@@ -81,7 +140,7 @@
                     })(
                         list-editable-cols=`::[{name: 'Native libraries:'}]`
                     )
-                        +unique-feedback(_, `${uniqueTip}`)
+                        +form-field__error({ error: 'igniteUnique', message: 'Such native library already exists!' })
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterHadoop')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/load-balancing.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/load-balancing.pug
index 20ea1f0..e96b016 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/load-balancing.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/load-balancing.pug
@@ -25,92 +25,156 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Load balancing configuration
     panel-description
-        | Load balancing component balances job distribution among cluster nodes. 
+        | Load balancing component balances job distribution among cluster nodes.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/load-balancing" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6
             mixin clusters-load-balancing-spi
                 .ignite-form-field(ng-init='loadBalancingSpiTbl={type: "loadBalancingSpi", model: "loadBalancingSpi", focusId: "kind", ui: "load-balancing-table"}')
-                    +ignite-form-field__label('Load balancing configurations:', '"loadBalancingConfigurations"')
-                        +tooltip(`Load balancing component balances job distribution among cluster nodes`)
-                    .ignite-form-field__control
-                        -let items = loadBalancingSpi
+                    +form-field__label({ label: 'Load balancing configurations:', name: '"loadBalancingConfigurations"' })
+                        +form-field__tooltip(`Load balancing component balances job distribution among cluster nodes`)
 
-                        list-editable.pc-list-editable-with-legacy-settings-rows(
-                            ng-model=items
-                            name='loadBalancingConfigurations'
-                        )
-                            list-editable-item-edit
-                                - form = '$parent.form'
-                                .settings-row
-                                    +sane-ignite-form-field-dropdown({
-                                        label: 'Load balancing:',
-                                        model: '$item.kind',
-                                        name: '"loadBalancingKind"',
-                                        required: true,
-                                        options: '::$ctrl.Clusters.loadBalancingKinds',
-                                        tip: `Provides the next best balanced node for job execution
-                                        <ul>
-                                            <li>Round-robin - Iterates through nodes in round-robin fashion and pick the next sequential node</li>
-                                            <li>Adaptive - Adapts to overall node performance</li>
-                                            <li>Random - Picks a random node for job execution</li>
-                                            <li>Custom - Custom load balancing implementation</li>
-                                        </ul>`
-                                    })(
-                                        ignite-unique=`${loadBalancingSpi}`
-                                        ignite-unique-property='kind'
-                                    )
-                                        +unique-feedback('"loadBalancingKind"', 'Load balancing SPI of that type is already configured')
-                                .settings-row(ng-show='$item.kind === "RoundRobin"')
-                                    +checkbox('Per task', '$item.RoundRobin.perTask', '"loadBalancingRRPerTask"', 'A new round robin order should be created for every task flag')
-                                .settings-row(ng-show='$item.kind === "Adaptive"')
-                                    +dropdown('Load probe:', '$item.Adaptive.loadProbe.kind', '"loadBalancingAdaptiveLoadProbeKind"', 'true', 'Default', '[\
-                                            {value: "Job", label: "Job count"},\
-                                            {value: "CPU", label: "CPU load"},\
-                                            {value: "ProcessingTime", label: "Processing time"},\
-                                            {value: "Custom", label: "Custom"},\
-                                            {value: null, label: "Default"}\
-                                        ]', 'Implementation of node load probing\
+                    -let items = loadBalancingSpi
+                    list-editable.pc-list-editable-with-legacy-settings-rows(
+                        ng-model=items
+                        name='loadBalancingConfigurations'
+                    )
+                        list-editable-item-edit
+                            - form = '$parent.form'
+                            .settings-row
+                                +form-field__dropdown({
+                                    label: 'Load balancing:',
+                                    model: '$item.kind',
+                                    name: '"loadBalancingKind"',
+                                    required: true,
+                                    options: '::$ctrl.Clusters.loadBalancingKinds',
+                                    tip: `Provides the next best balanced node for job execution
+                                    <ul>
+                                        <li>Round-robin - Iterates through nodes in round-robin fashion and pick the next sequential node</li>
+                                        <li>Adaptive - Adapts to overall node performance</li>
+                                        <li>Random - Picks a random node for job execution</li>
+                                        <li>Custom - Custom load balancing implementation</li>
+                                    </ul>`
+                                })(
+                                    ignite-unique=`${loadBalancingSpi}`
+                                    ignite-unique-property='kind'
+                                )
+                                    +form-field__error({ error: 'igniteUnique', message: 'Load balancing SPI of that type is already configured' })
+                            .settings-row(ng-show='$item.kind === "RoundRobin"')
+                                +form-field__checkbox({
+                                    label: 'Per task',
+                                    model: '$item.RoundRobin.perTask',
+                                    name: '"loadBalancingRRPerTask"',
+                                    tip: 'A new round robin order should be created for every task flag'
+                                })
+                            .settings-row(ng-show='$item.kind === "Adaptive"')
+                                +form-field__dropdown({
+                                    label: 'Load probe:',
+                                    model: '$item.Adaptive.loadProbe.kind',
+                                    name: '"loadBalancingAdaptiveLoadProbeKind"',
+                                    placeholder: 'Default',
+                                    options: '[\
+                                        {value: "Job", label: "Job count"},\
+                                        {value: "CPU", label: "CPU load"},\
+                                        {value: "ProcessingTime", label: "Processing time"},\
+                                        {value: "Custom", label: "Custom"},\
+                                        {value: null, label: "Default"}\
+                                    ]',
+                                    tip: 'Implementation of node load probing\
                                         <ul>\
                                             <li>Job count - Based on active and waiting job count</li>\
                                             <li>CPU load - Based on CPU load</li>\
                                             <li>Processing time - Based on total job processing time</li>\
                                             <li>Custom - Custom load probing implementation</li>\
                                             <li>Default - Default load probing implementation</li>\
-                                        </ul>')
-                                .settings-row(ng-show='$item.kind === "Adaptive" && $item.Adaptive.loadProbe.kind')
-                                    .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "Job"')
-                                        .details-row
-                                            +checkbox('Use average', '$item.Adaptive.loadProbe.Job.useAverage', '"loadBalancingAdaptiveJobUseAverage"', 'Use average CPU load vs. current')
-                                    .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "CPU"')
-                                        .details-row
-                                            +checkbox('Use average', '$item.Adaptive.loadProbe.CPU.useAverage', '"loadBalancingAdaptiveCPUUseAverage"', 'Use average CPU load vs. current')
-                                        .details-row
-                                            +checkbox('Use processors', '$item.Adaptive.loadProbe.CPU.useProcessors', '"loadBalancingAdaptiveCPUUseProcessors"', "divide each node's CPU load by the number of processors on that node")
-                                        .details-row
-                                            +number-min-max-step('Processor coefficient:', '$item.Adaptive.loadProbe.CPU.processorCoefficient',
-                                                '"loadBalancingAdaptiveCPUProcessorCoefficient"', 'true', '1', '0.001', '1', '0.001', 'Coefficient of every CPU')
-                                    .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "ProcessingTime"')
-                                        .details-row
-                                            +checkbox('Use average', '$item.Adaptive.loadProbe.ProcessingTime.useAverage', '"loadBalancingAdaptiveJobUseAverage"', 'Use average execution time vs. current')
-                                    .panel-details(ng-show=loadProbeCustom)
-                                        .details-row
-                                            +java-class('Load brobe implementation:', '$item.Adaptive.loadProbe.Custom.className', '"loadBalancingAdaptiveJobUseClass"', 'true', loadProbeCustom,
-                                                'Custom load balancing SPI implementation class name.', loadProbeCustom)
-                                .settings-row(ng-show='$item.kind === "WeightedRandom"')
-                                    +number('Node weight:', '$item.WeightedRandom.nodeWeight', '"loadBalancingWRNodeWeight"', 'true', 10, '1', 'Weight of node')
-                                .settings-row(ng-show='$item.kind === "WeightedRandom"')
-                                    +checkbox('Use weights', '$item.WeightedRandom.useWeights', '"loadBalancingWRUseWeights"', 'Node weights should be checked when doing random load balancing')
-                                .settings-row(ng-show=loadBalancingCustom)
-                                    +java-class('Load balancing SPI implementation:', '$item.Custom.className', '"loadBalancingClass"', 'true', loadBalancingCustom,
-                                        'Custom load balancing SPI implementation class name.', loadBalancingCustom)
+                                        </ul>'
+                                })
+                            .settings-row(ng-show='$item.kind === "Adaptive" && $item.Adaptive.loadProbe.kind')
+                                .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "Job"')
+                                    .details-row
+                                        +form-field__checkbox({
+                                            label: 'Use average',
+                                            model: '$item.Adaptive.loadProbe.Job.useAverage',
+                                            name: '"loadBalancingAdaptiveJobUseAverage"',
+                                            tip: 'Use average CPU load vs. current'
+                                        })
+                                .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "CPU"')
+                                    .details-row
+                                        +form-field__checkbox({
+                                            label: 'Use average',
+                                            model: '$item.Adaptive.loadProbe.CPU.useAverage',
+                                            name: '"loadBalancingAdaptiveCPUUseAverage"',
+                                            tip: 'Use average CPU load vs. current'
+                                        })
+                                    .details-row
+                                        +form-field__checkbox({
+                                            label: 'Use processors',
+                                            model: '$item.Adaptive.loadProbe.CPU.useProcessors',
+                                            name: '"loadBalancingAdaptiveCPUUseProcessors"',
+                                            tip: 'Divide each node\'s CPU load by the number of processors on that node'
+                                        })
+                                    .details-row
+                                        +form-field__number({
+                                            label: 'Processor coefficient:',
+                                            model: '$item.Adaptive.loadProbe.CPU.processorCoefficient',
+                                            name: '"loadBalancingAdaptiveCPUProcessorCoefficient"',
+                                            placeholder: '1',
+                                            min: '0.001',
+                                            max: '1',
+                                            step: '0.05',
+                                            tip: 'Coefficient of every CPU'
+                                        })
+                                .panel-details(ng-show='$item.Adaptive.loadProbe.kind === "ProcessingTime"')
+                                    .details-row
+                                        +form-field__checkbox({
+                                            label: 'Use average',
+                                            model: '$item.Adaptive.loadProbe.ProcessingTime.useAverage',
+                                            name: '"loadBalancingAdaptiveJobUseAverage"',
+                                            tip: 'Use average execution time vs. current'
+                                        })
+                                .panel-details(ng-show=loadProbeCustom)
+                                    .details-row
+                                        +form-field__java-class({
+                                            label: 'Load brobe implementation:',
+                                            model: '$item.Adaptive.loadProbe.Custom.className',
+                                            name: '"loadBalancingAdaptiveJobUseClass"',
+                                            required: loadProbeCustom,
+                                            tip: 'Custom load balancing SPI implementation class name.',
+                                            validationActive: loadProbeCustom
+                                        })
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$ctrl.Clusters.addLoadBalancingSpi(${model})`
-                                    label-single='load balancing configuration'
-                                    label-multiple='load balancing configurations'
-                                )
+                            .settings-row(ng-show='$item.kind === "WeightedRandom"')
+                                +form-field__number({
+                                    label: 'Node weight:',
+                                    model: '$item.WeightedRandom.nodeWeight',
+                                    name: '"loadBalancingWRNodeWeight"',
+                                    placeholder: '10',
+                                    min: '1',
+                                    tip: 'Weight of node'
+                                })
+                            .settings-row(ng-show='$item.kind === "WeightedRandom"')
+                                +form-field__checkbox({
+                                    label: 'Use weights',
+                                    model: '$item.WeightedRandom.useWeights',
+                                    name: '"loadBalancingWRUseWeights"',
+                                    tip: 'Node weights should be checked when doing random load balancing'
+                                })
+                            .settings-row(ng-show=loadBalancingCustom)
+                                +form-field__java-class({
+                                    label: 'Load balancing SPI implementation:',
+                                    model: '$item.Custom.className',
+                                    name: '"loadBalancingClass"',
+                                    required: loadBalancingCustom,
+                                    tip: 'Custom load balancing SPI implementation class name.',
+                                    validationActive: loadBalancingCustom
+                                })
+
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$ctrl.Clusters.addLoadBalancingSpi(${model})`
+                                label-single='load balancing configuration'
+                                label-multiple='load balancing configurations'
+                            )
 
             +clusters-load-balancing-spi
 
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger.pug
index 7b4b9aa..c30448c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger.pug
@@ -26,8 +26,12 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('Logger:', kind, '"logger"', 'true', 'Default',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Logger:',
+                    model: kind,
+                    name: '"logger"',
+                    placeholder: 'Default',
+                    options: '[\
                         {value: "Log4j", label: "Apache Log4j"},\
                         {value: "Log4j2", label: "Apache Log4j 2"},\
                         {value: "SLF4J", label: "Simple Logging Facade (SLF4J)"},\
@@ -37,17 +41,18 @@
                         {value: "Custom", label: "Custom"},\
                         {value: null, label: "Default"}\
                     ]',
-                    'Logger implementations\
-                    <ul>\
-                        <li>Apache Log4j - log4j-based logger</li>\
-                        <li>Apache Log4j 2 - Log4j2-based logger</li>\
-                        <li>Simple Logging Facade (SLF4J) - SLF4j-based logger</li>\
-                        <li>Java logger (JUL) - built in java logger</li>\
-                        <li>Jakarta Commons Logging (JCL) - wraps any JCL (Jakarta Commons Logging) loggers</li>\
-                        <li>Null logger - logger which does not output anything</li>\
-                        <li>Custom - custom logger implementation</li>\
-                        <li>Default - Apache Log4j if awailable on classpath or Java logger otherwise</li>\
-                    </ul>')
+                    tip: 'Logger implementations\
+                       <ul>\
+                           <li>Apache Log4j - log4j-based logger</li>\
+                           <li>Apache Log4j 2 - Log4j2-based logger</li>\
+                           <li>Simple Logging Facade (SLF4J) - SLF4j-based logger</li>\
+                           <li>Java logger (JUL) - built in java logger</li>\
+                           <li>Jakarta Commons Logging (JCL) - wraps any JCL (Jakarta Commons Logging) loggers</li>\
+                           <li>Null logger - logger which does not output anything</li>\
+                           <li>Custom - custom logger implementation</li>\
+                           <li>Default - Apache Log4j if awailable on classpath or Java logger otherwise</li>\
+                       </ul>'
+                })
             .pc-form-group(ng-show=`${kind} && (${kind} === 'Log4j2' || ${kind} === 'Log4j' || ${kind} === 'Custom')`)
                 .pc-form-grid-row(ng-show=`${kind} === 'Log4j2'`)
                     include ./logger/log4j2
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/custom.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/custom.pug
index a717754..589a9c0 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/custom.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/custom.pug
@@ -21,4 +21,11 @@
 -var required = '$ctrl.clonedCluster.logger.kind === "Custom"'
 
 .pc-form-grid-col-60
-    +java-class('Class:', `${model}.class`, '"customLogger"', 'true', required, 'Logger implementation class name', required)
+    +form-field__java-class({
+        label: 'Class:',
+        model: `${model}.class`,
+        name: '"customLogger"',
+        required: required,
+        tip: 'Logger implementation class name',
+        validationActive: required
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j.pug
index a1cab60..1f216b7 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j.pug
@@ -21,29 +21,48 @@
 -var pathRequired = model + '.mode === "Path" && $ctrl.clonedCluster.logger.kind === "Log4j"'
 
 .pc-form-grid-col-30
-    +dropdown('Level:', `${model}.level`, '"log4jLevel"', 'true', 'Default',
-        '[\
-            {value: "OFF", label: "OFF"},\
-            {value: "FATAL", label: "FATAL"},\
-            {value: "ERROR", label: "ERROR"},\
-            {value: "WARN", label: "WARN"},\
-            {value: "INFO", label: "INFO"},\
-            {value: "DEBUG", label: "DEBUG"},\
-            {value: "TRACE", label: "TRACE"},\
-            {value: "ALL", label: "ALL"},\
-            {value: null, label: "Default"}\
-        ]',
-        'Level for internal log4j implementation')
+    +form-field__dropdown({
+        label: 'Level:',
+        model: `${model}.level`,
+        name: '"log4jLevel"',
+        placeholder: 'Default',
+        options: '[\
+                    {value: "OFF", label: "OFF"},\
+                    {value: "FATAL", label: "FATAL"},\
+                    {value: "ERROR", label: "ERROR"},\
+                    {value: "WARN", label: "WARN"},\
+                    {value: "INFO", label: "INFO"},\
+                    {value: "DEBUG", label: "DEBUG"},\
+                    {value: "TRACE", label: "TRACE"},\
+                    {value: "ALL", label: "ALL"},\
+                    {value: null, label: "Default"}\
+                ]',
+        tip: 'Level for internal log4j implementation'
+    })
+
 .pc-form-grid-col-30
-    +dropdown-required('Logger configuration:', `${model}.mode`, '"log4jMode"', 'true', 'true', 'Choose logger mode',
-        '[\
-            {value: "Default", label: "Default"},\
-            {value: "Path", label: "Path"}\
-        ]',
-        'Choose logger configuration\
-        <ul>\
-            <li>Default - default logger</li>\
-            <li>Path - path or URI to XML configuration</li>\
-        </ul>')
+    +form-field__dropdown({
+        label: 'Logger configuration:',
+        model: `${model}.mode`,
+        name: '"log4jMode"',
+        required: 'true',
+        placeholder: 'Choose logger mode',
+        options: '[\
+                    {value: "Default", label: "Default"},\
+                    {value: "Path", label: "Path"}\
+                ]',
+        tip: 'Choose logger configuration\
+                <ul>\
+                    <li>Default - default logger</li>\
+                    <li>Path - path or URI to XML configuration</li>\
+                </ul>'
+    })
 .pc-form-grid-col-60(ng-show=pathRequired)
-    +text('Path:', `${model}.path`, '"log4jPath"', pathRequired, 'Input path', 'Path or URI to XML configuration')
+    +form-field__text({
+        label: 'Path:',
+        model: `${model}.path`,
+        name: '"log4jPath"',
+        required: pathRequired,
+        placeholder: 'Input path',
+        tip: 'Path or URI to XML configuration'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j2.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j2.pug
index fc94e06..c5b785b 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j2.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/logger/log4j2.pug
@@ -21,18 +21,30 @@
 -var log4j2Required = '$ctrl.clonedCluster.logger.kind === "Log4j2"'
 
 .pc-form-grid-col-60
-    +dropdown('Level:', `${model}.level`, '"log4j2Level"', 'true', 'Default',
-        '[\
-            {value: "OFF", label: "OFF"},\
-            {value: "FATAL", label: "FATAL"},\
-            {value: "ERROR", label: "ERROR"},\
-            {value: "WARN", label: "WARN"},\
-            {value: "INFO", label: "INFO"},\
-            {value: "DEBUG", label: "DEBUG"},\
-            {value: "TRACE", label: "TRACE"},\
-            {value: "ALL", label: "ALL"},\
-            {value: null, label: "Default"}\
-        ]',
-        'Level for internal log4j2 implementation')
+    +form-field__dropdown({
+        label: 'Level:',
+        model: `${model}.level`,
+        name: '"log4j2Level"',
+        placeholder: 'Default',
+        options: '[\
+                    {value: "OFF", label: "OFF"},\
+                    {value: "FATAL", label: "FATAL"},\
+                    {value: "ERROR", label: "ERROR"},\
+                    {value: "WARN", label: "WARN"},\
+                    {value: "INFO", label: "INFO"},\
+                    {value: "DEBUG", label: "DEBUG"},\
+                    {value: "TRACE", label: "TRACE"},\
+                    {value: "ALL", label: "ALL"},\
+                    {value: null, label: "Default"}\
+                ]',
+        tip: 'Level for internal log4j2 implementation'
+    })
 .pc-form-grid-col-60
-    +text('Path:', `${model}.path`, '"log4j2Path"', log4j2Required, 'Input path', 'Path or URI to XML configuration')
+    +form-field__text({
+        label: 'Path:',
+        model: `${model}.path`,
+        name: '"log4j2Path"',
+        required: log4j2Required,
+        placeholder: 'Input path',
+        tip: 'Path or URI to XML configuration'
+    })
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/marshaller.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/marshaller.pug
index baa4956..23393b8 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/marshaller.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/marshaller.pug
@@ -24,52 +24,91 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Marshaller
     panel-description
-        | Marshaller allows to marshal or unmarshal objects in grid. 
-        | It provides serialization/deserialization mechanism for all instances that are sent across networks or are otherwise serialized. 
-        | By default BinaryMarshaller will be used. 
+        | Marshaller allows to marshal or unmarshal objects in grid.
+        | It provides serialization/deserialization mechanism for all instances that are sent across networks or are otherwise serialized.
+        | By default BinaryMarshaller will be used.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/binary-marshaller" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +dropdown('Marshaller:', marshaller + '.kind', '"kind"', 'true', 'Default', '$ctrl.marshallerVariant',
-                    'Instance of marshaller to use in grid<br/>\
-                    <ul>\
-                        <li>OptimizedMarshaller - Optimized implementation of marshaller</li>\
-                        <li>JdkMarshaller - Marshaller based on JDK serialization mechanism</li>\
-                        <li>Default - BinaryMarshaller serialize and deserialize all objects in the binary format</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Marshaller:',
+                    model: marshaller + '.kind',
+                    name: '"kind"',
+                    placeholder: 'Default',
+                    options: '$ctrl.marshallerVariant',
+                    tip: 'Instance of marshaller to use in grid<br/>\
+                       <ul>\
+                           <li>OptimizedMarshaller - Optimized implementation of marshaller</li>\
+                           <li>JdkMarshaller - Marshaller based on JDK serialization mechanism</li>\
+                           <li>Default - BinaryMarshaller serialize and deserialize all objects in the binary format</li>\
+                       </ul>'
+                })
             .pc-form-grid-col-60(ng-if='$ctrl.available(["2.0.0", "2.1.0"])')
-                +dropdown('Marshaller:', marshaller + '.kind', '"kind"', 'true', 'Default', '$ctrl.marshallerVariant',
-                    'Instance of marshaller to use in grid<br/>\
-                    <ul>\
-                        <li>JdkMarshaller - Marshaller based on JDK serialization mechanism</li>\
-                        <li>Default - BinaryMarshaller serialize and deserialize all objects in the binary format</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Marshaller:',
+                    model: marshaller + '.kind',
+                    name: '"kind"',
+                    placeholder: 'Default',
+                    options: '$ctrl.marshallerVariant',
+                    tip: 'Instance of marshaller to use in grid<br/>\
+                        <ul>\
+                            <li>JdkMarshaller - Marshaller based on JDK serialization mechanism</li>\
+                            <li>Default - BinaryMarshaller serialize and deserialize all objects in the binary format</li>\
+                        </ul>'
+                })
             .pc-form-group.pc-form-grid-row(
                 ng-show=`${marshaller}.kind === 'OptimizedMarshaller'`
                 ng-if='$ctrl.available(["1.0.0", "2.1.0"])'
             )
                 .pc-form-grid-col-60
-                    +number('Streams pool size:', `${optMarshaller}.poolSize`, '"poolSize"', 'true', '0', '0',
-                        'Specifies size of cached object streams used by marshaller<br/>\
-                        Object streams are cached for performance reason to avoid costly recreation for every serialization routine<br/>\
-                        If 0 (default), pool is not used and each thread has its own cached object stream which it keeps reusing<br/>\
-                        Since each stream has an internal buffer, creating a stream for each thread can lead to high memory consumption if many large messages are marshalled or unmarshalled concurrently<br/>\
-                        Consider using pool in this case. This will limit number of streams that can be created and, therefore, decrease memory consumption<br/>\
-                        NOTE: Using streams pool can decrease performance since streams will be shared between different threads which will lead to more frequent context switching')
+                    +form-field__number({
+                        label: 'Streams pool size:',
+                        model: `${optMarshaller}.poolSize`,
+                        name: '"poolSize"',
+                        placeholder: '0',
+                        min: '0',
+                        tip: 'Specifies size of cached object streams used by marshaller<br/>\
+                             Object streams are cached for performance reason to avoid costly recreation for every serialization routine<br/>\
+                             If 0 (default), pool is not used and each thread has its own cached object stream which it keeps reusing<br/>\
+                             Since each stream has an internal buffer, creating a stream for each thread can lead to high memory consumption if many large messages are marshalled or unmarshalled concurrently<br/>\
+                             Consider using pool in this case. This will limit number of streams that can be created and, therefore, decrease memory consumption<br/>\
+                             NOTE: Using streams pool can decrease performance since streams will be shared between different threads which will lead to more frequent context switching'
+                    })
                 .pc-form-grid-col-60
-                    +checkbox('Require serializable', `${optMarshaller}.requireSerializable`, '"requireSerializable"',
-                        'Whether marshaller should require Serializable interface or not')
+                    +form-field__checkbox({
+                        label: 'Require serializable',
+                        model: `${optMarshaller}.requireSerializable`,
+                        name: '"requireSerializable"',
+                        tip: 'Whether marshaller should require Serializable interface or not'
+                    })
             .pc-form-grid-col-60
-                +checkbox('Marshal local jobs', `${model}.marshalLocalJobs`, '"marshalLocalJobs"', 'If this flag is enabled, jobs mapped to local node will be marshalled as if it was remote node')
+                +form-field__checkbox({
+                    label: 'Marshal local jobs',
+                    model: `${model}.marshalLocalJobs`,
+                    name: '"marshalLocalJobs"',
+                    tip: 'If this flag is enabled, jobs mapped to local node will be marshalled as if it was remote node'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-30(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Keep alive time:', `${model}.marshallerCacheKeepAliveTime`, '"marshallerCacheKeepAliveTime"', 'true', '10000', '0',
-                    'Keep alive time of thread pool that is in charge of processing marshaller messages')
+                +form-field__number({
+                    label: 'Keep alive time:',
+                    model: `${model}.marshallerCacheKeepAliveTime`,
+                    name: '"marshallerCacheKeepAliveTime"',
+                    placeholder: '10000',
+                    min: '0',
+                    tip: 'Keep alive time of thread pool that is in charge of processing marshaller messages'
+                })
             .pc-form-grid-col-30(ng-if-end)
-                +number('Pool size:', `${model}.marshallerCacheThreadPoolSize`, '"marshallerCacheThreadPoolSize"', 'true', 'max(8, availableProcessors) * 2', '1',
-                    'Default size of thread pool that is in charge of processing marshaller messages')
+                +form-field__number({
+                    label: 'Pool size:',
+                    model: `${model}.marshallerCacheThreadPoolSize`,
+                    name: '"marshallerCacheThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors) * 2',
+                    min: '1',
+                    tip: 'Default size of thread pool that is in charge of processing marshaller messages'
+                })
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterMarshaller')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/memory.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/memory.pug
index 181ae45..7712cb7 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/memory.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/memory.pug
@@ -21,18 +21,18 @@
 -var memoryPolicies = model + '.memoryPolicies'
 
 panel-collapsible(
-    ng-form=form
-    on-open=`ui.loadPanel('${form}')`
-    ng-show='$ctrl.available(["2.0.0", "2.3.0"])'
+ng-form=form
+on-open=`ui.loadPanel('${form}')`
+ng-show='$ctrl.available(["2.0.0", "2.3.0"])'
 )
     panel-title Memory configuration
     panel-description
-        | Page memory is a manageable off-heap based memory architecture that is split into pages of fixed size. 
+        | Page memory is a manageable off-heap based memory architecture that is split into pages of fixed size.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/durable-memory" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`$ctrl.available(["2.0.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Page size:',
                     model: `${model}.pageSize`,
                     name: '"MemoryConfigurationPageSize"',
@@ -40,13 +40,19 @@
                     tip: 'Every memory region is split on pages of fixed size'
                 })
             .pc-form-grid-col-60
-                +number('Concurrency level:', model + '.concurrencyLevel', '"MemoryConfigurationConcurrencyLevel"',
-                'true', 'availableProcessors', '2', 'The number of concurrent segments in Ignite internal page mapping tables')
+                +form-field__number({
+                    label: 'Concurrency level:',
+                    model: `${model}.concurrencyLevel`,
+                    name: '"MemoryConfigurationConcurrencyLevel"',
+                    placeholder: 'availableProcessors',
+                    min: '2',
+                    tip: 'The number of concurrent segments in Ignite internal page mapping tables'
+                })
             .pc-form-grid-col-60.pc-form-group__text-title
                 span System cache
             .pc-form-group.pc-form-grid-row
                 .pc-form-grid-col-30
-                    pc-form-field-size(
+                    form-field-size(
                         label='Initial size:'
                         ng-model=`${model}.systemCacheInitialSize`
                         name='systemCacheInitialSize'
@@ -56,7 +62,7 @@
                         on-scale-change='systemCacheInitialSizeScale = $event'
                     )
                 .pc-form-grid-col-30
-                    pc-form-field-size(
+                    form-field-size(
                         label='Max size:'
                         ng-model=`${model}.systemCacheMaxSize`
                         name='systemCacheMaxSize'
@@ -69,7 +75,7 @@
                 span Memory policies
             .pc-form-group.pc-form-grid-row
                 .pc-form-grid-col-60
-                    +sane-ignite-form-field-text({
+                    +form-field__text({
                         label: 'Default memory policy name:',
                         model: `${model}.defaultMemoryPolicyName`,
                         name: '"defaultMemoryPolicyName"',
@@ -78,116 +84,154 @@
                     })(
                         pc-not-in-collection='::$ctrl.Clusters.memoryPolicy.name.invalidValues'
                         ui-validate=`{
-                            defaultMemoryPolicyExists: '$ctrl.Clusters.memoryPolicy.customValidators.defaultMemoryPolicyExists($value, ${memoryPolicies})'
-                        }`
+                                defaultMemoryPolicyExists: '$ctrl.Clusters.memoryPolicy.customValidators.defaultMemoryPolicyExists($value, ${memoryPolicies})'
+                            }`
                         ui-validate-watch=`"${memoryPolicies}"`
                         ui-validate-watch-object-equality='true'
                         ng-model-options='{allowInvalid: true}'
                     )
-                        +form-field-feedback('"MemoryPolicyName"', 'notInCollection', '{{::$ctrl.Clusters.memoryPolicy.name.invalidValues[0]}} is reserved for internal use')
-                        +form-field-feedback('"MemoryPolicyName"', 'defaultMemoryPolicyExists', 'Memory policy with that name should be configured')
+                        +form-field__error({ error: 'notInCollection', message: '{{::$ctrl.Clusters.memoryPolicy.name.invalidValues[0]}} is reserved for internal use' })
+                        +form-field__error({ error: 'defaultMemoryPolicyExists', message: 'Memory policy with that name should be configured' })
                 .pc-form-grid-col-60(ng-hide='(' + model + '.defaultMemoryPolicyName || "default") !== "default"')
-                    +number('Default memory policy size:', model + '.defaultMemoryPolicySize', '"defaultMemoryPolicySize"',
-                    'true', '0.8 * totalMemoryAvailable', '10485760',
-                    'Specify desired size of default memory policy without having to use more verbose syntax of MemoryPolicyConfiguration elements')
+                    +form-field__number({
+                        label: 'Default memory policy size:',
+                        model: `${model}.defaultMemoryPolicySize`,
+                        name: '"defaultMemoryPolicySize"',
+                        placeholder: '0.8 * totalMemoryAvailable',
+                        min: '10485760',
+                        tip: 'Specify desired size of default memory policy without having to use more verbose syntax of MemoryPolicyConfiguration elements'
+                    })
                 .pc-form-grid-col-60
                     mixin clusters-memory-policies
                         .ignite-form-field(ng-init='memoryPoliciesTbl={type: "memoryPolicies", model: "memoryPolicies", focusId: "name", ui: "memory-policies-table"}')
-                            +ignite-form-field__label('Configured policies:', '"configuredPolicies"')
-                                +tooltip(`List of configured policies`)
-                            .ignite-form-field__control
-                                -let items = memoryPolicies
+                            +form-field__label({ label: 'Configured policies:', name: '"configuredPolicies"' })
+                                +form-field__tooltip({ title: `List of configured policies` })
 
-                                list-editable.pc-list-editable-with-form-grid(ng-model=items name='memoryPolicies')
-                                    list-editable-item-edit.pc-form-grid-row
-                                        - form = '$parent.form'
-                                        .pc-form-grid-col-60
-                                            +sane-ignite-form-field-text({
-                                                label: 'Name:',
-                                                model: '$item.name',
-                                                name: '"MemoryPolicyName"',
-                                                placeholder: '{{ ::$ctrl.Clusters.memoryPolicy.name.default }}',
-                                                tip: 'Memory policy name'
-                                            })(
-                                                ui-validate=`{
+                            -let items = memoryPolicies
+                            list-editable.pc-list-editable-with-form-grid(ng-model=items name='memoryPolicies')
+                                list-editable-item-edit.pc-form-grid-row
+                                    - form = '$parent.form'
+                                    .pc-form-grid-col-60
+                                        +form-field__text({
+                                            label: 'Name:',
+                                            model: '$item.name',
+                                            name: '"MemoryPolicyName"',
+                                            placeholder: '{{ ::$ctrl.Clusters.memoryPolicy.name.default }}',
+                                            tip: 'Memory policy name'
+                                        })(
+                                            ui-validate=`{
                                                     uniqueMemoryPolicyName: '$ctrl.Clusters.memoryPolicy.customValidators.uniqueMemoryPolicyName($item, ${items})'
                                                 }`
-                                                ui-validate-watch=`"${items}"`
-                                                ui-validate-watch-object-equality='true'
-                                                pc-not-in-collection='::$ctrl.Clusters.memoryPolicy.name.invalidValues'
-                                                ng-model-options='{allowInvalid: true}'
-                                            )
-                                                +form-field-feedback('"MemoryPolicyName', 'uniqueMemoryPolicyName', 'Memory policy with that name is already configured')
-                                                +form-field-feedback('"MemoryPolicyName', 'notInCollection', '{{::$ctrl.Clusters.memoryPolicy.name.invalidValues[0]}} is reserved for internal use')
-                                        .pc-form-grid-col-60
-                                            pc-form-field-size(
-                                                label='Initial size:'
-                                                ng-model='$item.initialSize'
-                                                ng-model-options='{allowInvalid: true}'
-                                                name='MemoryPolicyInitialSize'
-                                                placeholder='{{ $ctrl.Clusters.memoryPolicy.initialSize.default / scale.value }}'
-                                                min='{{ ::$ctrl.Clusters.memoryPolicy.initialSize.min }}'
-                                                tip='Initial memory region size defined by this memory policy'
-                                                on-scale-change='scale = $event'
-                                            )
-                                        .pc-form-grid-col-60
-                                            pc-form-field-size(
-                                                ng-model='$item.maxSize'
-                                                ng-model-options='{allowInvalid: true}'
-                                                name='MemoryPolicyMaxSize'
-                                                label='Maximum size:'
-                                                placeholder='{{ ::$ctrl.Clusters.memoryPolicy.maxSize.default }}'
-                                                min='{{ $ctrl.Clusters.memoryPolicy.maxSize.min($item) }}'
-                                                tip='Maximum memory region size defined by this memory policy'
-                                            )
-                                        .pc-form-grid-col-60
-                                            +text('Swap file path:', '$item.swapFilePath', '"MemoryPolicySwapFilePath"', 'false',
-                                            'Input swap file path', 'An optional path to a memory mapped file for this memory policy')
-                                        .pc-form-grid-col-60
-                                            +dropdown('Eviction mode:', '$item.pageEvictionMode', '"MemoryPolicyPageEvictionMode"', 'true', 'DISABLED',
-                                            '[\
+                                            ui-validate-watch=`"${items}"`
+                                            ui-validate-watch-object-equality='true'
+                                            pc-not-in-collection='::$ctrl.Clusters.memoryPolicy.name.invalidValues'
+                                            ng-model-options='{allowInvalid: true}'
+                                        )
+                                            +form-field__error({ error: 'uniqueMemoryPolicyName', message: 'Memory policy with that name is already configured' })
+                                            +form-field__error({ error: 'notInCollection', message: '{{::$ctrl.Clusters.memoryPolicy.name.invalidValues[0]}} is reserved for internal use' })
+                                    .pc-form-grid-col-60
+                                        form-field-size(
+                                            label='Initial size:'
+                                            ng-model='$item.initialSize'
+                                            ng-model-options='{allowInvalid: true}'
+                                            name='MemoryPolicyInitialSize'
+                                            placeholder='{{ $ctrl.Clusters.memoryPolicy.initialSize.default / scale.value }}'
+                                            min='{{ ::$ctrl.Clusters.memoryPolicy.initialSize.min }}'
+                                            tip='Initial memory region size defined by this memory policy'
+                                            on-scale-change='scale = $event'
+                                        )
+                                    .pc-form-grid-col-60
+                                        form-field-size(
+                                            ng-model='$item.maxSize'
+                                            ng-model-options='{allowInvalid: true}'
+                                            name='MemoryPolicyMaxSize'
+                                            label='Maximum size:'
+                                            placeholder='{{ ::$ctrl.Clusters.memoryPolicy.maxSize.default }}'
+                                            min='{{ $ctrl.Clusters.memoryPolicy.maxSize.min($item) }}'
+                                            tip='Maximum memory region size defined by this memory policy'
+                                        )
+                                    .pc-form-grid-col-60
+                                        +form-field__text({
+                                            label: 'Swap file path:',
+                                            model: '$item.swapFilePath',
+                                            name: '"MemoryPolicySwapFilePath"',
+                                            placeholder: 'Input swap file path',
+                                            tip: 'An optional path to a memory mapped file for this memory policy'
+                                        })
+                                    .pc-form-grid-col-60
+                                        +form-field__dropdown({
+                                            label: 'Eviction mode:',
+                                            model: '$item.pageEvictionMode',
+                                            name: '"MemoryPolicyPageEvictionMode"',
+                                            placeholder: 'DISABLED',
+                                            options: '[\
                                                 {value: "DISABLED", label: "DISABLED"},\
                                                 {value: "RANDOM_LRU", label: "RANDOM_LRU"},\
-                                                {value: "RANDOM_2_LRU", label: "RANDOM_2_LRU"}\
+                                                [value: "RANDOM_2_LRU", label: "RANDOM_2_LRU"}\
                                             ]',
-                                            'An algorithm for memory pages eviction\
-                                            <ul>\
-                                                <li>DISABLED - Eviction is disabled</li>\
-                                                <li>RANDOM_LRU - Once a memory region defined by a memory policy is configured, an off - heap array is allocated to track last usage timestamp for every individual data page</li>\
-                                                <li>RANDOM_2_LRU - Differs from Random - LRU only in a way that two latest access timestamps are stored for every data page</li>\
-                                            </ul>')
-                                        .pc-form-grid-col-30
-                                            +number-min-max-step('Eviction threshold:', '$item.evictionThreshold', '"MemoryPolicyEvictionThreshold"',
-                                            'true', '0.9', '0.5', '0.999', '0.001', 'A threshold for memory pages eviction initiation')
-                                        .pc-form-grid-col-30
-                                            +sane-ignite-form-field-number({
-                                                label: 'Empty pages pool size:',
-                                                model: '$item.emptyPagesPoolSize',
-                                                name: '"MemoryPolicyEmptyPagesPoolSize"',
-                                                placeholder: '{{ ::$ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.default }}',
-                                                min: '{{ ::$ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.min }}',
-                                                max: '{{ $ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.max($ctrl.clonedCluster, $item) }}',
-                                                tip: 'The minimal number of empty pages to be present in reuse lists for this memory policy'
-                                            })
+                                            tip: 'An algorithm for memory pages eviction\
+                                                 <ul>\
+                                                    <li>DISABLED - Eviction is disabled</li>\
+                                                    <li>RANDOM_LRU - Once a memory region defined by a memory policy is configured, an off - heap array is allocated to track last usage timestamp for every individual data page</li>\
+                                                    <li>RANDOM_2_LRU - Differs from Random - LRU only in a way that two latest access timestamps are stored for every data page</li>\
+                                                 </ul>'
+                                        })
+                                    .pc-form-grid-col-30
+                                        +form-field__number({
+                                            label: 'Eviction threshold:',
+                                            model: '$item.evictionThreshold',
+                                            name: '"MemoryPolicyEvictionThreshold"',
+                                            placeholder: '0.9',
+                                            min: '0.5',
+                                            max: '0.999',
+                                            step: '0.05',
+                                            tip: 'A threshold for memory pages eviction initiation'
+                                        })
+                                    .pc-form-grid-col-30
+                                        +form-field__number({
+                                            label: 'Empty pages pool size:',
+                                            model: '$item.emptyPagesPoolSize',
+                                            name: '"MemoryPolicyEmptyPagesPoolSize"',
+                                            placeholder: '{{ ::$ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.default }}',
+                                            min: '{{ ::$ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.min }}',
+                                            max: '{{ $ctrl.Clusters.memoryPolicy.emptyPagesPoolSize.max($ctrl.clonedCluster, $item) }}',
+                                            tip: 'The minimal number of empty pages to be present in reuse lists for this memory policy'
+                                        })
 
-                                        //- Since ignite 2.1
-                                        .pc-form-grid-col-30(ng-if-start='$ctrl.available("2.1.0")')
-                                            +number('Sub intervals:', '$item.subIntervals', '"MemoryPolicySubIntervals"',
-                                                'true', '5', '1', 'A number of sub-intervals the whole rate time interval will be split into to calculate allocation and eviction rates')
-                                        .pc-form-grid-col-30(ng-if-end)
-                                            +number('Rate time interval:', '$item.rateTimeInterval', '"MemoryPolicyRateTimeInterval"',
-                                                'true', '60000', '1000', 'Time interval for allocation rate and eviction rate monitoring purposes')
-                                                
-                                        .pc-form-grid-col-60
-                                            +checkbox('Metrics enabled', '$item.metricsEnabled', '"MemoryPolicyMetricsEnabled"',
-                                            'Whether memory metrics are enabled by default on node startup')
+                                    //- Since ignite 2.1
+                                    .pc-form-grid-col-30(ng-if-start='$ctrl.available("2.1.0")')
+                                        +form-field__number({
+                                            label: 'Sub intervals:',
+                                            model: '$item.subIntervals',
+                                            name: '"MemoryPolicySubIntervals"',
+                                            placeholder: '5',
+                                            min: '1',
+                                            tip: 'A number of sub-intervals the whole rate time interval will be split into to calculate allocation and eviction rates'
+                                        })
+                                    .pc-form-grid-col-30(ng-if-end)
+                                        +form-field__number({
+                                            label: 'Rate time interval:',
+                                            model: '$item.rateTimeInterval',
+                                            name: '"MemoryPolicyRateTimeInterval"',
+                                            placeholder: '60000',
+                                            min: '1000',
+                                            tip: 'Time interval for allocation rate and eviction rate monitoring purposes'
+                                        })
 
-                                    list-editable-no-items
-                                        list-editable-add-item-button(
-                                            add-item=`$ctrl.Clusters.addMemoryPolicy($ctrl.clonedCluster)`
-                                            label-single='memory policy configuration'
-                                            label-multiple='memory policy configurations'
-                                        )
+                                    .pc-form-grid-col-60
+                                        +form-field__checkbox({
+                                            label: 'Metrics enabled',
+                                            model: '$item.metricsEnabled',
+                                            name: '"MemoryPolicyMetricsEnabled"',
+                                            tip: 'Whether memory metrics are enabled by default on node startup'
+                                        })
+
+                                list-editable-no-items
+                                    list-editable-add-item-button(
+                                        add-item=`$ctrl.Clusters.addMemoryPolicy($ctrl.clonedCluster)`
+                                        label-single='memory policy configuration'
+                                        label-multiple='memory policy configurations'
+                                    )
 
                     +clusters-memory-policies
 
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/metrics.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/metrics.pug
index c4c9260..f99efbb 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/metrics.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/metrics.pug
@@ -25,22 +25,46 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +number('Expire time:', `${model}.metricsExpireTime`, '"metricsExpireTime"', 'true', 'Long.MAX_VALUE', '1',
-                    'Time in milliseconds after which a certain metric value is considered expired')
+                +form-field__number({
+                    label: 'Expire time:',
+                    model: `${model}.metricsExpireTime`,
+                    name: '"metricsExpireTime"',
+                    placeholder: 'Long.MAX_VALUE',
+                    min: '0',
+                    tip: 'Time in milliseconds after which a certain metric value is considered expired'
+                })
             .pc-form-grid-col-30
-                +number('History size:', `${model}.metricsHistorySize`, '"metricsHistorySize"', 'true', '10000', '1',
-                    'Number of metrics kept in history to compute totals and averages')
+                +form-field__number({
+                    label: 'History size:',
+                    model: `${model}.metricsHistorySize`,
+                    name: '"metricsHistorySize"',
+                    placeholder: '10000',
+                    min: '1',
+                    tip: 'Number of metrics kept in history to compute totals and averages'
+                })
             .pc-form-grid-col-30
-                +number('Log frequency:', `${model}.metricsLogFrequency`, '"metricsLogFrequency"', 'true', '60000', '0',
-                    'Frequency of metrics log print out<br/>\ ' +
-                    'When <b>0</b> log print of metrics is disabled')
+                +form-field__number({
+                    label: 'Log frequency:',
+                    model: `${model}.metricsLogFrequency`,
+                    name: '"metricsLogFrequency"',
+                    placeholder: '60000',
+                    min: '0',
+                    tip: 'Frequency of metrics log print out<br/>\ ' +
+                    'When <b>0</b> log print of metrics is disabled'
+                })
             .pc-form-grid-col-30
-                +number('Update frequency:', `${model}.metricsUpdateFrequency`, '"metricsUpdateFrequency"', 'true', '2000', '-1',
-                    'Job metrics update frequency in milliseconds\
-                    <ul>\
-                        <li>If set to -1 job metrics are never updated</li>\
-                        <li>If set to 0 job metrics are updated on each job start and finish</li>\
-                        <li>Positive value defines the actual update frequency</li>\
-                    </ul>')
+                +form-field__number({
+                    label: 'Update frequency:',
+                    model: `${model}.metricsUpdateFrequency`,
+                    name: '"metricsUpdateFrequency"',
+                    placeholder: '2000',
+                    min: '-1',
+                    tip: 'Job metrics update frequency in milliseconds\
+                        <ul>\
+                            <li>If set to -1 job metrics are never updated</li>\
+                            <li>If set to 0 job metrics are updated on each job start and finish</li>\
+                            <li>Positive value defines the actual update frequency</li>\
+                        </ul>'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterMetrics')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/misc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/misc.pug
index cdc7258..d0e5d9f 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/misc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/misc.pug
@@ -25,34 +25,69 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +text('Work directory:', model + '.workDirectory', '"workDirectory"', 'false', 'Input work directory',
-                    'Ignite work directory.<br/>\
-                    If not provided, the method will use work directory under IGNITE_HOME specified by IgniteConfiguration#setIgniteHome(String)\
-                    or IGNITE_HOME environment variable or system property.')
+                +form-field__text({
+                    label: 'Work directory:',
+                    model: `${model}.workDirectory`,
+                    name: '"workDirectory"',
+                    placeholder: 'Input work directory',
+                    tip: 'Ignite work directory.<br/>\
+                          If not provided, the method will use work directory under IGNITE_HOME specified by IgniteConfiguration#setIgniteHome(String)\
+                          or IGNITE_HOME environment variable or system property.'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available("2.0.0")')
-                +text('Consistent ID:', model + '.consistentId', '"ConsistentId"', 'false', 'Input consistent ID', 'Consistent globally unique node ID which survives node restarts')
+                +form-field__text({
+                    label: 'Consistent ID:',
+                    model: `${model}.consistentId`,
+                    name: '"ConsistentId"',
+                    placeholder: 'Input consistent ID',
+                    tip: 'Consistent globally unique node ID which survives node restarts'
+                })
             .pc-form-grid-col-60
-                +java-class('Warmup closure:', model + '.warmupClosure', '"warmupClosure"', 'true', 'false', 'This closure will be executed before actual grid instance start')
+                +form-field__java-class({
+                    label: 'Warmup closure:',
+                    model: `${model}.warmupClosure`,
+                    name: '"warmupClosure"',
+                    tip: 'This closure will be executed before actual grid instance start'
+                })
+
             .pc-form-grid-col-60
-                +checkbox('Active on start', model + '.activeOnStart', '"activeOnStart"',
-                    'If cluster is not active on start, there will be no cache partition map exchanges performed until the cluster is activated')
+                +form-field__checkbox({
+                    label: 'Active on start',
+                    model: model + '.activeOnStart',
+                    name: '"activeOnStart"',
+                    tip: 'If cluster is not active on start, there will be no cache partition map exchanges performed until the cluster is activated'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +checkbox('Cache sanity check enabled', model + '.cacheSanityCheckEnabled', '"cacheSanityCheckEnabled"',
-                    'If enabled, then Ignite will perform the following checks and throw an exception if check fails<br/>\
-                    <ul>\
-                    <li>Cache entry is not externally locked with lock or lockAsync methods when entry is enlisted to transaction</li>\
-                    <li>Each entry in affinity group - lock transaction has the same affinity key as was specified on affinity transaction start</li>\
-                    <li>Each entry in partition group - lock transaction belongs to the same partition as was specified on partition transaction start</li>\
-                    </ul>')
+                +form-field__checkbox({
+                    label: 'Cache sanity check enabled',
+                    model: model + '.cacheSanityCheckEnabled',
+                    name: '"cacheSanityCheckEnabled"',
+                    tip: 'If enabled, then Ignite will perform the following checks and throw an exception if check fails<br/>\
+                          <ul>\
+                              <li>Cache entry is not externally locked with lock or lockAsync methods when entry is enlisted to transaction</li>\
+                              <li>Each entry in affinity group - lock transaction has the same affinity key as was specified on affinity transaction start</li>\
+                              <li>Each entry in partition group - lock transaction belongs to the same partition as was specified on partition transaction start</li>\
+                          </ul>'
+                })
 
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.1.0"])')
-                +checkbox('Late affinity assignment', model + '.lateAffinityAssignment', '"lateAffinityAssignment"',
-                    'With late affinity assignment mode if primary node was changed for some partition this nodes becomes primary only when rebalancing for all assigned primary partitions is finished')
+                +form-field__checkbox({
+                    label: 'Late affinity assignment',
+                    model: model + '.lateAffinityAssignment',
+                    name: '"lateAffinityAssignment"',
+                    tip: 'With late affinity assignment mode if primary node was changed for some partition this nodes becomes primary only when rebalancing for all assigned primary partitions is finished'
+                })
 
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.1.0")')
-                +number('Long query timeout:', `${model}.longQueryWarningTimeout`, '"LongQueryWarningTimeout"', 'true', '3000', '0',
-                'Timeout in milliseconds after which long query warning will be printed')
+                +form-field__number({
+                    label: 'Long query timeout:',
+                    model: `${model}.longQueryWarningTimeout`,
+                    name: '"LongQueryWarningTimeout"',
+                    placeholder: '3000',
+                    min: '0',
+                    tip: 'Timeout in milliseconds after which long query warning will be printed'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterMisc', 'caches')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/odbc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/odbc.pug
index 481a9aa..3f1bca5 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/odbc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/odbc.pug
@@ -27,12 +27,12 @@
 )
     panel-title ODBC configuration
     panel-description
-        | ODBC server configuration. 
+        | ODBC server configuration.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/odbc-driver" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`$ctrl.available(["1.0.0", "2.1.0"]) && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +sane-form-field-checkbox({
+                +form-field__checkbox({
                     label: 'Enabled',
                     model: enabled,
                     name: '"odbcEnabled"',
@@ -43,28 +43,63 @@
                     }`
                     ui-validate-watch='$ctrl.Clusters.odbc.odbcEnabled.correctMarshallerWatch("$ctrl.clonedCluster")'
                 )
-                    +form-field-feedback(null, 'correctMarshaller', 'ODBC can only be used with BinaryMarshaller')
+                    +form-field__error({ error: 'correctMarshaller', message: 'ODBC can only be used with BinaryMarshaller' })
             .pc-form-grid-col-60
-                +text-ip-address-with-port-range('ODBC endpoint address:', `${model}.endpointAddress`, '"endpointAddress"', enabled, '0.0.0.0:10800..10810',
-                    'ODBC endpoint address. <br/>\
-                    The following address formats are permitted:\
-                    <ul>\
-                        <li>hostname - will use provided hostname and default port range</li>\
-                        <li>hostname:port - will use provided hostname and port</li>\
-                        <li>hostname:port_from..port_to - will use provided hostname and port range</li>\
-                    </ul>')
+                +form-field__ip-address-with-port-range({
+                    label: `${model}.endpointAddress`,
+                    model: '$item.localOutboundHost',
+                    name: '"endpointAddress"',
+                    enabled,
+                    placeholder: '0.0.0.0:10800..10810',
+                    tip: 'ODBC endpoint address. <br/>\
+                          The following address formats are permitted:\
+                          <ul>\
+                              <li>hostname - will use provided hostname and default port range</li>\
+                              <li>hostname:port - will use provided hostname and port</li>\
+                              <li>hostname:port_from..port_to - will use provided hostname and port range</li>\
+                          </ul>'
+                })
             .pc-form-grid-col-30
-                +number('Send buffer size:', `${model}.socketSendBufferSize`, '"ODBCSocketSendBufferSize"', enabled, '0', '0',
-                    'Socket send buffer size.<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label: 'Send buffer size:',
+                    model: `${model}.socketSendBufferSize`,
+                    name: '"ODBCSocketSendBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket send buffer size.<br/>\
+                          When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Socket receive buffer size:', `${model}.socketReceiveBufferSize`, '"ODBCSocketReceiveBufferSize"', enabled, '0', '0',
-                    'Socket receive buffer size.<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label:'Socket receive buffer size:',
+                    model: `${model}.socketReceiveBufferSize`,
+                    name: '"ODBCSocketReceiveBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket receive buffer size.<br/>\
+                          When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Maximum open cursors', `${model}.maxOpenCursors`, '"maxOpenCursors"', enabled, '128', '1', 'Maximum number of opened cursors per connection')
+                +form-field__number({
+                    label: 'Maximum open cursors',
+                    model: `${model}.maxOpenCursors`,
+                    name: '"maxOpenCursors"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '128',
+                    min: '1',
+                    tip: 'Maximum number of opened cursors per connection'
+                })
             .pc-form-grid-col-30
-                +number('Pool size:', `${model}.threadPoolSize`, '"ODBCThreadPoolSize"', enabled, 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing ODBC tasks')
+                +form-field__number({
+                    label: 'Pool size:',
+                    model: `${model}.threadPoolSize`,
+                    name: '"ODBCThreadPoolSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing ODBC tasks'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterODBC')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/persistence.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/persistence.pug
index 2c8d10a..a15707a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/persistence.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/persistence.pug
@@ -27,56 +27,189 @@
 )
     panel-title Persistence store
     panel-description
-        | Configures Apache Ignite Native Persistence. 
+        | Configures Apache Ignite Native Persistence.
         a.link-success(href='https://apacheignite.readme.io/docs/distributed-persistent-store' target='_blank') More info
     panel-content.pca-form-row(ng-if=`$ctrl.available(["2.1.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"PersistenceEnabled"', 'Flag indicating whether to configure persistent configuration')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"PersistenceEnabled"',
+                    tip: 'Flag indicating whether to configure persistent configuration'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Store path:', `${model}.persistentStorePath`, '"PersistenceStorePath"', enabled, 'false', 'Input store path',
-                'A path the root directory where the Persistent Store will persist data and indexes')
+                +form-field__text({
+                    label: 'Store path:',
+                    model: `${model}.persistentStorePath`,
+                    name: '"PersistenceStorePath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Input store path',
+                    tip: 'A path the root directory where the Persistent Store will persist data and indexes'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Metrics enabled', `${model}.metricsEnabled`, '"PersistenceMetricsEnabled"', enabled, 'Flag indicating whether persistence metrics collection is enabled')
+                +form-field__checkbox({
+                    label: 'Metrics enabled',
+                    model: `${model}.metricsEnabled`,
+                    name: '"PersistenceMetricsEnabled"',
+                    disabled: `!${enabled}`,
+                    tip: 'Flag indicating whether persistence metrics collection is enabled'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('Always write full pages', `${model}.alwaysWriteFullPages`, '"PersistenceAlwaysWriteFullPages"', enabled, 'Flag indicating whether always write full pages')
+                +form-field__checkbox({
+                    label: 'Always write full pages',
+                    model: `${model}.alwaysWriteFullPages`,
+                    name: '"PersistenceAlwaysWriteFullPages"',
+                    disabled: `!${enabled}`,
+                    tip: 'Flag indicating whether always write full pages'
+                })
             .pc-form-grid-col-60
-                +number('Checkpointing frequency:', `${model}.checkpointingFrequency`, '"PersistenceCheckpointingFrequency"', enabled, '180000', '1',
-                'Frequency which is a minimal interval when the dirty pages will be written to the Persistent Store')
+                +form-field__number({
+                    label: 'Checkpointing frequency:',
+                    model: `${model}.checkpointingFrequency`,
+                    name: '"PersistenceCheckpointingFrequency"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '180000',
+                    min: '1',
+                    tip: 'Frequency which is a minimal interval when the dirty pages will be written to the Persistent Store'
+                })
             .pc-form-grid-col-60
-                +number('Checkpointing page buffer size:', `${model}.checkpointingPageBufferSize`, '"PersistenceCheckpointingPageBufferSize"', enabled, '268435456', '0',
-                'Amount of memory allocated for a checkpointing temporary buffer')
+                +form-field__number({
+                    label: 'Checkpointing page buffer size:',
+                    model: `${model}.checkpointingPageBufferSize`,
+                    name: '"PersistenceCheckpointingPageBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '268435456',
+                    min: '0',
+                    tip: 'Amount of memory allocated for a checkpointing temporary buffer'
+                })
             .pc-form-grid-col-60
-                +number('Checkpointing threads:', `${model}.checkpointingThreads`, '"PersistenceCheckpointingThreads"', enabled, '1', '1', 'A number of threads to use for the checkpointing purposes')
+                +form-field__number({
+                    label: 'Checkpointing threads:',
+                    model: `${model}.checkpointingThreads`,
+                    name: '"PersistenceCheckpointingThreads"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '1',
+                    min: '1',
+                    tip: 'A number of threads to use for the checkpointing purposes'
+                })
             .pc-form-grid-col-60
-                +text-enabled('WAL store path:', `${model}.walStorePath`, '"PersistenceWalStorePath"', enabled, 'false', 'Input store path', 'A path to the directory where WAL is stored')
+                +form-field__text({
+                    label: 'WAL store path:',
+                    model: `${model}.walStorePath`,
+                    name: '"PersistenceWalStorePath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Input store path',
+                    tip: 'A path to the directory where WAL is stored'
+                })
             .pc-form-grid-col-60
-                +text-enabled('WAL archive path:', `${model}.walArchivePath`, '"PersistenceWalArchivePath"', enabled, 'false', 'Input archive path', 'A path to the WAL archive directory')
+                +form-field__text({
+                    label: 'WAL archive path:',
+                    model: `${model}.walArchivePath`,
+                    name: '"PersistenceWalArchivePath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Input archive path',
+                    tip: 'A path to the WAL archive directory'
+                })
             .pc-form-grid-col-30
-                +number('WAL segments:', `${model}.walSegments`, '"PersistenceWalSegments"', enabled, '10', '1', 'A number of WAL segments to work with')
+                +form-field__number({
+                    label: 'WAL segments:',
+                    model: `${model}.walSegments`,
+                    name: '"PersistenceWalSegments"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '10',
+                    min: '1',
+                    tip: 'A number of WAL segments to work with'
+                })
             .pc-form-grid-col-30
-                +number('WAL segment size:', `${model}.walSegmentSize`, '"PersistenceWalSegmentSize"', enabled, '67108864', '0', 'Size of a WAL segment')
+                +form-field__number({
+                    label: 'WAL segment size:',
+                    model: `${model}.walSegmentSize`,
+                    name: '"PersistenceWalSegmentSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '67108864',
+                    min: '0',
+                    tip: 'Size of a WAL segment'
+                })
             .pc-form-grid-col-30
-                +number('WAL history size:', `${model}.walHistorySize`, '"PersistenceWalHistorySize"', enabled, '20', '1', 'A total number of checkpoints to keep in the WAL history')
+                +form-field__number({
+                    label: 'WAL history size:',
+                    model: `${model}.walHistorySize`,
+                    name: '"PersistenceWalHistorySize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '20',
+                    min: '1',
+                    tip: 'A total number of checkpoints to keep in the WAL history'
+                })
             .pc-form-grid-col-30
-                +number('WAL flush frequency:', `${model}.walFlushFrequency`, '"PersistenceWalFlushFrequency"', enabled, '2000', '1',
-                'How often will be fsync, in milliseconds. In background mode, exist thread which do fsync by timeout')
+                +form-field__number({
+                    label: 'WAL flush frequency:',
+                    model: `${model}.walFlushFrequency`,
+                    name: '"PersistenceWalFlushFrequency"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '2000',
+                    min: '1',
+                    tip:'How often will be fsync, in milliseconds. In background mode, exist thread which do fsync by timeout'
+                })
             .pc-form-grid-col-30
-                +number('WAL fsync delay:', `${model}.walFsyncDelayNanos`, '"PersistenceWalFsyncDelay"', enabled, '1000', '1', 'WAL fsync delay, in nanoseconds')
+                +form-field__number({
+                    label: 'WAL fsync delay:',
+                    model: `${model}.walFsyncDelayNanos`,
+                    name: '"PersistenceWalFsyncDelay"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '1000',
+                    min: '1',
+                    tip: 'WAL fsync delay, in nanoseconds'
+                })
             .pc-form-grid-col-30
-                +number('WAL record iterator buffer size:', `${model}.walRecordIteratorBufferSize`, '"PersistenceWalRecordIteratorBufferSize"', enabled, '67108864', '1',
-                'How many bytes iterator read from disk(for one reading), during go ahead WAL')
+                +form-field__number({
+                    label: 'WAL record iterator buffer size:',
+                    model: `${model}.walRecordIteratorBufferSize`,
+                    name: '"PersistenceWalRecordIteratorBufferSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '67108864',
+                    min: '1',
+                    tip: 'How many bytes iterator read from disk(for one reading), during go ahead WAL'
+                })
             .pc-form-grid-col-30
-                +number('Lock wait time:', `${model}.lockWaitTime`, '"PersistenceLockWaitTime"', enabled, '10000', '1',
-                'Time out in second, while wait and try get file lock for start persist manager')
+                +form-field__number({
+                    label: 'Lock wait time:',
+                    model: `${model}.lockWaitTime`,
+                    name: '"PersistenceLockWaitTime"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '10000',
+                    min: '1',
+                    tip: 'Time out in second, while wait and try get file lock for start persist manager'
+                })
             .pc-form-grid-col-30
-                +number('Rate time interval:', `${model}.rateTimeInterval`, '"PersistenceRateTimeInterval"', enabled, '60000', '1000',
-                'The length of the time interval for rate - based metrics. This interval defines a window over which hits will be tracked.')
+                +form-field__number({
+                    label: 'Rate time interval:' ,
+                    model: `${model}.rateTimeInterval`,
+                    name: '"PersistenceRateTimeInterval"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '60000',
+                    min: '1000',
+                    tip: 'The length of the time interval for rate - based metrics. This interval defines a window over which hits will be tracked.'
+                })
             .pc-form-grid-col-30
-                +number('Thread local buffer size:', `${model}.tlbSize`, '"PersistenceTlbSize"', enabled, '131072', '1',
-                'Define size thread local buffer. Each thread which write to WAL have thread local buffer for serialize recode before write in WAL')
+                +form-field__number({
+                    label: 'Thread local buffer size:',
+                    model: `${model}.tlbSize`,
+                    name: '"PersistenceTlbSize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '131072',
+                    min: '1',
+                    tip: 'Define size thread local buffer. Each thread which write to WAL have thread local buffer for serialize recode before write in WAL'
+                })
             .pc-form-grid-col-30
-                +number('Sub intervals:', `${model}.subIntervals`, '"PersistenceSubIntervals"', enabled, '5', '1',
-                'Number of sub - intervals the whole rate time interval will be split into to calculate rate - based metrics')
+                +form-field__number({
+                    label: 'Sub intervals:',
+                    model: `${model}.subIntervals`,
+                    name: '"PersistenceSubIntervals"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '5',
+                    min: '1',
+                    tip: 'Number of sub - intervals the whole rate time interval will be split into to calculate rate - based metrics'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterPersistence')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/service.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/service.pug
index b37067b..10b5dd8 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/service.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/service.pug
@@ -22,60 +22,90 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Service configuration
     panel-description
-        | Service Grid allows for deployments of arbitrary user-defined services on the cluster. 
+        | Service Grid allows for deployments of arbitrary user-defined services on the cluster.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/fault-tolerance" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6
             mixin clusters-service-configurations
                 .ignite-form-field(ng-init='serviceConfigurationsTbl={type: "serviceConfigurations", model: "serviceConfigurations", focusId: "kind", ui: "failover-table"}')
-                    +ignite-form-field__label('Service configurations:', '"serviceConfigurations"')
-                    .ignite-form-field__control
-                        -let items = model
+                    +form-field__label({ label: 'Service configurations:', name: '"serviceConfigurations"' })
 
-                        list-editable.pc-list-editable-with-form-grid(ng-model=items name='serviceConfigurations')
-                            list-editable-item-edit.pc-form-grid-row
-                                .pc-form-grid-col-60
-                                    +sane-ignite-form-field-text({
-                                        label: 'Name:',
-                                        model: '$item.name',
-                                        name: '"serviceName"',
-                                        required: true,
-                                        placeholder: 'Input service name'
-                                    })(
-                                        ui-validate=`{
-                                            uniqueName: '$ctrl.Clusters.serviceConfigurations.serviceConfiguration.name.customValidators.uniqueName($item, ${items})'
-                                        }`
-                                        ui-validate-watch=`"${items}"`
-                                        ui-validate-watch-object-equality='true'
-                                        ng-model-options='{allowInvalid: true}'
-                                    )
-                                        +form-field-feedback('"serviceName', 'uniqueName', 'Service with that name is already configured')
-                                .pc-form-grid-col-60
-                                    +java-class('Service class', '$item.service', '"serviceService"', 'true', 'true', 'Service implementation class name')
-                                .pc-form-grid-col-60
-                                    +number('Max per node count:', '$item.maxPerNodeCount', '"ServiceMaxPerNodeCount"', 'true', 'Unlimited', '0',
-                                        'Maximum number of deployed service instances on each node.<br/>' +
-                                        'Zero for unlimited')
-                                .pc-form-grid-col-60
-                                    +number('Total count:', '$item.totalCount', '"serviceTotalCount"', 'true', 'Unlimited', '0',
-                                        'Total number of deployed service instances in the cluster.<br/>' +
-                                        'Zero for unlimited')
-                                .pc-form-grid-col-60
-                                    +dropdown-required-empty('Cache:', '$item.cache', '"serviceCache"', 'true', 'false',
-                                        'Choose cache', 'No caches configured for current cluster', '$ctrl.cachesMenu', 'Cache name used for key-to-node affinity calculation')(
-                                        pc-is-in-collection='$ctrl.clonedCluster.caches'
-                                    )
-                                        +form-field-feedback(_, 'isInCollection', `Cluster doesn't have such a cache`)
-                                .pc-form-grid-col-60
-                                    +text('Affinity key:', '$item.affinityKey', '"serviceAffinityKey"', 'false', 'Input affinity key',
-                                        'Affinity key used for key-to-node affinity calculation')
+                    -let items = model
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$ctrl.Clusters.addServiceConfiguration($ctrl.clonedCluster)`
-                                    label-single='service configuration'
-                                    label-multiple='service configurations'
+                    list-editable.pc-list-editable-with-form-grid(ng-model=items name='serviceConfigurations')
+                        list-editable-item-edit.pc-form-grid-row
+                            .pc-form-grid-col-60
+                                +form-field__text({
+                                    label: 'Name:',
+                                    model: '$item.name',
+                                    name: '"serviceName"',
+                                    required: true,
+                                    placeholder: 'Input service name'
+                                })(
+                                    ui-validate=`{
+                                        uniqueName: '$ctrl.Clusters.serviceConfigurations.serviceConfiguration.name.customValidators.uniqueName($item, ${items})'
+                                    }`
+                                    ui-validate-watch=`"${items}"`
+                                    ui-validate-watch-object-equality='true'
+                                    ng-model-options='{allowInvalid: true}'
                                 )
+                                    +form-field__error({ error: 'uniqueName', message: 'Service with that name is already configured' })
+                            .pc-form-grid-col-60
+                                +form-field__java-class({
+                                    label: 'Service class',
+                                    model: '$item.service',
+                                    name: '"serviceService"',
+                                    required: 'true',
+                                    tip: 'Service implementation class name'
+                                })
+                            .pc-form-grid-col-60
+                                +form-field__number({
+                                    label: 'Max per node count:',
+                                    model: '$item.maxPerNodeCount',
+                                    name: '"ServiceMaxPerNodeCount"',
+                                    placeholder: 'Unlimited',
+                                    min: '0',
+                                    tip: 'Maximum number of deployed service instances on each node.<br/>\
+                                          Zero for unlimited'
+                                })
+                            .pc-form-grid-col-60
+                                +form-field__number({
+                                    label: 'Total count:',
+                                    model: '$item.totalCount',
+                                    name: '"serviceTotalCount"',
+                                    placeholder: 'Unlimited',
+                                    min: '0',
+                                    tip: 'Total number of deployed service instances in the cluster.<br/>\
+                                        Zero for unlimited'
+                                })
+                            .pc-form-grid-col-60
+                                +form-field__dropdown({
+                                    label: 'Cache:',
+                                    model: '$item.cache',
+                                    name: '"serviceCache"',
+                                    placeholder: 'Choose cache',
+                                    placeholderEmpty: 'No caches configured for current cluster',
+                                    options: '$ctrl.cachesMenu',
+                                    tip: 'Cache name used for key-to-node affinity calculation'
+                                })(
+                                    pc-is-in-collection='$ctrl.clonedCluster.caches'
+                                )
+                                    +form-field__error({ error: 'isInCollection', message: `Cluster doesn't have such a cache` })
+                            .pc-form-grid-col-60
+                                +form-field__text({
+                                    label: 'Affinity key:',
+                                    model: '$item.affinityKey',
+                                    name: '"serviceAffinityKey"',
+                                    placeholder: 'Input affinity key',
+                                    tip: 'Affinity key used for key-to-node affinity calculation'
+                                })
+
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$ctrl.Clusters.addServiceConfiguration($ctrl.clonedCluster)`
+                                label-single='service configuration'
+                                label-multiple='service configurations'
+                            )
 
             +clusters-service-configurations
 
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/sql-connector.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/sql-connector.pug
index 708aa0d..3b2ca27 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/sql-connector.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/sql-connector.pug
@@ -31,28 +31,86 @@
     panel-content.pca-form-row(ng-if=`$ctrl.available(["2.1.0", "2.3.0"]) && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Enabled', connectionEnabled, '"SqlConnectorEnabled"', 'Flag indicating whether to configure SQL connector configuration')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: connectionEnabled,
+                    name: '"SqlConnectorEnabled"',
+                    tip: 'Flag indicating whether to configure SQL connector configuration'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Host:', `${connectionModel}.host`, '"SqlConnectorHost"', connectionEnabled, 'false', 'localhost')
+                +form-field__text({
+                    label: 'Host:',
+                    model: `${connectionModel}.host`,
+                    name: '"SqlConnectorHost"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: 'localhost'
+                })
             .pc-form-grid-col-30
-                +number('Port:', `${connectionModel}.port`, '"SqlConnectorPort"', connectionEnabled, '10800', '1025')
+                +form-field__number({
+                    label: 'Port:',
+                    model: `${connectionModel}.port`,
+                    name: '"SqlConnectorPort"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '10800',
+                    min: '1025'
+                })
             .pc-form-grid-col-30
-                +number('Port range:', `${connectionModel}.portRange`, '"SqlConnectorPortRange"', connectionEnabled, '100', '0')
+                +form-field__number({
+                    label: 'Port range:',
+                    model: `${connectionModel}.portRange`,
+                    name: '"SqlConnectorPortRange"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '100',
+                    min: '0'
+                })
             .pc-form-grid-col-30
-                +number('Socket send buffer size:', `${connectionModel}.socketSendBufferSize`, '"SqlConnectorSocketSendBufferSize"', connectionEnabled, '0', '0',
-                    'Socket send buffer size.<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label: 'Socket send buffer size:',
+                    model: `${connectionModel}.socketSendBufferSize`,
+                    name: '"SqlConnectorSocketSendBufferSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket send buffer size.<br/>\
+                          When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Socket receive buffer size:', `${connectionModel}.socketReceiveBufferSize`, '"SqlConnectorSocketReceiveBufferSize"', connectionEnabled, '0', '0',
-                    'Socket receive buffer size.<br/>\
-                    When set to <b>0</b>, operation system default will be used')
+                +form-field__number({
+                    label: 'Socket receive buffer size:',
+                    model: `${connectionModel}.socketReceiveBufferSize`,
+                    name: '"SqlConnectorSocketReceiveBufferSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Socket receive buffer size.<br/>\
+                         When set to <b>0</b>, operation system default will be used'
+                })
             .pc-form-grid-col-30
-                +number('Max connection cursors:', `${connectionModel}.maxOpenCursorsPerConnection`, '"SqlConnectorMaxOpenCursorsPerConnection"', connectionEnabled, '128', '0',
-                    'Max number of opened cursors per connection')
+                +form-field__number({
+                    label: 'Max connection cursors:',
+                    model: `${connectionModel}.maxOpenCursorsPerConnection`,
+                    name: '"SqlConnectorMaxOpenCursorsPerConnection"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: '128',
+                    min: '0',
+                    tip: 'Max number of opened cursors per connection'
+                })
             .pc-form-grid-col-30
-                +number('Pool size:', `${connectionModel}.threadPoolSize`, '"SqlConnectorThreadPoolSize"', connectionEnabled, 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing SQL requests')
+                +form-field__number({
+                    label: 'Pool size:',
+                    model: `${connectionModel}.threadPoolSize`,
+                    name: '"SqlConnectorThreadPoolSize"',
+                    disabled: `!(${connectionEnabled})`,
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing SQL requests'
+                })
             .pc-form-grid-col-60
-                +checkbox-enabled('TCP_NODELAY option', `${connectionModel}.tcpNoDelay`, '"SqlConnectorTcpNoDelay"', connectionEnabled)
+                +form-field__checkbox({
+                    label: 'TCP_NODELAY option',
+                    model: `${connectionModel}.tcpNoDelay`,
+                    name: '"SqlConnectorTcpNoDelay"',
+                    disabled: `!${connectionEnabled}`
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterQuery')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/ssl.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/ssl.pug
index 2745f53..61c722e 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/ssl.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/ssl.pug
@@ -25,55 +25,89 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title SSL configuration
     panel-description
-        | Settings for SSL configuration for creating a secure socket layer. 
+        | Settings for SSL configuration for creating a secure socket layer.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/ssltls" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"sslEnabled"', 'Flag indicating whether to configure SSL configuration')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"sslEnabled"',
+                    tip: 'Flag indicating whether to configure SSL configuration'
+                })
             .pc-form-grid-col-60
-                +text-options('Algorithm to create a key manager:', `${model}.keyAlgorithm`, '"keyAlgorithm"', '["SumX509", "X509"]', enabled, 'false', 'SumX509',
-                    'Sets key manager algorithm that will be used to create a key manager<br/>\
-                    Notice that in most cased default value suites well, however, on Android platform this value need to be set to X509')
+                +form-field__typeahead({
+                    label: 'Algorithm to create a key manager:',
+                    model: `${model}.keyAlgorithm`,
+                    name: '"keyAlgorithm"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'SumX509',
+                    options: '["SumX509", "X509"]',
+                    tip: 'Sets key manager algorithm that will be used to create a key manager<br/>\
+                         Notice that in most cased default value suites well, however, on Android platform this value need to be set to X509'
+                })
+
             .pc-form-grid-col-60
-                +text-enabled('Key store file:', `${model}.keyStoreFilePath`, '"keyStoreFilePath"', enabled, enabled, 'Path to the key store file',
-                    'Path to the key store file<br/>\
-                    This is a mandatory parameter since ssl context could not be initialized without key manager')
+                +form-field__text({
+                    label: 'Key store file:',
+                    model: `${secondaryFileSystem}.keyStoreFilePath`,
+                    name: '"keyStoreFilePath"',
+                    disabled: `!(${enabled})`,
+                    required: enabled,
+                    placeholder: 'Path to the key store file',
+                    tip: 'Path to the key store file<br/>\
+                          This is a mandatory parameter since ssl context could not be initialized without key manager'
+                })
             .pc-form-grid-col-30
-                +text-options('Key store type:', `${model}.keyStoreType`, '"keyStoreType"', '["JKS", "PCKS11", "PCKS12"]', enabled, 'false', 'JKS',
-                    'Key store type used in context initialization')
+                +form-field__typeahead({
+                    label: 'Key store type:',
+                    model: `${model}.keyStoreType`,
+                    name: '"keyStoreType"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'JKS',
+                    options: '["JKS", "PCKS11", "PCKS12"]',
+                    tip: 'Key store type used in context initialization'
+                })
             .pc-form-grid-col-30
-                +text-options('Protocol:', `${model}.protocol`, '"protocol"', '["TSL", "SSL"]', enabled, 'false', 'TSL', 'Protocol for secure transport')
+                +form-field__typeahead({
+                    label: 'Protocol:',
+                    model: `${model}.protocol`,
+                    name: '"protocol"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'TSL',
+                    options: '["TSL", "SSL"]',
+                    tip: 'Protocol for secure transport'
+                })
             .pc-form-grid-col-60
                 .ignite-form-field
-                    .ignite-form-field__control
-                        list-editable(
-                            ng-model=trust
-                            name='trustManagers'
-                            list-editable-cols=`::[{name: "Pre-configured trust managers:"}]`
-                            ng-disabled=enabledToDisabled(enabled)
-                            ng-required=`${enabled} && !${model}.trustStoreFilePath`
-                        )
-                            list-editable-item-view {{ $item }}
+                    list-editable(
+                        ng-model=trust
+                        name='trustManagers'
+                        list-editable-cols=`::[{name: "Pre-configured trust managers:"}]`
+                        ng-disabled=enabledToDisabled(enabled)
+                        ng-required=`${enabled} && !${model}.trustStoreFilePath`
+                    )
+                        list-editable-item-view {{ $item }}
 
-                            list-editable-item-edit
-                                +list-java-class-field('Trust manager', '$item', '"trustManager"', trust)
-                                    +unique-feedback('"trustManager"', 'Such trust manager already exists!')
+                        list-editable-item-edit
+                            +list-java-class-field('Trust manager', '$item', '"trustManager"', trust)
+                                +form-field__error({ error: 'igniteUnique', message: 'Such trust manager already exists!' })
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$editLast((${trust} = ${trust} || []).push(''))`
-                                    label-single='trust manager'
-                                    label-multiple='trust managers'
-                                )
-                    .ignite-form-field__errors(
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$editLast((${trust} = ${trust} || []).push(''))`
+                                label-single='trust manager'
+                                label-multiple='trust managers'
+                            )
+                    .form-field__errors(
                         ng-messages=`sslConfiguration.trustManagers.$error`
                         ng-show=`sslConfiguration.trustManagers.$invalid`
                     )
-                        +form-field-feedback(_, 'required', 'Trust managers or trust store file should be configured')
+                        +form-field__error({ error: 'required', message: 'Trust managers or trust store file should be configured' })
 
             .pc-form-grid-col-30(ng-if-start=`!${trust}.length`)
-                +sane-ignite-form-field-text({
+                +form-field__text({
                     label: 'Trust store file:',
                     model: `${model}.trustStoreFilePath`,
                     name: '"trustStoreFilePath"',
@@ -82,8 +116,16 @@
                     placeholder: 'Path to the trust store file',
                     tip: 'Path to the trust store file'
                 })
-                    +form-field-feedback(_, 'required', 'Trust store file or trust managers should be configured')
+                    +form-field__error({ error: 'required', message: 'Trust store file or trust managers should be configured' })
             .pc-form-grid-col-30(ng-if-end)
-                +text-options('Trust store type:', `${model}.trustStoreType`, '"trustStoreType"', '["JKS", "PCKS11", "PCKS12"]', enabled, 'false', 'JKS', 'Trust store type used in context initialization')
+                +form-field__typeahead({
+                    label: 'Trust store type:',
+                    model: `${model}.trustStoreType`,
+                    name: '"trustStoreType"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'JKS',
+                    options: '["JKS", "PCKS11", "PCKS12"]',
+                    tip: 'Trust store type used in context initialization'
+                })
         .pca-form-column-6
             +preview-xml-java(cluster, 'clusterSsl')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/swap.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/swap.pug
index d314296..d39dae6 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/swap.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/swap.pug
@@ -28,25 +28,35 @@
 )
     panel-title Swap
     panel-description
-        | Settings for overflow data to disk if it cannot fit in memory. 
+        | Settings for overflow data to disk if it cannot fit in memory.
         | #[a.link-success(href="https://apacheignite.readme.io/v1.9/docs/off-heap-memory#swap-space" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`$ctrl.available(["1.0.0", "2.0.0"]) && ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +dropdown('Swap space SPI:', `${swapModel}.kind`, '"swapSpaceSpi"', 'true', 'Choose swap SPI',
-                    '::$ctrl.Clusters.swapSpaceSpis',
-                    'Provides a mechanism in grid for storing data on disk<br/>\
-                    Ignite cache uses swap space to overflow data to disk if it cannot fit in memory\
-                    <ul>\
-                        <li>File-based swap - File-based swap space SPI implementation which holds keys in memory</li>\
-                        <li>Not set - File-based swap space SPI with default configuration when it needed</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Swap space SPI:',
+                    model: `${swapModel}.kind`,
+                    name: '"swapSpaceSpi"',
+                    placeholder: 'Choose swap SPI',
+                    options: '::$ctrl.Clusters.swapSpaceSpis',
+                    tip: 'Provides a mechanism in grid for storing data on disk<br/>\
+                        Ignite cache uses swap space to overflow data to disk if it cannot fit in memory\
+                        <ul>\
+                            <li>File-based swap - File-based swap space SPI implementation which holds keys in memory</li>\
+                            <li>Not set - File-based swap space SPI with default configuration when it needed</li>\
+                        </ul>'
+                })
             .pc-form-group.pc-form-grid-row(ng-show=`${swapModel}.kind`)
                 .pc-form-grid-col-60
-                    +text('Base directory:', `${fileSwapModel}.baseDirectory`, '"baseDirectory"', 'false', 'swapspace',
-                        'Base directory where to write files')
+                    +form-field__text({
+                        label: 'Base directory:',
+                        model: `${fileSwapModel}.baseDirectory`,
+                        name: '"baseDirectory"',
+                        placeholder: 'swapspace',
+                        tip: 'Base directory where to write files'
+                    })
                 .pc-form-grid-col-30
-                    +sane-ignite-form-field-number({
+                    +form-field__number({
                         label: 'Read stripe size:',
                         model: `${fileSwapModel}.readStripesNumber`,
                         name: '"readStripesNumber"',
@@ -57,18 +67,38 @@
                             powerOfTwo: '$ctrl.Clusters.swapSpaceSpi.readStripesNumber.customValidators.powerOfTwo($value)'
                         }`
                     )
-                        +form-field-feedback('"readStripesNumber"', 'powerOfTwo', 'Read stripe size must be positive and power of two')
+                        +form-field__error({ error: 'powerOfTwo', message: 'Read stripe size must be positive and power of two' })
                 .pc-form-grid-col-30
-                    +number-min-max-step('Maximum sparsity:', `${fileSwapModel}.maximumSparsity`, '"maximumSparsity"', 'true', '0.5', '0', '0.999', '0.001',
-                        'This property defines maximum acceptable wasted file space to whole file size ratio<br/>\
-                        When this ratio becomes higher than specified number compacting thread starts working')
+                    +form-field__number({
+                        label: 'Maximum sparsity:',
+                        model: `${fileSwapModel}.maximumSparsity`,
+                        name: '"maximumSparsity"',
+                        placeholder: '0.5',
+                        min: '0',
+                        max: '0.999',
+                        step: '0.001',
+                        tip: 'This property defines maximum acceptable wasted file space to whole file size ratio<br/>\
+                             When this ratio becomes higher than specified number compacting thread starts working'
+                    })
                 .pc-form-grid-col-30
-                    +number('Max write queue size:', `${fileSwapModel}.maxWriteQueueSize`, '"maxWriteQueueSize"', 'true', '1024 * 1024', '0',
-                        'Max write queue size in bytes<br/>\
-                        If there are more values are waiting for being written to disk then specified size, SPI will block on store operation')
+                    +form-field__number({
+                        label: 'Max write queue size:',
+                        model: `${fileSwapModel}.maxWriteQueueSize`,
+                        name: '"maxWriteQueueSize"',
+                        placeholder: '1024 * 1024',
+                        min: '0',
+                        tip: 'Max write queue size in bytes<br/>\
+                              If there are more values are waiting for being written to disk then specified size, SPI will block on store operation'
+                    })
                 .pc-form-grid-col-30
-                    +number('Write buffer size:', `${fileSwapModel}.writeBufferSize`, '"writeBufferSize"', 'true', '64 * 1024', '0',
-                        'Write buffer size in bytes<br/>\
-                        Write to disk occurs only when this buffer is full')
+                    +form-field__number({
+                        label: 'Write buffer size:',
+                        model: `${fileSwapModel}.writeBufferSize`,
+                        name: '"writeBufferSize"',
+                        placeholder: '64 * 1024',
+                        min: '0',
+                        tip: 'Write buffer size in bytes<br/>\
+                              Write to disk occurs only when this buffer is full'
+                    })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterSwap')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/thread.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/thread.pug
index ebe3bcd..76633f3 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/thread.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/thread.pug
@@ -26,10 +26,16 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +number('Public:', model + '.publicThreadPoolSize', '"publicThreadPoolSize"', 'true', 'max(8, availableProcessors) * 2', '1',
-                    'Thread pool that is in charge of processing ComputeJob, GridJobs and user messages sent to node')
+                +form-field__number({
+                    label: 'Public:',
+                    model: model + '.publicThreadPoolSize',
+                    name: '"publicThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors) * 2',
+                    min: '1',
+                    tip: 'Thread pool that is in charge of processing ComputeJob, GridJobs and user messages sent to node'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'System:',
                     model: `${model}.systemThreadPoolSize`,
                     name: '"systemThreadPoolSize"',
@@ -38,16 +44,34 @@
                     tip: 'Thread pool that is in charge of processing internal system messages'
                 })
             .pc-form-grid-col-30
-                +number('Service:', model + '.serviceThreadPoolSize', '"serviceThreadPoolSize"', 'true', 'max(8, availableProcessors) * 2', '1',
-                    'Thread pool that is in charge of processing proxy invocation')
+                +form-field__number({
+                    label: 'Service:',
+                    model: model + '.serviceThreadPoolSize',
+                    name: '"serviceThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors) * 2',
+                    min: '1',
+                    tip: 'Thread pool that is in charge of processing proxy invocation'
+                })
             .pc-form-grid-col-30
-                +number('Management:', model + '.managementThreadPoolSize', '"managementThreadPoolSize"', 'true', '4', '1',
-                    'Thread pool that is in charge of processing internal and Visor ComputeJob, GridJobs')
+                +form-field__number({
+                    label: 'Management:',
+                    model: model + '.managementThreadPoolSize',
+                    name: '"managementThreadPoolSize"',
+                    placeholder: '4',
+                    min: '1',
+                    tip: 'Thread pool that is in charge of processing internal and Visor ComputeJob, GridJobs'
+                })
             .pc-form-grid-col-30
-                +number('IGFS:', model + '.igfsThreadPoolSize', '"igfsThreadPoolSize"', 'true', 'availableProcessors', '1',
-                    'Thread pool that is in charge of processing outgoing IGFS messages')
+                +form-field__number({
+                    label: 'IGFS:',
+                    model: model + '.igfsThreadPoolSize',
+                    name: '"igfsThreadPoolSize"',
+                    placeholder: 'availableProcessors',
+                    min: '1',
+                    tip: 'Thread pool that is in charge of processing outgoing IGFS messages'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Rebalance:',
                     model: `${model}.rebalanceThreadPoolSize`,
                     name: '"rebalanceThreadPoolSize"',
@@ -56,12 +80,18 @@
                     max: `{{ $ctrl.Clusters.rebalanceThreadPoolSize.max(${model}) }}`,
                     tip: 'Max count of threads can be used at rebalancing'
                 })
-                    +form-field-feedback('max', 'Rebalance thread pool size should not exceed or be equal to System thread pool size')
+                    +form-field__error({ error: 'max', message: 'Rebalance thread pool size should not exceed or be equal to System thread pool size' })
             .pc-form-grid-col-30
-                +number('Utility cache:', model + '.utilityCacheThreadPoolSize', '"utilityCacheThreadPoolSize"', 'true', 'max(8, availableProcessors)', '1',
-                    'Default thread pool size that will be used to process utility cache messages')
+                +form-field__number({
+                    label: 'Utility cache:',
+                    model: model + '.utilityCacheThreadPoolSize',
+                    name: '"utilityCacheThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Default thread pool size that will be used to process utility cache messages'
+                })
             .pc-form-grid-col-30
-                pc-form-field-size(
+                form-field-size(
                     label='Utility cache keep alive time:'
                     ng-model=`${model}.utilityCacheKeepAliveTime`
                     name='utilityCacheKeepAliveTime'
@@ -73,72 +103,104 @@
                     on-scale-change='_s1 = $event'
                 )
             .pc-form-grid-col-30
-                +number('Async callback:', model + '.asyncCallbackPoolSize', '"asyncCallbackPoolSize"', 'true', 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing asynchronous callbacks')
+                +form-field__number({
+                    label:'Async callback:',
+                    model: model + '.asyncCallbackPoolSize',
+                    name: '"asyncCallbackPoolSize"',
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing asynchronous callbacks'
+                })
             .pc-form-grid-col-30
-                +number('Striped:', model + '.stripedPoolSize', '"stripedPoolSize"', 'true', 'max(8, availableProcessors)', '1',
-                    'Striped pool size that should be used for cache requests processing')
+                +form-field__number({
+                    label: 'Striped:',
+                    model: model + '.stripedPoolSize',
+                    name: '"stripedPoolSize"',
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Striped pool size that should be used for cache requests processing'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-30(ng-if-start='$ctrl.available("2.0.0")')
-                +number('Data streamer:', model + '.dataStreamerThreadPoolSize', '"dataStreamerThreadPoolSize"', 'true', 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing data stream messages')
+                +form-field__number({
+                    label: 'Data streamer:',
+                    model: model + '.dataStreamerThreadPoolSize',
+                    name: '"dataStreamerThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing data stream messages'
+                })
             .pc-form-grid-col-30
-                +number('Query:', model + '.queryThreadPoolSize', '"queryThreadPoolSize"', 'true', 'max(8, availableProcessors)', '1',
-                    'Size of thread pool that is in charge of processing query messages')
+                +form-field__number({
+                    label: 'Query:',
+                    model: model + '.queryThreadPoolSize',
+                    name: '"queryThreadPoolSize"',
+                    placeholder: 'max(8, availableProcessors)',
+                    min: '1',
+                    tip: 'Size of thread pool that is in charge of processing query messages'
+                })
             .pc-form-grid-col-60(ng-if-end)
                 .ignite-form-field
-                    +ignite-form-field__label('Executor configurations:', '"executorConfigurations"')
-                        +tooltip(`Custom thread pool configurations for compute tasks`)
-                    .ignite-form-field__control
-                        list-editable(
-                            ng-model=executors
-                            ng-model-options='{allowInvalid: true}'
-                            name='executorConfigurations'
-                            ui-validate=`{
-                                allNamesExist: '$ctrl.Clusters.executorConfigurations.allNamesExist($value)',
-                                allNamesUnique: '$ctrl.Clusters.executorConfigurations.allNamesUnique($value)'
-                            }`
-                        )
-                            list-editable-item-view
-                                | {{ $item.name }} / 
-                                | {{ $item.size || 'max(8, availableProcessors)'}}
+                    +form-field__label({ label: 'Executor configurations:', name: '"executorConfigurations"' })
+                        +form-field__tooltip({ title: `Custom thread pool configurations for compute tasks` })
 
-                            list-editable-item-edit
-                                .pc-form-grid-row
-                                    .pc-form-grid-col-30
-                                        +sane-ignite-form-field-text({
-                                            label: 'Name:',
-                                            model: '$item.name',
-                                            name: '"ExecutorName"',
-                                            required: true,
-                                            placeholder: 'Input executor name',
-                                            tip: 'Thread pool name'
-                                        })(
-                                            ui-validate=`{
-                                                uniqueName: '$ctrl.Clusters.executorConfiguration.name.customValidators.uniqueName($item, ${executors})'
-                                            }`
-                                            ui-validate-watch=`"${executors}"`
-                                            ui-validate-watch-object-equality='true'
-                                            ng-model-options='{allowInvalid: true}'
-                                            data-ignite-form-field-input-autofocus='true'
-                                        )
-                                            +form-field-feedback(null, 'uniqueName', 'Service with that name is already configured')
-                                    .pc-form-grid-col-30
-                                        +number('Pool size:', '$item.size', '"ExecutorPoolSize"', 'true', 'max(8, availableProcessors)', '1', 'Thread pool size')
+                    list-editable(
+                        ng-model=executors
+                        ng-model-options='{allowInvalid: true}'
+                        name='executorConfigurations'
+                        ui-validate=`{
+                            allNamesExist: '$ctrl.Clusters.executorConfigurations.allNamesExist($value)',
+                            allNamesUnique: '$ctrl.Clusters.executorConfigurations.allNamesUnique($value)'
+                        }`
+                    )
+                        list-editable-item-view
+                            | {{ $item.name }} /
+                            | {{ $item.size || 'max(8, availableProcessors)'}}
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$edit($ctrl.Clusters.addExecutorConfiguration(${model}))`
-                                    label-single='executor configuration'
-                                    label-multiple='executor configurations'
-                                )
-                    .ignite-form-field__errors(
+                        list-editable-item-edit
+                            .pc-form-grid-row
+                                .pc-form-grid-col-30
+                                    +form-field__text({
+                                        label: 'Name:',
+                                        model: '$item.name',
+                                        name: '"ExecutorName"',
+                                        required: true,
+                                        placeholder: 'Input executor name',
+                                        tip: 'Thread pool name'
+                                    })(
+                                        ui-validate=`{
+                                            uniqueName: '$ctrl.Clusters.executorConfiguration.name.customValidators.uniqueName($item, ${executors})'
+                                        }`
+                                        ui-validate-watch=`"${executors}"`
+                                        ui-validate-watch-object-equality='true'
+                                        ng-model-options='{allowInvalid: true}'
+                                        ignite-form-field-input-autofocus='true'
+                                    )
+                                        +form-field__error({ error: 'uniqueName', message: 'Service with that name is already configured' })
+                                .pc-form-grid-col-30
+                                    +form-field__number({
+                                        label: 'Pool size:',
+                                        model: '$item.size',
+                                        name: '"ExecutorPoolSize"',
+                                        placeholder: 'max(8, availableProcessors)',
+                                        min: '1',
+                                        tip: 'Thread pool size'
+                                    })
+
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$edit($ctrl.Clusters.addExecutorConfiguration(${model}))`
+                                label-single='executor configuration'
+                                label-multiple='executor configurations'
+                            )
+
+                    .form-field__errors(
                         ng-messages=`pools.executorConfigurations.$error`
                         ng-show=`pools.executorConfigurations.$invalid`
                     )
-                        +form-field-feedback(_, 'allNamesExist', 'All executor configurations should have a name')
-                        +form-field-feedback(_, 'allNamesUnique', 'All executor configurations should have a unique name')
+                        +form-field__error({ error: 'allNamesExist', message: 'All executor configurations should have a name' })
+                        +form-field__error({ error: 'allNamesUnique', message: 'All executor configurations should have a unique name' })
 
         .pca-form-column-6
             +preview-xml-java(model, 'clusterPools')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/time.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/time.pug
index fa85a5d..7cfff3c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/time.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/time.pug
@@ -26,19 +26,46 @@
         .pca-form-column-6.pc-form-grid-row
             //- Removed in ignite 2.0
             .pc-form-grid-col-30(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Samples size:', `${model}.clockSyncSamples`, '"clockSyncSamples"', 'true', '8', '0',
-                    'Number of samples used to synchronize clocks between different nodes<br/>\
-                    Clock synchronization is used for cache version assignment in CLOCK order mode')
+                +form-field__number({
+                    label: 'Samples size:',
+                    model: `${model}.clockSyncSamples`,
+                    name: '"clockSyncSamples"',
+                    placeholder: '8',
+                    min: '0',
+                    tip: 'Number of samples used to synchronize clocks between different nodes<br/>\
+                          Clock synchronization is used for cache version assignment in CLOCK order mode'
+                })
             .pc-form-grid-col-30(ng-if-end)
-                +number('Frequency:', `${model}.clockSyncFrequency`, '"clockSyncFrequency"', 'true', '120000', '0',
-                    'Frequency at which clock is synchronized between nodes, in milliseconds<br/>\
-                    Clock synchronization is used for cache version assignment in CLOCK order mode')
+                +form-field__number({
+                    label: 'Frequency:',
+                    model: `${model}.clockSyncFrequency`,
+                    name: '"clockSyncFrequency"',
+                    placeholder: '120000',
+                    min: '0',
+                    tip: 'Frequency at which clock is synchronized between nodes, in milliseconds<br/>\
+                          Clock synchronization is used for cache version assignment in CLOCK order mode'
+                })
 
             .pc-form-grid-col-30
-                +number-min-max('Port base:', `${model}.timeServerPortBase`, '"timeServerPortBase"', 'true', '31100', '0', '65535',
-                    'Time server provides clock synchronization between nodes<br/>\
-                    Base UPD port number for grid time server. Time server will be started on one of free ports in range')
+                +form-field__number({
+                    label: 'Port base:',
+                    model: `${model}.timeServerPortBase`,
+                    name: '"timeServerPortBase"',
+                    placeholder: '31100',
+                    min: '0',
+                    max: '65535',
+                    tip: 'Time server provides clock synchronization between nodes<br/>\
+                         Base UPD port number for grid time server. Time server will be started on one of free ports in range'
+                })
+
             .pc-form-grid-col-30
-                +number('Port range:', `${model}.timeServerPortRange`, '"timeServerPortRange"', 'true', '100', '1', 'Time server port range')
+                +form-field__number({
+                    label: 'Port range:',
+                    model: `${model}.timeServerPortRange`,
+                    name: '"timeServerPortRange"',
+                    placeholder: '100',
+                    min: '1',
+                    tip: 'Time server port range'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterTime')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/transactions.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/transactions.pug
index b5f80df..48c8391 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/transactions.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/cluster-edit-form/templates/transactions.pug
@@ -21,45 +21,78 @@
 
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Transactions
-    panel-description 
-        | Settings for transactions. 
+    panel-description
+        | Settings for transactions.
         | #[a.link-success(href="https://apacheignite.readme.io/docs/transactions" target="_blank") More info]
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +dropdown('Concurrency:', `${model}.defaultTxConcurrency`, '"defaultTxConcurrency"', 'true', 'PESSIMISTIC',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Concurrency:',
+                    model: `${model}.defaultTxConcurrency`,
+                    name: '"defaultTxConcurrency"',
+                    placeholder: 'PESSIMISTIC',
+                    options: '[\
                         {value: "OPTIMISTIC", label: "OPTIMISTIC"},\
                         {value: "PESSIMISTIC", label: "PESSIMISTIC"}\
                     ]',
-                    'Cache transaction concurrency to use when one is not explicitly specified\
-                    <ul>\
-                        <li>OPTIMISTIC - All cache operations are not distributed to other nodes until commit is called</li>\
-                        <li>PESSIMISTIC - A lock is acquired on all cache operations with exception of read operations in READ_COMMITTED mode</li>\
-                    </ul>')
+                    tip: 'Cache transaction concurrency to use when one is not explicitly specified\
+                        <ul>\
+                            <li>OPTIMISTIC - All cache operations are not distributed to other nodes until commit is called</li>\
+                            <li>PESSIMISTIC - A lock is acquired on all cache operations with exception of read operations in READ_COMMITTED mode</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-30
-                +dropdown('Isolation:', `${model}.defaultTxIsolation`, '"defaultTxIsolation"', 'true', 'REPEATABLE_READ',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Isolation:',
+                    model: `${model}.defaultTxIsolation`,
+                    name: '"defaultTxIsolation"',
+                    placeholder: 'REPEATABLE_READ',
+                    options: '[\
                         {value: "READ_COMMITTED", label: "READ_COMMITTED"},\
                         {value: "REPEATABLE_READ", label: "REPEATABLE_READ"},\
                         {value: "SERIALIZABLE", label: "SERIALIZABLE"}\
                     ]',
-                    'Default transaction isolation\
-                    <ul>\
-                        <li>READ_COMMITTED - Always a committed value will be provided for read operations</li>\
-                        <li>REPEATABLE_READ - If a value was read once within transaction, then all consecutive reads will provide the same in-transaction value</li>\
-                        <li>SERIALIZABLE - All transactions occur in a completely isolated fashion, as if all transactions in the system had executed serially, one after the other.</li>\
-                    </ul>')
+                    tip: 'Default transaction isolation\
+                        <ul>\
+                            <li>READ_COMMITTED - Always a committed value will be provided for read operations</li>\
+                            <li>REPEATABLE_READ - If a value was read once within transaction, then all consecutive reads will provide the same in-transaction value</li>\
+                            <li>SERIALIZABLE - All transactions occur in a completely isolated fashion, as if all transactions in the system had executed serially, one after the other.</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-60
-                +number('Default timeout:', `${model}.defaultTxTimeout`, '"defaultTxTimeout"', 'true', '0', '0', 'Default transaction timeout')
+                +form-field__number({
+                    label: 'Default timeout:',
+                    model: `${model}.defaultTxTimeout`,
+                    name: '"defaultTxTimeout"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Default transaction timeout'
+                })
             .pc-form-grid-col-30
-                +number('Pessimistic log cleanup delay:', `${model}.pessimisticTxLogLinger`, '"pessimisticTxLogLinger"', 'true', '10000', '0',
-                    'Delay, in milliseconds, after which pessimistic recovery entries will be cleaned up for failed node')
+                +form-field__number({
+                    label: 'Pessimistic log cleanup delay:',
+                    model: `${model}.pessimisticTxLogLinger`,
+                    name: '"pessimisticTxLogLinger"',
+                    placeholder: '10000',
+                    min: '0',
+                    tip: 'Delay, in milliseconds, after which pessimistic recovery entries will be cleaned up for failed node'
+                })
             .pc-form-grid-col-30
-                +number('Pessimistic log size:', `${model}.pessimisticTxLogSize`, '"pessimisticTxLogSize"', 'true', '0', '0',
-                    'Size of pessimistic transactions log stored on node in order to recover transaction commit if originating node has left grid before it has sent all messages to transaction nodes')
+                +form-field__number({
+                    label: 'Pessimistic log size:',
+                    model: `${model}.pessimisticTxLogSize`,
+                    name: '"pessimisticTxLogSize"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Size of pessimistic transactions log stored on node in order to recover transaction commit if originating node has left grid before it has sent all messages to transaction nodes'
+                })
             .pc-form-grid-col-60
-                +java-class('Manager factory:', `${model}.txManagerFactory`, '"txManagerFactory"', 'true', 'false',
-                    'Class name of transaction manager factory for integration with JEE app servers')
+                +form-field__java-class({
+                    label: 'Manager factory:',
+                    model: `${model}.txManagerFactory`,
+                    name: '"txManagerFactory"',
+                    tip: 'Class name of transaction manager factory for integration with JEE app servers'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'clusterTransactions')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/dual.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/dual.pug
index 67a37ad..67839e8 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/dual.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/dual.pug
@@ -31,12 +31,28 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6
             .settings-row
-                +number('Maximum pending puts size:', `${model}.dualModeMaxPendingPutsSize`, '"dualModeMaxPendingPutsSize"', 'true', '0', 'Number.MIN_SAFE_INTEGER',
-                    'Maximum amount of pending data read from the secondary file system and waiting to be written to data cache<br/>\
-                    Zero or negative value stands for unlimited size')
+                +form-field__number({
+                    label: 'Maximum pending puts size:',
+                    model: `${model}.dualModeMaxPendingPutsSize`,
+                    name: '"dualModeMaxPendingPutsSize"',
+                    placeholder: '0',
+                    min: 'Number.MIN_SAFE_INTEGER',
+                    tip: 'Maximum amount of pending data read from the secondary file system and waiting to be written to data cache<br/>\
+                         Zero or negative value stands for unlimited size'
+                })
             .settings-row
-                +java-class('Put executor service:', `${model}.dualModePutExecutorService`, '"dualModePutExecutorService"', 'true', 'false', 'DUAL mode put operation executor service')
+                +form-field__java-class({
+                    label: 'Put executor service:',
+                    model: `${model}.dualModePutExecutorService`,
+                    name: '"dualModePutExecutorService"',
+                    tip: 'DUAL mode put operation executor service'
+                })
             .settings-row
-                +checkbox('Put executor service shutdown', `${model}.dualModePutExecutorServiceShutdown`, '"dualModePutExecutorServiceShutdown"', 'DUAL mode put operation executor service shutdown flag')
+                +form-field__checkbox({
+                    label: 'Put executor service shutdown',
+                    model: `${model}.dualModePutExecutorServiceShutdown`,
+                    name: '"dualModePutExecutorServiceShutdown"',
+                    tip: 'DUAL mode put operation executor service shutdown flag'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'igfsDualMode')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/fragmentizer.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/fragmentizer.pug
index a8194c2..d1fa76a 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/fragmentizer.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/fragmentizer.pug
@@ -26,12 +26,41 @@
             -var enabled = `${model}.fragmentizerEnabled`
 
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"fragmentizerEnabled"', 'Fragmentizer enabled flag')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"fragmentizerEnabled"',
+                    tip: 'Fragmentizer enabled flag'
+                })
             .pc-form-grid-col-30
-                +number('Concurrent files:', `${model}.fragmentizerConcurrentFiles`, '"fragmentizerConcurrentFiles"', enabled, '0', '0', 'Number of files to process concurrently by fragmentizer')
+                +form-field__number({
+                    label: 'Concurrent files:',
+                    model: `${model}.fragmentizerConcurrentFiles`,
+                    name: '"fragmentizerConcurrentFiles"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Number of files to process concurrently by fragmentizer'
+                })
             .pc-form-grid-col-30
-                +number('Throttling block length:', `${model}.fragmentizerThrottlingBlockLength`, '"fragmentizerThrottlingBlockLength"', enabled, '16777216', '1', 'Length of file chunk to transmit before throttling is delayed')
+                +form-field__number({
+                    label: 'Throttling block length:',
+                    model: `${model}.fragmentizerThrottlingBlockLength`,
+                    name: '"fragmentizerThrottlingBlockLength"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '16777216',
+                    min: '1',
+                    tip: 'Length of file chunk to transmit before throttling is delayed'
+                })
             .pc-form-grid-col-60
-                +number('Throttling delay:', `${model}.fragmentizerThrottlingDelay`, '"fragmentizerThrottlingDelay"', enabled, '200', '0', 'Delay in milliseconds for which fragmentizer is paused')
+                +form-field__number({
+                    label: 'Throttling delay:',
+                    model: `${model}.fragmentizerThrottlingDelay`,
+                    name: '"fragmentizerThrottlingDelay"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '200',
+                    min: '0',
+                    tip: 'Delay in milliseconds for which fragmentizer is paused'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'igfsFragmentizer')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/general.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/general.pug
index b9eb8fc..777c123 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/general.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/general.pug
@@ -22,12 +22,12 @@
 panel-collapsible(opened=`::true` ng-form=form)
     panel-title General
     panel-description
-        | General IGFS configuration. 
+        | General IGFS configuration.
         a.link-success(href="https://apacheignite-fs.readme.io/docs/in-memory-file-system" target="_blank") More info
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +sane-ignite-form-field-text({
+                +form-field__text({
                     label: 'Name:',
                     model: `${model}.name`,
                     name: '"igfsName"',
@@ -38,9 +38,9 @@
                     ignite-unique-property='name'
                     ignite-unique-skip=`["_id", ${model}]`
                 )
-                    +unique-feedback(`${model}.name`, 'IGFS name should be unique.')
+                    +form-field__error({ error: 'igniteUnique', message: 'IGFS name should be unique.' })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'IGFS mode:',
                     model: `${model}.defaultMode`,
                     name: '"defaultMode"',
@@ -57,7 +57,7 @@
                     `
                 })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-number({
+                +form-field__number({
                     label: 'Group size:',
                     model: `${model}.affinnityGroupSize`,
                     name: '"affinnityGroupSize"',
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/ipc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/ipc.pug
index ef024b4..efae60c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/ipc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/ipc.pug
@@ -28,28 +28,77 @@
             -var enabled = `${model}.ipcEndpointEnabled`
 
             .pc-form-grid-col-60
-                +checkbox('Enabled', enabled, '"ipcEndpointEnabled"', 'IPC endpoint enabled flag')
+                +form-field__checkbox({
+                    label: 'Enabled',
+                    model: enabled,
+                    name: '"ipcEndpointEnabled"',
+                    tip: 'IPC endpoint enabled flag'
+                })
             .pc-form-grid-col-60
-                +dropdown('Type:', `${ipcEndpointConfiguration}.type`, '"ipcEndpointConfigurationType"', enabled, 'TCP',
-                    '[\
+                +form-field__dropdown({
+                    label: 'Type:',
+                    model: `${ipcEndpointConfiguration}.type`,
+                    name: '"ipcEndpointConfigurationType"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'TCP',
+                    options: '[\
                         {value: "SHMEM", label: "SHMEM"},\
                         {value: "TCP", label: "TCP"}\
                     ]',
-                    'IPC endpoint type\
-                    <ul>\
-                        <li>SHMEM - shared memory endpoint</li>\
-                        <li>TCP - TCP endpoint</li>\
-                    </ul>')
+                    tip: 'IPC endpoint type\
+                        <ul>\
+                            <li>SHMEM - shared memory endpoint</li>\
+                            <li>TCP - TCP endpoint</li>\
+                        </ul>'
+                })
             .pc-form-grid-col-30
-                +text-ip-address('Host:', `${ipcEndpointConfiguration}.host`, '"ipcEndpointConfigurationHost"', enabled, '127.0.0.1', 'Host endpoint is bound to')
+                +form-field__ip-address({
+                    label: 'Host:',
+                    model: `${ipcEndpointConfiguration}.host`,
+                    name: '"ipcEndpointConfigurationHost"',
+                    enabled: enabled,
+                    placeholder: '127.0.0.1',
+                    tip: 'Host endpoint is bound to'
+                })
             .pc-form-grid-col-30
-                +number-min-max('Port:', `${ipcEndpointConfiguration}.port`, '"ipcEndpointConfigurationPort"', enabled, '10500', '1', '65535', 'Port endpoint is bound to')
+                +form-field__number({
+                    label: 'Port:',
+                    model: `${ipcEndpointConfiguration}.port`,
+                    name: '"ipcEndpointConfigurationPort"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '10500',
+                    min: '1',
+                    max: '65535',
+                    tip: 'Port endpoint is bound to'
+                })
             .pc-form-grid-col-30
-                +number('Memory size:', `${ipcEndpointConfiguration}.memorySize`, '"ipcEndpointConfigurationMemorySize"', enabled, '262144', '1', 'Shared memory size in bytes allocated for endpoint communication')
+                +form-field__number({
+                    label: 'Memory size:',
+                    model: `${ipcEndpointConfiguration}.memorySize`,
+                    name: '"ipcEndpointConfigurationMemorySize"',
+                    disabled: `!(${enabled})`,
+                    placeholder: '262144',
+                    min: '1',
+                    tip: 'Shared memory size in bytes allocated for endpoint communication'
+                })
             .pc-form-grid-col-30
-                +number('Thread count:', `${ipcEndpointConfiguration}.threadCount`, '"ipcEndpointConfigurationThreadCount"', enabled, 'availableProcessors', '1',
-                    'Number of threads used by this endpoint to process incoming requests')
+                +form-field__number({
+                    label: 'Thread count:',
+                    model: `${ipcEndpointConfiguration}.threadCount`,
+                    name: '"ipcEndpointConfigurationThreadCount"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'availableProcessors',
+                    min: '1',
+                    tip: 'Number of threads used by this endpoint to process incoming requests'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Token directory:', `${ipcEndpointConfiguration}.tokenDirectoryPath`, '"ipcEndpointConfigurationTokenDirectoryPath"', enabled, 'false', 'ipc/shmem', 'Directory where shared memory tokens are stored')
+                +form-field__text({
+                    label: 'Token directory:',
+                    model: `${ipcEndpointConfiguration}.tokenDirectoryPath`,
+                    name: '"ipcEndpointConfigurationTokenDirectoryPath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'ipc/shmem',
+                    tip: 'Directory where shared memory tokens are stored'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'igfsIPC')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/misc.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/misc.pug
index 9a39b3a..cf68e72 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/misc.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/misc.pug
@@ -26,83 +26,181 @@
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +number('Block size:', `${model}.blockSize`, '"blockSize"', 'true', '65536', '0', 'File data block size in bytes')
+                +form-field__number({
+                    label: 'Block size:',
+                    model: `${model}.blockSize`,
+                    name: '"blockSize"',
+                    placeholder: '65536',
+                    min: '0',
+                    tip: 'File data block size in bytes'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +number('Buffer size:', `${model}.streamBufferSize`, '"streamBufferSize"', 'true', '65536', '0', 'Read/write buffer size for IGFS stream operations in bytes')
+                +form-field__number({
+                    label: 'Buffer size:',
+                    model: `${model}.streamBufferSize`,
+                    name: '"streamBufferSize"',
+                    placeholder: '65536',
+                    min: '0',
+                    tip: 'Read/write buffer size for IGFS stream operations in bytes'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if-start='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Stream buffer size:', `${model}.streamBufferSize`, '"streamBufferSize"', 'true', '65536', '0', 'Read/write buffer size for IGFS stream operations in bytes')
+                +form-field__number({
+                    label: 'Stream buffer size:',
+                    model: `${model}.streamBufferSize`,
+                    name: '"streamBufferSize"',
+                    placeholder: '65536',
+                    min: '0',
+                    tip: 'Read/write buffer size for IGFS stream operations in bytes'
+                })
             .pc-form-grid-col-60(ng-if-end)
-                +number('Maximum space size:', `${model}.maxSpaceSize`, '"maxSpaceSize"', 'true', '0', '0', 'Maximum space available for data cache to store file system entries')
+                +form-field__number({
+                    label: 'Maximum space size:',
+                    model: `${model}.maxSpaceSize`,
+                    name: '"maxSpaceSize"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Maximum space available for data cache to store file system entries'
+                })
 
             .pc-form-grid-col-30
-                +number('Maximum task range length:', `${model}.maximumTaskRangeLength`, '"maximumTaskRangeLength"', 'true', '0', '0', 'Maximum default range size of a file being split during IGFS task execution')
+                +form-field__number({
+                    label: 'Maximum task range length:',
+                    model: `${model}.maximumTaskRangeLength`,
+                    name: '"maximumTaskRangeLength"',
+                    placeholder: '0',
+                    min: '0',
+                    tip: 'Maximum default range size of a file being split during IGFS task execution'
+                })
             .pc-form-grid-col-30
-                +number-min-max('Management port:', `${model}.managementPort`, '"managementPort"', 'true', '11400', '0', '65535', 'Port number for management endpoint')
+                +form-field__number({
+                    label: 'Management port:',
+                    model: `${model}.managementPort`,
+                    name: '"managementPort"',
+                    placeholder: '11400',
+                    min: '0',
+                    max: '65535',
+                    tip: 'Port number for management endpoint'
+                })
             .pc-form-grid-col-30
-                +number('Per node batch size:', `${model}.perNodeBatchSize`, '"perNodeBatchSize"', 'true', '100', '0', 'Number of file blocks collected on local node before sending batch to remote node')
+                +form-field__number({
+                    label: 'Per node batch size:',
+                    model: `${model}.perNodeBatchSize`,
+                    name: '"perNodeBatchSize"',
+                    placeholder: '100',
+                    min: '0',
+                    tip: 'Number of file blocks collected on local node before sending batch to remote node'
+                })
             .pc-form-grid-col-30
-                +number('Per node parallel batch count:', `${model}.perNodeParallelBatchCount`, '"perNodeParallelBatchCount"', 'true', '8', '0', 'Number of file block batches that can be concurrently sent to remote node')
+                +form-field__number({
+                    label: 'Per node parallel batch count:',
+                    model: `${model}.perNodeParallelBatchCount`,
+                    name: '"perNodeParallelBatchCount"',
+                    placeholder: '8',
+                    min: '0',
+                    tip: 'Number of file block batches that can be concurrently sent to remote node'
+                })
             .pc-form-grid-col-60
-                +number('Prefetch blocks:', `${model}.prefetchBlocks`, '"prefetchBlocks"', 'true', '0', '0', 'Number of pre-fetched blocks if specific file chunk is requested')
+                +form-field__number({
+                    label: 'Prefetch blocks:',
+                    model: `${model}.prefetchBlocks`,
+                    name: '"prefetchBlocks"',
+                    placeholder: '8',
+                    min: '0',
+                    tip: 'Number of pre-fetched blocks if specific file chunk is requested'
+                })
             .pc-form-grid-col-60
-                +number('Sequential reads before prefetch:', `${model}.sequentialReadsBeforePrefetch`, '"sequentialReadsBeforePrefetch"', 'true', '0', '0', 'Amount of sequential block reads before prefetch is triggered')
+                +form-field__number({
+                    label: 'Sequential reads before prefetch:',
+                    model: `${model}.sequentialReadsBeforePrefetch`,
+                    name: '"sequentialReadsBeforePrefetch"',
+                    placeholder: '8',
+                    min: '0',
+                    tip: 'Amount of sequential block reads before prefetch is triggered'
+                })
 
             //- Removed in ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available(["1.0.0", "2.0.0"])')
-                +number('Trash purge timeout:', `${model}.trashPurgeTimeout`, '"trashPurgeTimeout"', 'true', '1000', '0', 'Maximum timeout awaiting for trash purging in case data cache oversize is detected')
+                +form-field__number({
+                    label: 'Trash purge timeout:',
+                    model: `${model}.trashPurgeTimeout`,
+                    name: '"trashPurgeTimeout"',
+                    placeholder: '1000',
+                    min: '0',
+                    tip: 'Maximum timeout awaiting for trash purging in case data cache oversize is detected'
+                })
 
             .pc-form-grid-col-60
-                +checkbox('Colocate metadata', `${model}.colocateMetadata`, '"colocateMetadata"', 'Whether to co-locate metadata on a single node')
+                +form-field__checkbox({
+                    label: 'Colocate metadata',
+                    model: `${model}.colocateMetadata`,
+                    name: '"colocateMetadata"',
+                    tip: 'Whether to co-locate metadata on a single node'
+                })
             .pc-form-grid-col-60
-                +checkbox('Relaxed consistency', `${model}.relaxedConsistency`, '"relaxedConsistency"',
-                    'If value of this flag is <b>true</b>, IGFS will skip expensive consistency checks<br/>\
-                    It is recommended to set this flag to <b>false</b> if your application has conflicting\
-                    operations, or you do not know how exactly users will use your system')
+                +form-field__checkbox({
+                    label: 'Relaxed consistency',
+                    model: `${model}.relaxedConsistency`,
+                    name: '"relaxedConsistency"',
+                    tip: 'If value of this flag is <b>true</b>, IGFS will skip expensive consistency checks<br/>\
+                         It is recommended to set this flag to <b>false</b> if your application has conflicting\
+                         operations, or you do not know how exactly users will use your system'
+                })
 
             //- Since ignite 2.0
             .pc-form-grid-col-60(ng-if='$ctrl.available("2.0.0")')
-                +checkbox('Update file length on flush', model + '.updateFileLengthOnFlush', '"updateFileLengthOnFlush"', 'Update file length on flush flag')
+                +form-field__checkbox({
+                    label: 'Update file length on flush',
+                    model: model + '.updateFileLengthOnFlush',
+                    name: '"updateFileLengthOnFlush"',
+                    tip: 'Update file length on flush flag'
+                })
 
             .pc-form-grid-col-60
                 mixin igfs-misc-path-modes
                     .ignite-form-field
-                        +ignite-form-field__label('Path modes:', '"pathModes"')
-                            +tooltip(`Map of path prefixes to IGFS modes used for them`)
-                        .ignite-form-field__control
-                            -let items = pathModes
+                        +form-field__label({ label: 'Path modes:', name: '"pathModes"' })
+                            +form-field__tooltip({ title: `Map of path prefixes to IGFS modes used for them` })
 
-                            list-editable(ng-model=items)
-                                list-editable-item-view
-                                    | {{ $item.path + " [" + $item.mode + "]"}}
+                        -let items = pathModes
 
-                                list-editable-item-edit
-                                    - form = '$parent.form'
+                        list-editable(ng-model=items)
+                            list-editable-item-view
+                                | {{ $item.path + " [" + $item.mode + "]"}}
 
-                                    .pc-form-grid-row
-                                        .pc-form-grid-col-30
-                                            +ignite-form-field-text('Path:', '$item.path', '"path"', false, true, 'Enter path')(ignite-auto-focus)
-                                        .pc-form-grid-col-30
-                                            +sane-ignite-form-field-dropdown({
-                                                label: 'Mode:',
-                                                model: `$item.mode`,
-                                                name: '"mode"',
-                                                required: true,
-                                                placeholder: 'Choose igfs mode',
-                                                options: '{{::$ctrl.IGFSs.defaultMode.values}}'
-                                            })(
-                                                ng-model-options='{allowInvalid: true}'
-                                            )
+                            list-editable-item-edit
+                                - form = '$parent.form'
 
-                                list-editable-no-items
-                                    list-editable-add-item-button(
-                                        add-item=`$editLast((${items} = ${items} || []).push({}))`
-                                        label-single='path mode'
-                                        label-multiple='path modes'
-                                    )
+                                .pc-form-grid-row
+                                    .pc-form-grid-col-30
+                                        +form-field__text({
+                                            label: 'Path:',
+                                            model: '$item.path',
+                                            name: '"path"',
+                                            required: true,
+                                            placeholder: 'Enter path'
+                                        })(ignite-auto-focus)
+                                    .pc-form-grid-col-30
+                                        +form-field__dropdown({
+                                            label: 'Mode:',
+                                            model: `$item.mode`,
+                                            name: '"mode"',
+                                            required: true,
+                                            placeholder: 'Choose igfs mode',
+                                            options: '{{::$ctrl.IGFSs.defaultMode.values}}'
+                                        })(
+                                            ng-model-options='{allowInvalid: true}'
+                                        )
+
+                            list-editable-no-items
+                                list-editable-add-item-button(
+                                    add-item=`$editLast((${items} = ${items} || []).push({}))`
+                                    label-single='path mode'
+                                    label-multiple='path modes'
+                                )
 
                 +igfs-misc-path-modes
 
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/secondary.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/secondary.pug
index 92c8210..4d779f1 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/secondary.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/igfs-edit-form/templates/secondary.pug
@@ -22,7 +22,7 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title Secondary file system
     panel-description
-        | Secondary file system is provided for pass-through, write-through, and read-through purposes. 
+        | Secondary file system is provided for pass-through, write-through, and read-through purposes.
         a.link-success(href="https://apacheignite-fs.readme.io/docs/secondary-file-system" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
@@ -30,7 +30,7 @@
             -var secondaryFileSystem = `${model}.secondaryFileSystem`
 
             .pc-form-grid-col-60
-                +sane-form-field-checkbox({
+                +form-field__checkbox({
                     label: 'Enabled',
                     name: '"secondaryFileSystemEnabled"',
                     model: enabled
@@ -43,13 +43,34 @@
                     ui-validate-watch-collection=`"[${model}.defaultMode, ${model}.pathModes]"`
                     ui-validate-watch-object-equality='true'
                 )
-                    +form-field-feedback(null, 'requiredWhenIGFSProxyMode', 'Secondary file system should be configured for "PROXY" IGFS mode')
-                    +form-field-feedback(null, 'requiredWhenPathModeProxyMode', 'Secondary file system should be configured for "PROXY" path mode')
+                    +form-field__error({ error: 'requiredWhenIGFSProxyMode', message: 'Secondary file system should be configured for "PROXY" IGFS mode' })
+                    +form-field__error({ error: 'requiredWhenPathModeProxyMode', message: 'Secondary file system should be configured for "PROXY" path mode' })
             .pc-form-grid-col-60
-                +text-enabled('URI:', `${secondaryFileSystem}.uri`, '"hadoopURI"', enabled, 'false', 'hdfs://[namenodehost]:[port]/[path]', 'URI of file system')
+                +form-field__text({
+                    label: 'URI:',
+                    model: `${secondaryFileSystem}.uri`,
+                    name: '"hadoopURI"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'hdfs://[namenodehost]:[port]/[path]',
+                    tip: 'URI of file system'
+                })
             .pc-form-grid-col-60
-                +text-enabled('Config path:', `${secondaryFileSystem}.cfgPath`, '"cfgPath"', enabled, 'false', 'Path to additional config', 'Additional path to Hadoop configuration')
+                +form-field__text({
+                    label: 'Config path:',
+                    model: `${secondaryFileSystem}.cfgPath`,
+                    name: '"cfgPath"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Path to additional config',
+                    tip: 'Additional path to Hadoop configuration'
+                })
             .pc-form-grid-col-60
-                +text-enabled('User name:', `${secondaryFileSystem}.userName`, '"userName"', enabled, 'false', 'Input user name', 'User name')
+                +form-field__text({
+                    label: 'User name:',
+                    model: `${secondaryFileSystem}.userName`,
+                    name: '"userName"',
+                    disabled: `!(${enabled})`,
+                    placeholder: 'Input user name',
+                    tip: 'User name'
+                })
         .pca-form-column-6
             +preview-xml-java(model, 'igfsSecondFS')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/controller.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/controller.js
index b7d4ebe..2b53d97 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/controller.js
@@ -31,7 +31,8 @@
     /** @type {ng.ICompiledExpression} */
     onSave;
 
-    static $inject = ['ModalImportModels', 'IgniteErrorPopover', 'IgniteLegacyUtils', Confirm.name, 'ConfigChangesGuard', IgniteVersion.name, '$scope', Models.name, 'IgniteFormUtils'];
+    static $inject = ['ModalImportModels', 'IgniteErrorPopover', 'IgniteLegacyUtils', 'Confirm', 'ConfigChangesGuard', 'IgniteVersion', '$scope', 'Models', 'IgniteFormUtils'];
+
     /**
      * @param {ModalImportModels} ModalImportModels
      * @param {Confirm} Confirm
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/general.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/general.pug
index a6c8194..589760e 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/general.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/general.pug
@@ -23,15 +23,20 @@
 panel-collapsible(opened=`::true` ng-form=form)
     panel-title General
     panel-description
-        | Domain model properties common for Query and Store. 
-        a.link-success(href="https://apacheignite.readme.io/docs/cache-queries" target="_blank") More info about query configuration. 
+        | Domain model properties common for Query and Store.
+        a.link-success(href="https://apacheignite.readme.io/docs/cache-queries" target="_blank") More info about query configuration.
         a.link-success(href="https://apacheignite.readme.io/docs/3rd-party-store" target="_blank") More info about store.
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-60
-                +checkbox('Generate POJO classes', generatePojo, '"generatePojo"', 'If selected then POJO classes will be generated from database tables')
+                +form-field__checkbox({
+                    label: 'Generate POJO classes',
+                    model: generatePojo,
+                    name: '"generatePojo"',
+                    tip: 'If selected then POJO classes will be generated from database tables'
+                })
             .pc-form-grid-col-30
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Caches:',
                     model: `${model}.caches`,
                     name: '"caches"',
@@ -42,16 +47,42 @@
                     tip: 'Select caches to describe types in cache'
                 })
             .pc-form-grid-col-30
-                +dropdown-required('Query metadata:', `${model}.queryMetadata`, '"queryMetadata"', 'true', 'true', '', '::$ctrl.Models.queryMetadata.values',
-                    'Query metadata configured with:\
-                    <ul>\
-                        <li>Java annotations like @QuerySqlField</li>\
-                        <li>Configuration via QueryEntity class</li>\
-                    </ul>')
+                +form-field__dropdown({
+                    label: 'Query metadata:',
+                    model: `${model}.queryMetadata`,
+                    name: '"queryMetadata"',
+                    required: 'true',
+                    placeholder: '',
+                    options: '::$ctrl.Models.queryMetadata.values',
+                    tip: 'Query metadata configured with:\
+                          <ul>\
+                            <li>Java annotations like @QuerySqlField</li>\
+                            <li>Configuration via QueryEntity class</li>\
+                          </ul>'
+                })
+
             .pc-form-grid-col-60
-                +java-class-typeahead('Key type:', `${model}.keyType`, '"keyType"', '$ctrl.javaBuiltInClassesBase', 'true', 'true', '{{ ' + generatePojo + ' ? "Full class name for Key" : "Key type name" }}', 'Key class used to store key in cache', generatePojo)
+                +form-field__java-class--typeahead({
+                    label: 'Key type:',
+                    model: `${model}.keyType`,
+                    name: '"keyType"',
+                    options: '$ctrl.javaBuiltInClassesBase',
+                    required: 'true',
+                    placeholder: '{{ ' + generatePojo + ' ? "Full class name for Key" : "Key type name" }}',
+                    tip: 'Key class used to store key in cache',
+                    validationActive: generatePojo
+                })
             .pc-form-grid-col-60
-                +java-class-autofocus-placholder('Value type:', `${model}.valueType`, '"valueType"', 'true', 'true', 'false', '{{ ' + generatePojo +' ? "Enter fully qualified class name" : "Value type name" }}', 'Value class used to store value in cache', generatePojo)
+                +form-field__java-class({
+                    label: 'Value type:',
+                    model: `${model}.valueType`,
+                    name: '"valueType"',
+                    placeholder: '{{ ' + generatePojo +' ? "Enter fully qualified class name" : "Value type name" }}',
+                    tip: 'Value class used to store value in cache',
+                    validationActive: generatePojo
+                })(
+                    ignite-form-field-input-autofocus=autofocus
+                )
 
         .pca-form-column-6
             +preview-xml-java(model, 'domainModelGeneral')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/query.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/query.pug
index ed91ec4..ec9f1d4 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/query.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/query.pug
@@ -26,7 +26,7 @@
 panel-collapsible(ng-form=form opened=`!!${model}.queryMetadata`)
     panel-title#query-title Domain model for SQL query
     panel-description
-        | Domain model properties for fields queries. 
+        | Domain model properties for fields queries.
         a.link-success(href='https://apacheignite.readme.io/docs/cache-queries' target='_blank') More info
     panel-content.pca-form-row
         .pca-form-column-6.pc-form-grid-row
@@ -37,59 +37,87 @@
                 label Not available for annotated types
 
             .pc-form-grid-col-60(ng-if-start=`${model}.queryMetadata === 'Configuration'`)
-                +text('Table name:', `${model}.tableName`, '"tableName"', 'false', 'Enter table name')
+                +form-field__text({
+                    label: 'Table name:',
+                    model: `${model}.tableName`,
+                    name: '"tableName"',
+                    placeholder: 'Enter table name'
+                })
 
             .pc-form-grid-col-30(ng-if-start='$ctrl.available("2.0.0")')
-                +text('Key field name:', `${model}.keyFieldName`, '"keyFieldName"', 'false', 'Enter key field name',
-                    'Key name.<br/>' +
-                    'Can be used in field list to denote the key as a whole')
+                +form-field__text({
+                    label: 'Key field name:',
+                    model: `${model}.keyFieldName`,
+                    name: '"keyFieldName"',
+                    placeholder: 'Enter key field name',
+                    tip: 'Key name.<br/>' +
+                        'Can be used in field list to denote the key as a whole'
+                })
             .pc-form-grid-col-30(ng-if-end)
-                +text('Value field name:', `${model}.valueFieldName`, '"valueFieldName"', 'false', 'Enter value field name',
-                    'Value name.<br/>' +
-                    'Can be used in field list to denote the entire value')
+                +form-field__text({
+                    label: 'Value field name:',
+                    model: `${model}.valueFieldName`,
+                    name: '"valueFieldName"',
+                    placeholder: 'Enter value field name',
+                    tip: 'Value name.<br/>' +
+                        'Can be used in field list to denote the entire value'
+                })
 
             .pc-form-grid-col-60
                 mixin domains-query-fields
                     .ignite-form-field
-                        +ignite-form-field__label('Fields:', '"fields"')
-                            +tooltip(`Collection of name-to-type mappings to be queried, in addition to indexed fields`)
-                        .ignite-form-field__control
-                            -let items = queryFields
-                            list-editable(
-                                ng-model=items
-                                name='queryFields'
-                                ng-change=`$ctrl.onQueryFieldsChange(${model})`
-                            )
-                                list-editable-item-view
-                                    | {{ $item.name}} / {{ $item.className}}
+                        +form-field__label({ label: 'Fields:', name: '"fields"' })
+                            +form-field__tooltip({ title: `Collection of name-to-type mappings to be queried, in addition to indexed fields` })
 
-                                list-editable-item-edit
-                                    - form = '$parent.form'
-                                    .pc-form-grid-row
-                                        .pc-form-grid-col-30(divider='/')
-                                            +ignite-form-field-text('Field name:', '$item.name', '"name"', false, true, 'Enter field name')(
-                                                data-ignite-unique=items
-                                                data-ignite-unique-property='name'
-                                                ignite-auto-focus
-                                            )
-                                                +unique-feedback('"name"', 'Property with such name already exists!')
-                                        .pc-form-grid-col-30
-                                            +java-class-typeahead('Field full class name:', `$item.className`, '"className"', '$ctrl.queryFieldTypes', true, true, 'Enter field full class name')(
-                                                ng-model-options='{allowInvalid: true}'
-                                                extra-valid-java-identifiers='$ctrl.queryFieldTypes'
-                                            )
+                        -let items = queryFields
+                        list-editable(
+                            ng-model=items
+                            name='queryFields'
+                            ng-change=`$ctrl.onQueryFieldsChange(${model})`
+                        )
+                            list-editable-item-view
+                                | {{ $item.name}} / {{ $item.className}}
 
-                                list-editable-no-items
-                                    list-editable-add-item-button(
-                                        add-item=`$editLast((${items} = ${items} || []).push({}))`
-                                        label-single='field to query'
-                                        label-multiple='fields'
-                                    )
+                            list-editable-item-edit
+                                - form = '$parent.form'
+                                .pc-form-grid-row
+                                    .pc-form-grid-col-30(divider='/')
+                                        +form-field__text({
+                                            label: 'Field name:',
+                                            model: '$item.name',
+                                            name: '"name"',
+                                            required: true,
+                                            placeholder: 'Enter field name'
+                                        })(
+                                            ignite-unique=items
+                                            ignite-unique-property='name'
+                                            ignite-auto-focus
+                                        )
+                                            +form-field__error({ error: 'igniteUnique', message: 'Property with such name already exists!' })
+                                    .pc-form-grid-col-30
+                                        +form-field__java-class--typeahead({
+                                            label: 'Field full class name:',
+                                            model: `$item.className`,
+                                            name: '"className"',
+                                            options: '$ctrl.queryFieldTypes',
+                                            required: 'true',
+                                            placeholder: 'Enter field full class name'
+                                        })(
+                                            ng-model-options='{allowInvalid: true}'
+                                            extra-valid-java-identifiers='$ctrl.queryFieldTypes'
+                                        )
+
+                            list-editable-no-items
+                                list-editable-add-item-button(
+                                    add-item=`$editLast((${items} = ${items} || []).push({}))`
+                                    label-single='field to query'
+                                    label-multiple='fields'
+                                )
 
                 +domains-query-fields
 
             .pc-form-grid-col-60
-                +sane-ignite-form-field-dropdown({
+                +form-field__dropdown({
                     label: 'Key fields:',
                     model: queryKeyFields,
                     name: '"queryKeyFields"',
@@ -103,153 +131,165 @@
             .pc-form-grid-col-60
                 mixin domains-query-aliases
                     .ignite-form-field
-                        +ignite-form-field__label('Aliases:', '"aliases"')
-                            +tooltip(`Mapping from full property name in dot notation to an alias that will be used as SQL column name<br />
-                                For example: "parent.name" as "parentName"`)
-                        .ignite-form-field__control
-                            -let items = queryAliases
+                        +form-field__label({ label: 'Aliases:', name: '"aliases"' })
+                            +form-field__tooltip({ title: `Mapping from full property name in dot notation to an alias that will be used as SQL column name<br />
+                                For example: "parent.name" as "parentName"` })
 
-                            list-editable(ng-model=items name='queryAliases')
-                                list-editable-item-view
-                                    | {{ $item.field }} &rarr; {{ $item.alias }}
+                        -let items = queryAliases
 
-                                list-editable-item-edit
-                                    - form = '$parent.form'
-                                    .pc-form-grid-row
-                                        .pc-form-grid-col-30(divider='/')
-                                            +ignite-form-field-text('Field name', '$item.field', '"field"', false, true, 'Enter field name')(
-                                                data-ignite-unique=items
-                                                data-ignite-unique-property='field'
-                                                ignite-auto-focus
-                                            )
-                                                +unique-feedback('"field"', 'Such field already exists!')
-                                        .pc-form-grid-col-30
-                                            +ignite-form-field-text('Field alias', '$item.alias', '"alias"', false, true, 'Enter field alias')
+                        list-editable(ng-model=items name='queryAliases')
+                            list-editable-item-view
+                                | {{ $item.field }} &rarr; {{ $item.alias }}
 
-                                list-editable-no-items
-                                    list-editable-add-item-button(
-                                        add-item=`$editLast((${items} = ${items} || []).push({}))`
-                                        label-single='alias to query'
-                                        label-multiple='aliases'
-                                    )
+                            list-editable-item-edit
+                                - form = '$parent.form'
+                                .pc-form-grid-row
+                                    .pc-form-grid-col-30(divider='/')
+                                        +form-field__text({
+                                            label: 'Field name',
+                                            model: '$item.field',
+                                            name: '"field"',
+                                            required: true,
+                                            placeholder: 'Enter field name'
+                                        })(
+                                            ignite-unique=items
+                                            ignite-unique-property='field'
+                                            ignite-auto-focus
+                                        )
+                                            +form-field__error({ error: 'igniteUnique', message: 'Such field already exists!' })
+                                    .pc-form-grid-col-30
+                                        +form-field__text({
+                                            label: 'Field alias',
+                                            model: '$item.alias',
+                                            name: '"alias"',
+                                            required: true,
+                                            placeholder: 'Enter field alias'
+                                        })
+
+                            list-editable-no-items
+                                list-editable-add-item-button(
+                                    add-item=`$editLast((${items} = ${items} || []).push({}))`
+                                    label-single='alias to query'
+                                    label-multiple='aliases'
+                                )
 
                 +domains-query-aliases
 
             .pc-form-grid-col-60(ng-if-end)
                 .ignite-form-field
-                    +ignite-form-field__label('Indexes:', '"indexes"')
-                    .ignite-form-field__control
-                        list-editable(
-                            ng-model=queryIndexes
-                            ng-model-options='{allowInvalid: true}'
-                            name='queryIndexes'
-                            ui-validate=`{
-                                complete: '$ctrl.Models.queryIndexes.complete($value)',
-                                fieldsExist: '$ctrl.Models.queryIndexes.fieldsExist($value, ${queryFields})',
-                                indexFieldsHaveUniqueNames: '$ctrl.Models.queryIndexes.indexFieldsHaveUniqueNames($value)'
-                            }`
-                            ui-validate-watch=`"[${queryIndexes}, ${queryFields}]"`
-                            ui-validate-watch-object-equality='true'
-                        )
-                            list-editable-item-view(item-name='queryIndex')
-                                div {{ queryIndex.name }} [{{ queryIndex.indexType }}]
-                                div(ng-repeat='field in queryIndex.fields track by field._id')
-                                    span {{ field.name }}
-                                    span(ng-if='queryIndex.indexType == "SORTED"')
-                                        |  / {{ field.direction ? 'ASC' : 'DESC'}}
+                    +form-field__label({ label: 'Indexes:', name: '"indexes"' })
 
-                            list-editable-item-edit(item-name='queryIndex')
-                                .pc-form-grid-row
-                                    .pc-form-grid-col-30(divider='/')
-                                        +sane-ignite-form-field-text({
-                                            label: 'Index name:',
-                                            model: 'queryIndex.name',
-                                            name: '"name"',
-                                            required: true,
-                                            placeholder: 'Enter index name'
-                                        })(
-                                            ignite-unique=queryIndexes
-                                            ignite-unique-property='name'
-                                            ignite-form-field-input-autofocus='true'
+                    list-editable(
+                        ng-model=queryIndexes
+                        ng-model-options='{allowInvalid: true}'
+                        name='queryIndexes'
+                        ui-validate=`{
+                            complete: '$ctrl.Models.queryIndexes.complete($value)',
+                            fieldsExist: '$ctrl.Models.queryIndexes.fieldsExist($value, ${queryFields})',
+                            indexFieldsHaveUniqueNames: '$ctrl.Models.queryIndexes.indexFieldsHaveUniqueNames($value)'
+                        }`
+                        ui-validate-watch=`"[${queryIndexes}, ${queryFields}]"`
+                        ui-validate-watch-object-equality='true'
+                    )
+                        list-editable-item-view(item-name='queryIndex')
+                            div {{ queryIndex.name }} [{{ queryIndex.indexType }}]
+                            div(ng-repeat='field in queryIndex.fields track by field._id')
+                                span {{ field.name }}
+                                span(ng-if='queryIndex.indexType == "SORTED"')
+                                    |  / {{ field.direction ? 'ASC' : 'DESC'}}
+
+                        list-editable-item-edit(item-name='queryIndex')
+                            .pc-form-grid-row
+                                .pc-form-grid-col-30(divider='/')
+                                    +form-field__text({
+                                        label: 'Index name:',
+                                        model: 'queryIndex.name',
+                                        name: '"name"',
+                                        required: true,
+                                        placeholder: 'Enter index name'
+                                    })(
+                                        ignite-unique=queryIndexes
+                                        ignite-unique-property='name'
+                                        ignite-form-field-input-autofocus='true'
+                                    )
+                                        +form-field__error({ error: 'igniteUnique', message: 'Such index already exists!' })
+                                .pc-form-grid-col-30
+                                    +form-field__dropdown({
+                                        label: 'Index type:',
+                                        model: `queryIndex.indexType`,
+                                        name: '"indexType"',
+                                        required: true,
+                                        placeholder: 'Select index type',
+                                        options: '::$ctrl.Models.indexType.values'
+                                    })
+                                .pc-form-grid-col-60
+                                    .ignite-form-field
+                                        +form-field__label({ label: 'Index fields:', name: '"indexFields"', required: true })
+
+                                        list-editable(
+                                            ng-model='queryIndex.fields'
+                                            ng-model-options='{allowInvalid: true}'
+                                            name='indexFields'
+                                            ng-required='true'
                                         )
-                                            +unique-feedback(_, 'Such index already exists!')
-                                    .pc-form-grid-col-30
-                                        +sane-ignite-form-field-dropdown({
-                                            label: 'Index type:',
-                                            model: `queryIndex.indexType`,
-                                            name: '"indexType"',
-                                            required: true,
-                                            placeholder: 'Select index type',
-                                            options: '::$ctrl.Models.indexType.values'
-                                        })
-                                    .pc-form-grid-col-60
-                                        .ignite-form-field
-                                            +ignite-form-field__label('Index fields:', '"indexFields"', true)
-                                            .ignite-form-field__control
-                                                list-editable(
-                                                    ng-model='queryIndex.fields'
-                                                    ng-model-options='{allowInvalid: true}'
-                                                    name='indexFields'
-                                                    ng-required='true'
-                                                )
-                                                    list-editable-item-view(item-name='indexField')
-                                                        | {{ indexField.name }} 
-                                                        span(ng-if='queryIndex.indexType === "SORTED"')
-                                                            |  / {{ indexField.direction ? "ASC" : "DESC" }}
+                                            list-editable-item-view(item-name='indexField')
+                                                | {{ indexField.name }}
+                                                span(ng-if='queryIndex.indexType === "SORTED"')
+                                                    |  / {{ indexField.direction ? "ASC" : "DESC" }}
 
-                                                    list-editable-item-edit(item-name='indexField')
-                                                        .pc-form-grid-row
-                                                            .pc-form-grid-col-60
-                                                                +sane-ignite-form-field-dropdown({
-                                                                    label: 'Index field:',
-                                                                    model: 'indexField.name',
-                                                                    name: '"indexName"',
-                                                                    placeholder: `{{ ${queryFields}.length > 0 ? 'Choose index field' : 'No fields configured' }}`,
-                                                                    options: queryFields
-                                                                })(
-                                                                    bs-options=`queryField.name as queryField.name for queryField in ${queryFields}`
-                                                                    ng-disabled=`${queryFields}.length === 0`
-                                                                    ng-model-options='{allowInvalid: true}'
-                                                                    ignite-unique='queryIndex.fields'
-                                                                    ignite-unique-property='name'
-                                                                    ignite-auto-focus
-                                                                )
-                                                                    +unique-feedback(_, 'Such field already exists!')
-                                                            .pc-form-grid-col-60(
-                                                                ng-if='queryIndex.indexType === "SORTED"'
-                                                            )
-                                                                +sane-ignite-form-field-dropdown({
-                                                                    label: 'Sort direction:',
-                                                                    model: 'indexField.direction',
-                                                                    name: '"indexDirection"',
-                                                                    required: true,
-                                                                    options: '::$ctrl.Models.indexSortDirection.values'
-                                                                })
-                                                    list-editable-no-items
-                                                        list-editable-add-item-button(
-                                                            add-item=`$edit($ctrl.Models.addIndexField(queryIndex.fields))`
-                                                            label-single='field to index'
-                                                            label-multiple='fields in index'
+                                            list-editable-item-edit(item-name='indexField')
+                                                .pc-form-grid-row
+                                                    .pc-form-grid-col-60
+                                                        +form-field__dropdown({
+                                                            label: 'Index field:',
+                                                            model: 'indexField.name',
+                                                            name: '"indexName"',
+                                                            placeholder: `{{ ${queryFields}.length > 0 ? 'Choose index field' : 'No fields configured' }}`,
+                                                            options: queryFields
+                                                        })(
+                                                            bs-options=`queryField.name as queryField.name for queryField in ${queryFields}`
+                                                            ng-disabled=`${queryFields}.length === 0`
+                                                            ng-model-options='{allowInvalid: true}'
+                                                            ignite-unique='queryIndex.fields'
+                                                            ignite-unique-property='name'
+                                                            ignite-auto-focus
                                                         )
-                                            .ignite-form-field__errors(
-                                                ng-messages=`$form.indexFields.$error`
-                                                ng-show=`$form.indexFields.$invalid`
-                                            )
-                                                +form-field-feedback(_, 'required', 'Index fields should be configured')
+                                                            +form-field__error({ error: 'igniteUnique', message: 'Such field already exists!' })
+                                                    .pc-form-grid-col-60(
+                                                        ng-if='queryIndex.indexType === "SORTED"'
+                                                    )
+                                                        +form-field__dropdown({
+                                                            label: 'Sort direction:',
+                                                            model: 'indexField.direction',
+                                                            name: '"indexDirection"',
+                                                            required: true,
+                                                            options: '::$ctrl.Models.indexSortDirection.values'
+                                                        })
+                                            list-editable-no-items
+                                                list-editable-add-item-button(
+                                                    add-item=`$edit($ctrl.Models.addIndexField(queryIndex.fields))`
+                                                    label-single='field to index'
+                                                    label-multiple='fields in index'
+                                                )
+                                        .form-field__errors(
+                                            ng-messages=`$form.indexFields.$error`
+                                            ng-show=`$form.indexFields.$invalid`
+                                        )
+                                            +form-field__error({ error: 'required', message: 'Index fields should be configured' })
 
-                            list-editable-no-items
-                                list-editable-add-item-button(
-                                    add-item=`$edit($ctrl.Models.addIndex(${model}))`
-                                    label-single='index'
-                                    label-multiple='fields'
-                                )
-                    .ignite-form-field__errors(
+                        list-editable-no-items
+                            list-editable-add-item-button(
+                                add-item=`$edit($ctrl.Models.addIndex(${model}))`
+                                label-single='index'
+                                label-multiple='fields'
+                            )
+                    .form-field__errors(
                         ng-messages=`query.queryIndexes.$error`
                         ng-show=`query.queryIndexes.$invalid`
                     )
-                        +form-field-feedback(_, 'complete', 'Some indexes are incomplete')
-                        +form-field-feedback(_, 'fieldsExist', 'Some indexes use unknown fields')
-                        +form-field-feedback(_, 'indexFieldsHaveUniqueNames', 'Each query index field name should be unique')
+                        +form-field__error({ error: 'complete', message: 'Some indexes are incomplete' })
+                        +form-field__error({ error: 'fieldsExist', message: 'Some indexes use unknown fields' })
+                        +form-field__error({ error: 'indexFieldsHaveUniqueNames', message: 'Each query index field name should be unique' })
 
         .pca-form-column-6
             +preview-xml-java(model, 'domainModelQuery')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/store.pug b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/store.pug
index 811c0d7..0e1a44d 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/store.pug
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/model-edit-form/templates/store.pug
@@ -37,7 +37,7 @@
         list-editable-item-edit
             .pc-form-grid-row
                 .pc-form-grid-col-30(divider='/')
-                    +sane-ignite-form-field-text({
+                    +form-field__text({
                         label: 'DB name:',
                         model: '$item.databaseFieldName',
                         name: '"databaseFieldName"',
@@ -49,11 +49,18 @@
                         ignite-unique=items
                         ignite-unique-property='databaseFieldName'
                     )
-                        +unique-feedback(_, 'DB name should be unique')
+                        +form-field__error({ error: 'igniteUnique', message: 'DB name should be unique' })
                 .pc-form-grid-col-30
-                    +dropdown-required('DB type:', '$item.databaseFieldType', '"databaseFieldType"', true, true, 'Choose DB type', 'supportedJdbcTypes')
+                    +form-field__dropdown({
+                        label: 'DB type:',
+                        model:'$item.databaseFieldType',
+                        name: '"databaseFieldType"',
+                        required: 'true',
+                        placeholder: 'Choose DB type',
+                        options: 'supportedJdbcTypes'
+                    })
                 .pc-form-grid-col-30(divider='/')
-                    +sane-ignite-form-field-text({
+                    +form-field__text({
                         label: 'Java name:',
                         model: '$item.javaFieldName',
                         name: '"javaFieldName"',
@@ -64,9 +71,16 @@
                         ignite-unique=items
                         ignite-unique-property='javaFieldName'
                     )
-                        +unique-feedback(_, 'Java name should be unique')
+                        +form-field__error({ error: 'igniteUnique', message: 'Java name should be unique' })
                 .pc-form-grid-col-30
-                    +dropdown-required('Java type:', '$item.javaFieldType', '"javaFieldType"', true, true, 'Choose Java type', 'supportedJavaTypes')
+                    +form-field__dropdown({
+                        label: 'Java type:',
+                        model: '$item.javaFieldType',
+                        name: '"javaFieldType"',
+                        required: 'true',
+                        placeholder: 'Choose Java type',
+                        options: 'supportedJavaTypes'
+                    })
 
         list-editable-no-items
             list-editable-add-item-button(
@@ -78,45 +92,59 @@
 panel-collapsible(ng-form=form on-open=`ui.loadPanel('${form}')`)
     panel-title#store-title Domain model for cache store
     panel-description
-        | Domain model properties for binding database with cache via POJO cache store. 
+        | Domain model properties for binding database with cache via POJO cache store.
         a.link-success(href="https://apacheignite.readme.io/docs/3rd-party-store" target="_blank") More info
     panel-content.pca-form-row(ng-if=`ui.isPanelLoaded('${form}')`)
         .pca-form-column-6.pc-form-grid-row
             .pc-form-grid-col-30
-                +text('Database schema:', model + '.databaseSchema', '"databaseSchema"', 'false', 'Input DB schema name', 'Schema name in database')
+                +form-field__text({
+                    label: 'Database schema:',
+                    model: model + '.databaseSchema',
+                    name: '"databaseSchema"',
+                    placeholder: 'Input DB schema name',
+                    tip: 'Schema name in database'
+                })
             .pc-form-grid-col-30
-                +text('Database table:', model + '.databaseTable', '"databaseTable"', 'false', 'Input DB table name', 'Table name in database')
+                +form-field__text({
+                    label: 'Database table:',
+                    model: model + '.databaseTable',
+                    name: '"databaseTable"',
+                    placeholder: 'Input DB table name',
+                    tip: 'Table name in database'
+                })
             .pc-form-grid-col-60
                 .ignite-form-field
-                    +ignite-form-field__label('Key fields:', '"keyFields"')
-                        +tooltip(`Collection of key fields descriptions for CacheJdbcPojoStore`)
-                    .ignite-form-field__control
-                        +list-db-field-edit({
-                            items: keyFields,
-                            itemName: 'key field',
-                            itemsName: 'key fields'
-                        })(name='keyFields')
-                    .ignite-form-field__errors(
+                    +form-field__label({ label: 'Key fields:', name: '"keyFields"' })
+                        +form-field__tooltip({ title: `Collection of key fields descriptions for CacheJdbcPojoStore` })
+
+                    +list-db-field-edit({
+                        items: keyFields,
+                        itemName: 'key field',
+                        itemsName: 'key fields'
+                    })(name='keyFields')
+
+                    .form-field__errors(
                         ng-messages=`store.keyFields.$error`
                         ng-show=`store.keyFields.$invalid`
                     )
-                        +form-field-feedback(_, 'dbFieldUnique', 'Each key field DB name and Java name should be unique')
+                        +form-field__error({ error: 'dbFieldUnique', message: 'Each key field DB name and Java name should be unique' })
 
             .pc-form-grid-col-60
                 .ignite-form-field
-                    +ignite-form-field__label('Value fields:', '"valueFields"')
-                        +tooltip(`Collection of value fields descriptions for CacheJdbcPojoStore`)
-                    .ignite-form-field__control
-                        +list-db-field-edit({
-                            items: valueFields,
-                            itemName: 'value field',
-                            itemsName: 'value fields'
-                        })(name='valueFields')
-                    .ignite-form-field__errors(
+                    +form-field__label({ label: 'Value fields:', name: '"valueFields"' })
+                        +form-field__tooltip({ title: `Collection of value fields descriptions for CacheJdbcPojoStore` })
+
+                    +list-db-field-edit({
+                        items: valueFields,
+                        itemName: 'value field',
+                        itemsName: 'value fields'
+                    })(name='valueFields')
+
+                    .form-field__errors(
                         ng-messages=`store.valueFields.$error`
                         ng-show=`store.valueFields.$invalid`
                     )
-                        +form-field-feedback(_, 'dbFieldUnique', 'Each value field DB name and Java name should be unique')
+                        +form-field__error({ error: 'dbFieldUnique', message: 'Each value field DB name and Java name should be unique' })
 
         .pca-form-column-6
             +preview-xml-java(model, 'domainStore')
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/controller.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/controller.js
index ecb7a15..da17c67 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/controller.js
@@ -27,16 +27,17 @@
 // Controller for Caches screen.
 export default class Controller {
     static $inject = [
-        ConfigSelectors.name,
+        'ConfigSelectors',
         'configSelectionManager',
         '$uiRouter',
         '$transitions',
-        ConfigureState.name,
+        'ConfigureState',
         '$state',
         'IgniteFormUtils',
         'IgniteVersion',
-        Caches.name
+        'Caches'
     ];
+
     /**
      * @param {ConfigSelectors} ConfigSelectors
      * @param {object} configSelectionManager
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/index.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/index.js
index 818c263..c6c751c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/index.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-caches/index.js
@@ -20,4 +20,4 @@
 
 export default angular
     .module('ignite-console.page-configure-advanced.caches', [])
-    .component(component.name, component);
+    .component('pageConfigureAdvancedCaches', component);
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-cluster/controller.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-cluster/controller.js
index 2b05940..f0348c3 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-cluster/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-cluster/controller.js
@@ -22,7 +22,7 @@
 
 // Controller for Clusters screen.
 export default class PageConfigureAdvancedCluster {
-    static $inject = ['$uiRouter', ConfigSelectors.name, ConfigureState.name];
+    static $inject = ['$uiRouter', 'ConfigSelectors', 'ConfigureState'];
 
     /**
      * @param {uirouter.UIRouter} $uiRouter
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-igfs/controller.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-igfs/controller.js
index dd0e5da..50eee7c 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-igfs/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-igfs/controller.js
@@ -27,7 +27,8 @@
 import IGFSs from 'app/services/IGFSs';
 
 export default class PageConfigureAdvancedIGFS {
-    static $inject = [ConfigSelectors.name, ConfigureState.name, '$uiRouter', IGFSs.name, '$state', 'configSelectionManager'];
+    static $inject = ['ConfigSelectors', 'ConfigureState', '$uiRouter', 'IGFSs', '$state', 'configSelectionManager'];
+
     /**
      * @param {ConfigSelectors} ConfigSelectors        
      * @param {ConfigureState} ConfigureState         
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-models/controller.js b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-models/controller.js
index d2a07e8..fd1ccaa 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-models/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/components/page-configure-advanced-models/controller.js
@@ -32,7 +32,8 @@
 import {default as Models} from 'app/services/Models';
 
 export default class PageConfigureAdvancedModels {
-    static $inject = [ConfigSelectors.name, ConfigureState.name, '$uiRouter', Models.name, '$state', 'configSelectionManager'];
+    static $inject = ['ConfigSelectors', 'ConfigureState', '$uiRouter', 'Models', '$state', 'configSelectionManager'];
+
     /**
      * @param {ConfigSelectors} ConfigSelectors
      * @param {ConfigureState} ConfigureState
diff --git a/modules/web-console/frontend/app/components/page-configure-advanced/style.scss b/modules/web-console/frontend/app/components/page-configure-advanced/style.scss
index 9480486..40cd713 100644
--- a/modules/web-console/frontend/app/components/page-configure-advanced/style.scss
+++ b/modules/web-console/frontend/app/components/page-configure-advanced/style.scss
@@ -114,12 +114,6 @@
         user-select: none;
         cursor: pointer;
 
-        ignite-form-panel-chevron {
-            margin-right: 10px;
-            position: relative;
-            top: -3px;
-        }
-
         .pca-panel-heading-title {
             font-size: 16px;
             margin-right: 8px;
diff --git a/modules/web-console/frontend/app/components/page-configure-basic/controller.js b/modules/web-console/frontend/app/components/page-configure-basic/controller.js
index 105765a..88c6163 100644
--- a/modules/web-console/frontend/app/components/page-configure-basic/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-basic/controller.js
@@ -40,7 +40,7 @@
     form;
 
     static $inject = [
-        Confirm.name, '$uiRouter', ConfigureState.name, ConfigSelectors.name, Clusters.name, Caches.name, IgniteVersion.name, '$element', 'ConfigChangesGuard', 'IgniteFormUtils', '$scope'
+        'Confirm', '$uiRouter', 'ConfigureState', 'ConfigSelectors', 'Clusters', 'Caches', 'IgniteVersion', '$element', 'ConfigChangesGuard', 'IgniteFormUtils', '$scope'
     ];
 
     /**
@@ -81,7 +81,9 @@
     }
 
     _uiCanExit($transition$) {
-        if ($transition$.options().custom.justIDUpdate)
+        const options = $transition$.options();
+
+        if (options.custom.justIDUpdate || options.redirectedFrom)
             return true;
 
         $transition$.onSuccess({}, () => this.reset());
diff --git a/modules/web-console/frontend/app/components/page-configure-basic/style.scss b/modules/web-console/frontend/app/components/page-configure-basic/style.scss
index 7e0d8ba..4814aa4 100644
--- a/modules/web-console/frontend/app/components/page-configure-basic/style.scss
+++ b/modules/web-console/frontend/app/components/page-configure-basic/style.scss
@@ -59,33 +59,6 @@
         line-height: 19px;
     }
 
-    .pcb-memory-size {
-        .input-tip {
-            display: flex;
-            flex-direction: row;
-
-            .form-control {
-                border-top-right-radius: 0;
-                border-bottom-right-radius: 0;
-            }
-
-            .btn-ignite {
-                border-top-left-radius: 0;
-                border-bottom-left-radius: 0;
-                padding-top: 0;
-                padding-bottom: 0;
-                flex: 0 0 auto;
-                width: 60px !important;
-            }
-        }
-    }
-
-    .pcb-form-field-size {
-        .form-field-feedback {
-            left: -63px;
-        }
-    }
-
     .pcb-form-main-buttons {
         display: flex;
         flex-direction: row;
@@ -101,7 +74,7 @@
         box-shadow: 0px -2px 4px -1px rgba(0, 0, 0, 0.2);
     }
 
-    .form-field-checkbox {
+    .form-field__checkbox {
         margin-top: auto;
         margin-bottom: 8px;
     }
diff --git a/modules/web-console/frontend/app/components/page-configure-basic/template.pug b/modules/web-console/frontend/app/components/page-configure-basic/template.pug
index e85b3d8..996ddcc 100644
--- a/modules/web-console/frontend/app/components/page-configure-basic/template.pug
+++ b/modules/web-console/frontend/app/components/page-configure-basic/template.pug
@@ -28,7 +28,6 @@
 - const model = '$ctrl.clonedCluster'
 - const modelDiscoveryKind = `${model}.discovery.kind`
 - let form = '$ctrl.form'
-- const tipOpts = {placement: 'top'}
 
 form(novalidate name=form)
     h2.pcb-section-header.pcb-inner-padding Step 1. Cluster Configuration
@@ -41,11 +40,10 @@
 
     .pc-form-grid-row.pcb-form-grid-row
         .pc-form-grid-col-60
-            +sane-ignite-form-field-text({
+            +form-field__text({
                 label: 'Name:',
                 model: `${model}.name`,
                 name: '"clusterName"',
-                disabled: 'false',
                 placeholder: 'Input name',
                 required: true,
                 tip: 'Instance name allows to indicate to what grid this particular grid instance belongs to'
@@ -54,23 +52,29 @@
                 ignite-unique-property='name'
                 ignite-unique-skip=`["_id", ${model}]`
             )
-                +unique-feedback(`${model}.name`, 'Cluster name should be unique.')
+                +form-field__error({ error: 'igniteUnique', message: 'Cluster name should be unique.' })
 
         .pc-form-grid__break
         .pc-form-grid-col-60
-            +dropdown('Discovery:', modelDiscoveryKind, '"discovery"', 'true', 'Choose discovery', '$ctrl.Clusters.discoveries',
-            'Discovery allows to discover remote nodes in grid\
-            <ul>\
-                <li>Static IPs - IP Finder which works only with pre configured list of IP addresses specified</li>\
-                <li>Multicast - Multicast based IP finder</li>\
-                <li>AWS S3 - AWS S3 based IP finder that automatically discover cluster nodes on Amazon EC2 cloud</li>\
-                <li>Apache jclouds - Apache jclouds multi cloud toolkit based IP finder for cloud platforms with unstable IP addresses</li>\
-                <li>Google cloud storage - Google Cloud Storage based IP finder that automatically discover cluster nodes on Google Compute Engine cluster</li>\
-                <li>JDBC - JDBC based IP finder that use database to store node IP address</li>\
-                <li>Shared filesystem - Shared filesystem based IP finder that use file to store node IP address</li>\
-                <li>Apache ZooKeeper - Apache ZooKeeper based IP finder when you use ZooKeeper to coordinate your distributed environment</li>\
-                <li>Kubernetes - IP finder for automatic lookup of Ignite nodes running in Kubernetes environment</li>\
-            </ul>')
+            +form-field__dropdown({
+                label: 'Discovery:',
+                model: modelDiscoveryKind,
+                name: '"discovery"',
+                placeholder: 'Choose discovery',
+                options: '$ctrl.Clusters.discoveries',
+                tip: 'Discovery allows to discover remote nodes in grid\
+                <ul>\
+                    <li>Static IPs - IP Finder which works only with pre configured list of IP addresses specified</li>\
+                    <li>Multicast - Multicast based IP finder</li>\
+                    <li>AWS S3 - AWS S3 based IP finder that automatically discover cluster nodes on Amazon EC2 cloud</li>\
+                    <li>Apache jclouds - Apache jclouds multi cloud toolkit based IP finder for cloud platforms with unstable IP addresses</li>\
+                    <li>Google cloud storage - Google Cloud Storage based IP finder that automatically discover cluster nodes on Google Compute Engine cluster</li>\
+                    <li>JDBC - JDBC based IP finder that use database to store node IP address</li>\
+                    <li>Shared filesystem - Shared filesystem based IP finder that use file to store node IP address</li>\
+                    <li>Apache ZooKeeper - Apache ZooKeeper based IP finder when you use ZooKeeper to coordinate your distributed environment</li>\
+                    <li>Kubernetes - IP finder for automatic lookup of Ignite nodes running in Kubernetes environment</li>\
+                </ul>'
+            })
         .pc-form-grid__break
         .pc-form-group
             +discovery-vm(model)(class='pcb-form-grid-row' ng-if=`${modelDiscoveryKind} === 'Vm'`)
@@ -93,7 +97,7 @@
                 $ctrl.memorySizeInputVisible$|async:this
             `
         )
-            pc-form-field-size(
+            form-field-size(
                 ng-model='$ctrl.defaultMemoryPolicy.maxSize'
                 ng-model-options='{allowInvalid: true}'
                 id='memory'
@@ -106,10 +110,10 @@
                 tip='“default” cluster memory policy off-heap max memory size. Leave empty to use 80% of physical memory available on current machine. Should be at least 10Mb.'
                 on-scale-change='scale = $event'
             )
-                +form-field-feedback('"memory"', 'min', 'Maximum size should be equal to or more than initial size ({{ $ctrl.Clusters.memoryPolicy.maxSize.min($ctrl.defaultMemoryPolicy) / scale.value}} {{scale.label}}).')
+                +form-field__error({ error: 'min', message: 'Maximum size should be equal to or more than initial size ({{ $ctrl.Clusters.memoryPolicy.maxSize.min($ctrl.defaultMemoryPolicy) / scale.value}} {{scale.label}}).' })
 
         .pc-form-grid-col-60(ng-if=`$ctrl.IgniteVersion.available('2.3.0')`)
-            pc-form-field-size(
+            form-field-size(
                 ng-model=`${model}.dataStorageConfiguration.defaultDataRegionConfiguration.maxSize`
                 ng-model-options='{allowInvalid: true}'
                 id='memory'
@@ -122,11 +126,7 @@
                 tip='Default data region off-heap max memory size. Leave empty to use 20% of physical memory available on current machine. Should be at least 10Mb.'
                 on-scale-change='scale = $event'
             )
-                +form-field-feedback(
-                    _,
-                    'min',
-                    `Maximum size should be equal to or more than initial size ({{ $ctrl.Clusters.dataRegion.maxSize.min(${model}.dataStorageConfiguration.defaultDataRegionConfiguration) / scale.value}} {{scale.label}}).`
-                )
+                +form-field__error({ error: 'min', message: `Maximum size should be equal to or more than initial size ({{ $ctrl.Clusters.dataRegion.maxSize.min(${model}.dataStorageConfiguration.defaultDataRegionConfiguration) / scale.value}} {{scale.label}}).` })
         .pc-form-grid-col-120
             .ignite-form-field
                 list-editable.pcb-caches-list(
@@ -144,16 +144,26 @@
                         div {{ $ctrl.Caches.getCacheBackupsCount($item) }}
                     list-editable-item-edit
                         div
-                            +ignite-form-field-text('Name', '$item.name', '"name"', false, true)(
+                            +form-field__text({
+                                label: 'Name',
+                                model: '$item.name',
+                                name: '"name"',
+                                required: true
+                            })(
                                 ignite-unique='$ctrl.shortCaches'
                                 ignite-unique-property='name'
                                 ignite-form-field-input-autofocus='true'
                             )
-                                +unique-feedback('"name"', 'Cache name should be unqiue')
+                                +form-field__error({ error: 'igniteUnique', message: 'Cache name should be unqiue' })
                         div
-                            +cacheMode('Mode:', '$item.cacheMode', '"cacheMode"', 'PARTITIONED')
+                            +form-field__cache-modes({
+                                label: 'Mode:',
+                                model: '$item.cacheMode',
+                                name: '"cacheMode"',
+                                placeholder: 'PARTITIONED'
+                            })
                         div
-                            +sane-ignite-form-field-dropdown({
+                            +form-field__dropdown({
                                 label: 'Atomicity:',
                                 model: '$item.atomicityMode',
                                 name: '"atomicityMode"',
@@ -161,14 +171,20 @@
                                 options: '::$ctrl.Caches.atomicityModes'
                             })
                         div(ng-show='$ctrl.Caches.shouldShowCacheBackupsCount($item)')
-                            +number('Backups:', '$item.backups', '"backups"', 'true', '0', '0')
+                            +form-field__number({
+                                label: 'Backups:',
+                                model: '$item.backups',
+                                name: '"backups"',
+                                placeholder: '0',
+                                min: 0
+                            })
                     list-editable-no-items
                         list-editable-add-item-button(
                             add-item='$ctrl.addCache()'
                             label-single='cache'
                             label-multiple='caches'
                         )
-        
+
     .pc-form-actions-panel
         button-preview-project(ng-hide='$ctrl.isNew$|async:this' cluster=model)
 
diff --git a/modules/web-console/frontend/app/components/page-configure-overview/controller.js b/modules/web-console/frontend/app/components/page-configure-overview/controller.js
index db21d9a..e49e5c6 100644
--- a/modules/web-console/frontend/app/components/page-configure-overview/controller.js
+++ b/modules/web-console/frontend/app/components/page-configure-overview/controller.js
@@ -39,11 +39,11 @@
 export default class PageConfigureOverviewController {
     static $inject = [
         '$uiRouter',
-        ModalPreviewProject.name,
-        Clusters.name,
-        ConfigureState.name,
-        ConfigSelectors.name,
-        ConfigurationDownload.name
+        'ModalPreviewProject',
+        'Clusters',
+        'ConfigureState',
+        'ConfigSelectors',
+        'ConfigurationDownload'
     ];
 
     /**
diff --git a/modules/web-console/frontend/app/components/page-configure/components/formUICanExitGuard.js b/modules/web-console/frontend/app/components/page-configure/components/formUICanExitGuard.js
index c962161..7875546 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/formUICanExitGuard.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/formUICanExitGuard.js
@@ -18,7 +18,7 @@
 import {default as ConfigChangesGuard} from '../services/ConfigChangesGuard';
 
 class FormUICanExitGuardController {
-    static $inject = ['$element', ConfigChangesGuard.name];
+    static $inject = ['$element', 'ConfigChangesGuard'];
 
     /**
      * @param {JQLite} $element
@@ -43,7 +43,9 @@
             return;
 
         controller.uiCanExit = ($transition$) => {
-            if ($transition$.options().custom.justIDUpdate)
+            const options = $transition$.options();
+
+            if (options.custom.justIDUpdate || options.redirectedFrom)
                 return true;
 
             $transition$.onSuccess({}, controller.reset);
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/component.js b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/component.js
index 1d4ba7c..e4b8253 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/component.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/component.js
@@ -93,7 +93,8 @@
     /** @type {ng.ICompiledExpression} */
     onHide;
 
-    static $inject = ['$uiRouter', ConfigSelectors.name, ConfigEffects.name, ConfigureState.name, '$http', 'IgniteConfirm', IgniteConfirmBatch.name, 'IgniteFocus', SqlTypes.name, JavaTypes.name, 'IgniteMessages', '$scope', '$rootScope', 'AgentManager', 'IgniteActivitiesData', 'IgniteLoading', 'IgniteFormUtils', 'IgniteLegacyUtils'];
+    static $inject = ['$uiRouter', 'ConfigSelectors', 'ConfigEffects', 'ConfigureState', '$http', 'IgniteConfirm', 'IgniteConfirmBatch', 'IgniteFocus', 'SqlTypes', 'JavaTypes', 'IgniteMessages', '$scope', '$rootScope', 'AgentManager', 'IgniteActivitiesData', 'IgniteLoading', 'IgniteFormUtils', 'IgniteLegacyUtils'];
+
     /**
      * @param {UIRouter} $uiRouter
      * @param {ConfigSelectors} ConfigSelectors
@@ -154,16 +155,20 @@
         })
         .take(1);
     }
+
     saveBatch(batch) {
         if (!batch.length)
             return;
 
+        this.$scope.importDomain.loadingOptions = SAVING_DOMAINS;
         this.Loading.start('importDomainFromDb');
+
         this.ConfigureState.dispatchAction({
             type: 'ADVANCED_SAVE_COMPLETE_CONFIGURATION',
             changedItems: this.batchActionsToRequestBody(batch),
             prevActions: []
         });
+
         this.saveSubscription = Observable.race(
             this.ConfigureState.actions$.filter((a) => a.type === 'ADVANCED_SAVE_COMPLETE_CONFIGURATION_OK')
                 .do(() => this.onHide()),
@@ -175,6 +180,7 @@
         })
         .subscribe();
     }
+
     batchActionsToRequestBody(batch) {
         const result = batch.reduce((req, action) => {
             return {
@@ -199,21 +205,25 @@
         result.cluster.caches = [...new Set(result.cluster.caches)];
         return result;
     }
+
     onTableSelectionChange(selected) {
         this.$scope.$applyAsync(() => {
             this.$scope.importDomain.tablesToUse = selected;
             this.selectedTablesIDs = selected.map((t) => t.id);
         });
     }
+
     onSchemaSelectionChange(selected) {
         this.$scope.$applyAsync(() => {
             this.$scope.importDomain.schemasToUse = selected;
             this.selectedSchemasIDs = selected.map((i) => i.name);
         });
     }
+
     onVisibleRowsChange(rows) {
         return this.visibleTables = rows.map((r) => r.entity);
     }
+
     onCacheSelect(cacheID) {
         if (cacheID < 0)
             return;
@@ -236,11 +246,13 @@
         )
         .subscribe();
     }
+
     $onDestroy() {
         this.subscription.unsubscribe();
         if (this.onCacheSelectSubcription) this.onCacheSelectSubcription.unsubscribe();
         if (this.saveSubscription) this.saveSubscription.unsubscribe();
     }
+
     $onInit() {
         // Restores old behavior
         const {$http, Confirm, ConfirmBatch, Focus, SqlTypes, JavaTypes, Messages, $scope, $root, agentMgr, ActivitiesData, Loading, FormUtils, LegacyUtils} = this;
@@ -1025,13 +1037,9 @@
 
                             $scope.ui.selectedJdbcDriverJar = $scope.jdbcDriverJars[0].value;
 
-                            // FormUtils.confirmUnsavedChanges(dirty, () => {
                             $scope.importDomain.action = 'connect';
                             $scope.importDomain.tables = [];
                             this.selectedTables = [];
-
-                            // Focus.move('jdbcUrl');
-                            // });
                         }
                         else {
                             $scope.importDomain.jdbcDriversNotFound = true;
@@ -1140,7 +1148,7 @@
                 ></tables-action-cell>
             `,
             visible: true,
-            minWidth: 500
+            minWidth: 450
         }
     ];
 }
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/index.js b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/index.js
index b75c89a..3bc71da 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/index.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/index.js
@@ -23,9 +23,9 @@
 import {component as amountIndicator} from './selected-items-amount-indicator/component';
 
 export default angular
-.module('configuration.modal-import-models', [])
-.service(service.name, service)
-.component(tablesActionCell.name, tablesActionCell)
-.component(stepIndicator.name, stepIndicator)
-.component(amountIndicator.name, amountIndicator)
-.component(component.name, component);
+    .module('configuration.modal-import-models', [])
+    .service('ModalImportModels', service)
+    .component('tablesActionCell', tablesActionCell)
+    .component('modalImportModelsStepIndicator', stepIndicator)
+    .component('selectedItemsAmountIndicator', amountIndicator)
+    .component('modalImportModels', component);
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/selected-items-amount-indicator/component.js b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/selected-items-amount-indicator/component.js
index 26a6ea0..abee6b0 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/selected-items-amount-indicator/component.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/selected-items-amount-indicator/component.js
@@ -17,8 +17,8 @@
 
 import template from './template.pug';
 import './style.scss';
+
 export const component = {
-    name: 'selectedItemsAmountIndicator',
     template,
     bindings: {
         selectedAmount: '<',
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/step-indicator/component.js b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/step-indicator/component.js
index 4f2b141..c37662c 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/step-indicator/component.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/step-indicator/component.js
@@ -25,7 +25,6 @@
 }
 
 export const component = {
-    name: 'modalImportModelsStepIndicator',
     template,
     controller: ModalImportModelsStepIndicator,
     bindings: {
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/component.js b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/component.js
index 7fde5ba..17d4dc1 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/component.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/component.js
@@ -59,7 +59,6 @@
 }
 
 export const component = {
-    name: 'tablesActionCell',
     controller: TablesActionCell,
     bindings: {
         onEditStart: '&',
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/style.scss b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/style.scss
index 49a7b91..e6e6333 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/style.scss
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/style.scss
@@ -28,7 +28,6 @@
 
         &:hover {
             background: white;
-            box-shadow: inset 0 1px 3px 0 rgba(0, 0, 0, 0.5);
             border: solid 1px #c5c5c5;
         }
     }
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/template.pug b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/template.pug
index 2f51114..c64d973 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/template.pug
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/tables-action-cell/template.pug
@@ -14,6 +14,8 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
+include /app/helpers/jade/mixins
+
 button.table-action-cell__edit-button.btn-ignite(
     type='button'
     b_s-tooltip=''
@@ -25,21 +27,17 @@
 )
     | {{ $ctrl.tableActionView($ctrl.table) }}
 .table-action-cell__edit-form(ng-if='$ctrl.table.edit')
-    .ignite-form-field.ignite-form-field-dropdown.table-action-cell__action-select
-        .ignite-form-field__control
-            .input-tip
-                button.select-toggle.form-control(
-                    type='button'
-                    bs-select
-                    ng-model='$ctrl.table.action'
-                    bs-options='item.value as item.shortLabel for item in $ctrl.importActions'
-                )
-    .ignite-form-field.ignite-form-field-dropdown.table-action-cell__cache-select
-        .ignite-form-field__control
-            .input-tip
-                button.select-toggle.form-control(
-                    bs-select
-                    ng-model='$ctrl.table.cacheOrTemplate'
-                    ng-change='$ctrl.onCacheSelect({$event: $ctrl.table.cacheOrTemplate})'
-                    bs-options='item.value as item.label for item in $ctrl.table.cachesOrTemplates'
-                )
\ No newline at end of file
+    .table-action-cell__action-select
+        +form-field__dropdown({
+            model: '$ctrl.table.action',
+            options: '$ctrl.importActions',
+            optionLabel: 'shortLabel'
+        })
+
+    .table-action-cell__cache-select
+        +form-field__dropdown({
+            model: '$ctrl.table.cacheOrTemplate',
+            options: '$ctrl.table.cachesOrTemplates'
+        })(
+            ng-change='$ctrl.onCacheSelect({$event: $ctrl.table.cacheOrTemplate})'
+        )
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/template.tpl.pug b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/template.tpl.pug
index 1762ecc..8aef347 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-import-models/template.tpl.pug
@@ -16,26 +16,23 @@
 
 include /app/helpers/jade/mixins
 
-mixin chk(mdl, change, tip)
-    input(type='checkbox' ng-model=mdl ng-change=change bs-tooltip='' data-title=tip data-trigger='hover' data-placement='top')
-
 mixin td-ellipses-lbl(w, lbl)
     td.td-ellipsis(width=`${w}` style=`min-width: ${w}; max-width: ${w}`)
         label #{lbl}
 
-.modal--ignite.modal.modal-domain-import.center(role='dialog')
+.modal--ignite.modal--wide.modal.modal-domain-import.center(role='dialog')
     -var tipOpts = {};
     - tipOpts.container = '.modal-content'
-    - tipOpts.placement = 'top'
+    - tipOpts.placement = 'right'
     .modal-dialog
         .modal-content(ignite-loading='importDomainFromDb' ignite-loading-text='{{importDomain.loadingOptions.text}}')
             #errors-container.modal-header.header
                 button.close(type='button' ng-click='$hide()' aria-hidden='true')
                     svg(ignite-icon="cross")
-                h4.modal-title() 
+                h4.modal-title()
                     span(ng-if='!importDomain.demo') Import domain models from database
                     span(ng-if='importDomain.demo') Import domain models from demo database
-            .modal-body.theme--ignite
+            .modal-body
                 modal-import-models-step-indicator(
                     steps='$ctrl.actions'
                     current-step='importDomain.action'
@@ -61,26 +58,61 @@
                                 li Copy h2-x.x.x.jar into agent 'jdbc-drivers' folder and try again
                                 li Refer to agent README.txt for more information
                 .import-domain-model-wizard-page(ng-if='importDomain.action == "connect" && !importDomain.demo')
-                    -var form = 'connectForm'
-
                     form.pc-form-grid-row(name=form novalidate)
                         .pc-form-grid-col-30
-                            +ignite-form-field-dropdown('Driver JAR:', 'ui.selectedJdbcDriverJar', '"jdbcDriverJar"', false, true, false,
-                                'Choose JDBC driver', '', 'jdbcDriverJars',
-                                'Select appropriate JAR with JDBC driver<br> To add another driver you need to place it into "/jdbc-drivers" folder of Ignite Web Agent<br> Refer to Ignite Web Agent README.txt for for more information'
+                            +form-field__dropdown({
+                                label: 'Driver JAR:',
+                                model: 'ui.selectedJdbcDriverJar',
+                                name: '"jdbcDriverJar"',
+                                required: true,
+                                placeholder: 'Choose JDBC driver',
+                                options: 'jdbcDriverJars',
+                                tip: 'Select appropriate JAR with JDBC driver<br> To add another driver you need to place it into "/jdbc-drivers" folder of Ignite Web Agent<br> Refer to Ignite Web Agent README.txt for for more information'
+                            })
+                        .pc-form-grid-col-30
+                            +form-field__java-class({
+                                label: 'JDBC driver:',
+                                model: 'selectedPreset.jdbcDriverClass',
+                                name: '"jdbcDriverClass"',
+                                required: true,
+                                tip: 'Fully qualified class name of JDBC driver that will be used to connect to database'
+                            })
+                        .pc-form-grid-col-60
+                            +form-field__text({
+                                label: 'JDBC URL:',
+                                model: 'selectedPreset.jdbcUrl',
+                                name: '"jdbcUrl"',
+                                required: true,
+                                placeholder: 'JDBC URL',
+                                tip: 'JDBC URL for connecting to database<br>Refer to your database documentation for details'
+                            })(
+                                ignite-form-field-input-autofocus='true'
                             )
+
                         .pc-form-grid-col-30
-                            +java-class('JDBC driver:', 'selectedPreset.jdbcDriverClass', '"jdbcDriverClass"', true, true, 'Fully qualified class name of JDBC driver that will be used to connect to database')
+                            +form-field__text({
+                                label: 'User:',
+                                model: 'selectedPreset.user',
+                                name: '"jdbcUser"',
+                                tip: 'User name for connecting to database'
+                            })
+                        .pc-form-grid-col-30
+                            +form-field__password({
+                                label: 'Password:',
+                                model: 'selectedPreset.password',
+                                name: '"jdbcPassword"',
+                                tip: 'Password for connecting to database<br>Note, password would not be saved in preferences for security reasons'
+                            })(
+                                ignite-on-enter='importDomainNext()'
+                            )
                         .pc-form-grid-col-60
-                            +text-enabled-autofocus('JDBC URL:', 'selectedPreset.jdbcUrl', '"jdbcUrl"', true, true, 'JDBC URL', 'JDBC URL for connecting to database<br>Refer to your database documentation for details')
-                        .pc-form-grid-col-30
-                            +text('User:', 'selectedPreset.user', '"jdbcUser"', false, '', 'User name for connecting to database')
-                        .pc-form-grid-col-30
-                            +password('Password:', 'selectedPreset.password', '"jdbcPassword"', false, '', 'Password for connecting to database<br>Note, password would not be saved in preferences for security reasons')(ignite-on-enter='importDomainNext()')
-                        .pc-form-grid-col-60
-                            - tipOpts.placement = 'auto'
-                            +checkbox('Tables only', 'selectedPreset.tablesOnly', '"tablesOnly"', 'If selected, then only tables metadata will be parsed<br>Otherwise table and view metadata will be parsed')
-                            - tipOpts.placement = 'top'
+                            +form-field__checkbox({
+                                label: 'Tables only',
+                                model: 'selectedPreset.tablesOnly',
+                                name: '"tablesOnly"',
+                                tip: 'If selected, then only tables metadata will be parsed<br>Otherwise table and view metadata will be parsed',
+                                tipOpts
+                            })
 
                 .import-domain-model-wizard-page(ng-if='importDomain.action == "schemas"')
                     pc-items-table(
@@ -95,14 +127,14 @@
                 .import-domain-model-wizard-page(ng-if='importDomain.action == "tables"')
                     form.pc-form-grid-row(novalidate)
                         .pc-form-grid-col-30
-                            +sane-ignite-form-field-dropdown({
+                            +form-field__dropdown({
                                 label: 'Action:',
                                 model: 'importCommon.action'
                             })(
                                 bs-options='item.value as item.label for item in importActions'
                             )
                         .pc-form-grid-col-30
-                            +sane-ignite-form-field-dropdown({
+                            +form-field__dropdown({
                                 label: 'Cache:',
                                 model: 'importCommon.cacheOrTemplate'
                             })(
@@ -110,8 +142,8 @@
                                 ng-change='$ctrl.onCacheSelect(importCommon.cacheOrTemplate)'
                             )
                         .pc-form-grid-col-60.pc-form-grid__text-only-item
-                            | Defaults to be applied for filtered tables
-                            +tooltip('Select and apply options for caches generation')
+                            +form-field__label({ label: 'Defaults to be applied for filtered tables' })
+                                +form-field__tooltip({ title: 'Select and apply options for caches generation' })
                             button.btn-ignite.btn-ignite--success(
                                 type='button'
                                 ng-click='applyDefaults()'
@@ -133,23 +165,66 @@
 
                     form.pc-form-grid-row(name=form novalidate)
                         .pc-form-grid-col-60
-                            +checkbox('Use Java built-in types for keys', 'ui.builtinKeys', '"domainBuiltinKeys"', 'Use Java built-in types like "Integer", "Long", "String" instead of POJO generation in case when table primary key contains only one field')
+                            +form-field__checkbox({
+                                label: 'Use Java built-in types for keys',
+                                model: 'ui.builtinKeys',
+                                name: '"domainBuiltinKeys"',
+                                tip: 'Use Java built-in types like "Integer", "Long", "String" instead of POJO generation in case when table primary key contains only one field',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60
-                            +checkbox('Use primitive types for NOT NULL table columns', 'ui.usePrimitives', '"domainUsePrimitives"', 'Use primitive types like "int", "long", "double" for POJOs fields generation in case of NOT NULL columns')
+                            +form-field__checkbox({
+                                label: 'Use primitive types for NOT NULL table columns',
+                                model: 'ui.usePrimitives',
+                                name: '"domainUsePrimitives"',
+                                tip: 'Use primitive types like "int", "long", "double" for POJOs fields generation in case of NOT NULL columns',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60
-                            +checkbox('Generate query entity key fields', 'ui.generateKeyFields', '"generateKeyFields"',
-                                'Generate key fields for query entity.<br\>\
+                            +form-field__checkbox({
+                                label: 'Generate query entity key fields',
+                                model: 'ui.generateKeyFields',
+                                name: '"generateKeyFields"',
+                                tip: 'Generate key fields for query entity.<br\>\
                                 We need this for the cases when no key-value classes\
                                 are present on cluster nodes, and we need to build/modify keys and values during SQL DML operations.\
-                                Thus, setting this parameter is not mandatory and should be based on particular use case.')
+                                Thus, setting this parameter is not mandatory and should be based on particular use case.',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60
-                            +checkbox('Generate POJO classes', generatePojo, '"domainGeneratePojo"', 'If selected then POJO classes will be generated from database tables')
+                            +form-field__checkbox({
+                                label: 'Generate POJO classes',
+                                model: generatePojo,
+                                name: '"domainGeneratePojo"',
+                                tip: 'If selected then POJO classes will be generated from database tables',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60(ng-if=generatePojo)
-                            +checkbox('Generate aliases for query entity', 'ui.generateTypeAliases', '"domainGenerateTypeAliases"', 'Generate aliases for query entity if table name is invalid Java identifier')
+                            +form-field__checkbox({
+                                label: 'Generate aliases for query entity',
+                                model: 'ui.generateTypeAliases',
+                                name: '"domainGenerateTypeAliases"',
+                                tip: 'Generate aliases for query entity if table name is invalid Java identifier',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60(ng-if=generatePojo)
-                            +checkbox('Generate aliases for query fields', 'ui.generateFieldAliases', '"domainGenerateFieldAliases"', 'Generate aliases for query fields with database field names when database field name differ from Java field name')
+                            +form-field__checkbox({
+                                label: 'Generate aliases for query fields',
+                                model: 'ui.generateFieldAliases',
+                                name: '"domainGenerateFieldAliases"',
+                                tip: 'Generate aliases for query fields with database field names when database field name differ from Java field name',
+                                tipOpts
+                            })
                         .pc-form-grid-col-60(ng-if=generatePojo)
-                            +java-package('Package:', 'ui.packageName', '"domainPackageName"', true, true, 'Package that will be used for POJOs generation')
+                            +form-field__java-package({
+                                label: 'Package:',
+                                model: 'ui.packageName',
+                                name: '"domainPackageName"',
+                                required: true,
+                                tip: 'Package that will be used for POJOs generation',
+                                tipOpts
+                            })
+
             .modal-footer
                 button.btn-ignite.btn-ignite--success.modal-import-models__prev-button(
                     type='button'
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/index.js b/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/index.js
index a0dc92e..a8aebb5 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/index.js
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/index.js
@@ -23,5 +23,5 @@
 
 export default angular
     .module('ignite-console.page-configure.modal-preview-project', [])
-    .service(service.name, service)
+    .service('ModalPreviewProject', service)
     .component(component.name, component);
diff --git a/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/template.pug b/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/template.pug
index 3499277..536db28 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/template.pug
+++ b/modules/web-console/frontend/app/components/page-configure/components/modal-preview-project/template.pug
@@ -20,7 +20,7 @@
     .modal-dialog
         .modal-content
             .modal-header
-                h4.modal-title 
+                h4.modal-title
                     svg(ignite-icon="structure")
                     span See Project Structure
                 button.close(type='button' aria-label='Close' ng-click='$ctrl.onHide()')
@@ -44,4 +44,5 @@
                 .pane-right
                     div.file-preview(ignite-ace='{mode: $ctrl.fileExt, readonly: true}' ng-model='$ctrl.fileText')
             .modal-footer
-                button.btn-ignite.btn-ignite--success(ng-click='$ctrl.onHide()') Close
\ No newline at end of file
+                div
+                    button.btn-ignite.btn-ignite--success(ng-click='$ctrl.onHide()') Close
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/component.js b/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/component.js
deleted file mode 100644
index e90a2cf..0000000
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/component.js
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import template from './template.pug';
-import controller from './controller';
-import './style.scss';
-
-export default {
-    controller,
-    template,
-    transclude: true,
-    require: {
-        ngModel: 'ngModel'
-    },
-    bindings: {
-        label: '@',
-        placeholder: '@',
-        min: '@?',
-        max: '@?',
-        tip: '@',
-        required: '<?',
-        sizeType: '@?',
-        sizeScaleLabel: '@?',
-        onScaleChange: '&?',
-        ngDisabled: '<?'
-    }
-};
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/controller.js b/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/controller.js
deleted file mode 100644
index 0d751e8..0000000
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/controller.js
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import get from 'lodash/get';
-
-export default class PCFormFieldSizeController {
-    /** @type {ng.INgModelController} */
-    ngModel;
-    /** @type {number} */
-    min;
-    /** @type {number} */
-    max;
-    /** @type {ng.ICompiledExpression} */
-    onScaleChange;
-    /** @type {ng.IFormController} */
-    innerForm;
-
-    static $inject = ['$element', '$attrs'];
-
-    /** @type {ig.config.formFieldSize.ISizeTypes} */
-    static sizeTypes = {
-        bytes: [
-            {label: 'Kb', value: 1024},
-            {label: 'Mb', value: 1024 * 1024},
-            {label: 'Gb', value: 1024 * 1024 * 1024}
-        ],
-        seconds: [
-            {label: 'ns', value: 1 / 1000},
-            {label: 'ms', value: 1},
-            {label: 's', value: 1000}
-        ]
-    };
-
-    /**
-     * @param {JQLite} $element
-     * @param {ng.IAttributes} $attrs
-     */
-    constructor($element, $attrs) {
-        this.$element = $element;
-        this.$attrs = $attrs;
-        this.id = Math.random();
-    }
-
-    $onDestroy() {
-        this.$element = null;
-    }
-
-    $onInit() {
-        if (!this.min) this.min = 0;
-        if (!this.sizesMenu) this.setDefaultSizeType();
-        this.$element.addClass('ignite-form-field');
-        this.ngModel.$render = () => this.assignValue(this.ngModel.$viewValue);
-    }
-
-    $postLink() {
-        if ('min' in this.$attrs)
-            this.ngModel.$validators.min = (value) => this.ngModel.$isEmpty(value) || value === void 0 || value >= this.min;
-        if ('max' in this.$attrs)
-            this.ngModel.$validators.max = (value) => this.ngModel.$isEmpty(value) || value === void 0 || value <= this.max;
-
-        this.ngModel.$validators.step = (value) => this.ngModel.$isEmpty(value) || value === void 0 || Math.floor(value) === value;
-    }
-
-    $onChanges(changes) {
-        if ('sizeType' in changes) {
-            this.sizesMenu = PCFormFieldSizeController.sizeTypes[changes.sizeType.currentValue];
-            this.sizeScale = this.chooseSizeScale(get(changes, 'sizeScaleLabel.currentValue'));
-        }
-        if (!this.sizesMenu) this.setDefaultSizeType();
-        if ('sizeScaleLabel' in changes)
-            this.sizeScale = this.chooseSizeScale(changes.sizeScaleLabel.currentValue);
-
-        if ('min' in changes) this.ngModel.$validate();
-    }
-
-    /**
-     * @param {ig.config.formFieldSize.ISizeTypeOption} value
-     */
-    set sizeScale(value) {
-        this._sizeScale = value;
-        if (this.onScaleChange) this.onScaleChange({$event: this.sizeScale});
-        if (this.ngModel) this.assignValue(this.ngModel.$viewValue);
-    }
-
-    get sizeScale() {
-        return this._sizeScale;
-    }
-
-    /**
-     * @param {number} rawValue
-     */
-    assignValue(rawValue) {
-        if (!this.sizesMenu) this.setDefaultSizeType();
-        return this.value = rawValue
-            ? rawValue / this.sizeScale.value
-            : rawValue;
-    }
-
-    onValueChange() {
-        this.ngModel.$setViewValue(this.value ? this.value * this.sizeScale.value : this.value);
-    }
-
-    _defaultLabel() {
-        if (!this.sizesMenu)
-            return;
-
-        return this.sizesMenu[1].label;
-    }
-
-    chooseSizeScale(label = this._defaultLabel()) {
-        if (!label)
-            return;
-
-        return this.sizesMenu.find((option) => option.label.toLowerCase() === label.toLowerCase());
-    }
-
-    setDefaultSizeType() {
-        this.sizesMenu = PCFormFieldSizeController.sizeTypes.bytes;
-        this.sizeScale = this.chooseSizeScale();
-    }
-}
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/index.js b/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/index.js
deleted file mode 100644
index 1fdc379..0000000
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/index.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import angular from 'angular';
-import component from './component';
-
-export default angular
-    .module('ignite-console.page-configure.form-field-size', [])
-    .component('pcFormFieldSize', component);
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/style.scss b/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/style.scss
deleted file mode 100644
index 737b2a0..0000000
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/style.scss
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-pc-form-field-size {
-    @import "./../../../../../public/stylesheets/variables.scss";
-
-    .input-tip {
-        display: flex;
-        flex-direction: row;
-
-        .form-control {
-            border-top-right-radius: 0;
-            border-bottom-right-radius: 0;
-        }
-
-        input {
-            border-top-right-radius: 0 !important;
-            border-bottom-right-radius: 0 !important;
-            min-width: 0;
-        }
-
-        .btn-ignite {
-            border-top-left-radius: 0 !important;
-            border-bottom-left-radius: 0 !important;
-            flex: 0 0 auto;
-            width: 60px !important;
-            line-height: initial !important;
-        }
-    }
-
-    &.ng-invalid:not(.ng-pristine),
-    &.ng-invalid.ng-touched {
-        input, .btn-ignite {
-            border-color: $ignite-brand-primary !important;
-            box-shadow: inset 0 1px 3px 0 rgba($ignite-brand-primary, .5) !important;
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/template.pug b/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/template.pug
deleted file mode 100644
index de62d35..0000000
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-form-field-size/template.pug
+++ /dev/null
@@ -1,61 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-include /app/helpers/jade/mixins
-
-+ignite-form-field__label('{{ ::$ctrl.label }}', '$ctrl.id', '$ctrl.required', '$ctrl.ngDisabled')
-    span(ng-if='::$ctrl.tip')
-        +tooltip('{{::$ctrl.tip}}')
-.ignite-form-field__control(ng-form='$ctrl.innerForm')
-    .input-tip
-        input.form-control(
-            type='number'
-            id='{{::$ctrl.id}}Input'
-            ng-model='$ctrl.value'
-            ng-model-options='{allowInvalid: true}'
-            ng-change='$ctrl.onValueChange()'
-            name='numberInput'
-            placeholder='{{$ctrl.placeholder}}'
-            min='{{ $ctrl.min ? $ctrl.min / $ctrl.sizeScale.value : "" }}'
-            max='{{ $ctrl.max ? $ctrl.max / $ctrl.sizeScale.value : "" }}'
-            ng-required='$ctrl.required'
-            ng-disabled='$ctrl.ngDisabled'
-        )
-        button.btn-ignite.btn-ignite--secondary(
-            bs-select
-            bs-options='size as size.label for size in $ctrl.sizesMenu'
-            ng-model='$ctrl.sizeScale'
-            protect-from-bs-select-render
-            ng-disabled='$ctrl.ngDisabled'
-            type='button'
-        )
-            | {{ $ctrl.sizeScale.label }}
-            span.fa.fa-caret-down.icon-right
-.ignite-form-field__errors(
-    ng-messages='$ctrl.ngModel.$error'
-    ng-show=`($ctrl.ngModel.$dirty || $ctrl.ngModel.$touched || $ctrl.ngModel.$submitted) && $ctrl.ngModel.$invalid`
-)
-    div(ng-transclude)
-    div(ng-message='required')
-        | This field could not be empty
-    div(ng-message='min')
-        | Value is less than allowable minimum: {{ $ctrl.min/$ctrl.sizeScale.value }} {{$ctrl.sizeScale.label}}
-    div(ng-message='max')
-        | Value is more than allowable maximum: {{ $ctrl.max/$ctrl.sizeScale.value }} {{$ctrl.sizeScale.label}}
-    div(ng-message='number')
-        | Only numbers allowed
-    div(ng-message='step')
-        | Invalid step
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/style.scss b/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/style.scss
index 227f23c..2147714 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/style.scss
+++ b/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/style.scss
@@ -24,9 +24,6 @@
         display: flex;
         flex-direction: row;
     }
-    .ui-grid-settings--heading {
-        flex: 1;
-    }
 
     // Removes unwanted box-shadow and border-right from checkboxes column
     .ui-grid.ui-grid--ignite .ui-grid-pinned-container.ui-grid-pinned-container-left .ui-grid-render-container-left:before {
diff --git a/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/template.pug b/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/template.pug
index 0dbb760..75c683f 100644
--- a/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/template.pug
+++ b/modules/web-console/frontend/app/components/page-configure/components/pc-items-table/template.pug
@@ -17,25 +17,22 @@
 include /app/helpers/jade/mixins
 
 .panel--ignite
-    .panel-heading.ui-grid-settings.ui-grid-ignite__panel(ng-if='!$ctrl.hideHeader')
-        .panel-title
-            .pc-items-table__table-name.ng-animate-disabled(
-                ng-hide='$ctrl.gridAPI.selection.getSelectedCount()'
-            )
-                | {{ $ctrl.tableTitle }}
-                grid-column-selector(grid-api='$ctrl.gridAPI')
-            .pc-items-table__selection-count.ng-animate-disabled(
-                ng-show='$ctrl.gridAPI.selection.getSelectedCount()'
-            )
-                i {{ $ctrl.gridAPI.selection.getSelectedCount() }} of {{ $ctrl.items.length }} selected
-            .pco-clusters-table__actions-button
-                +ignite-form-field-bsdropdown({
-                    label: 'Actions',
-                    name: 'action',
-                    disabled: '!$ctrl.gridAPI.selection.getSelectedCount()',
-                    required: false,
-                    options: '$ctrl.actionsMenu'
-                })
+    header.header-with-selector(ng-if='!$ctrl.hideHeader')
+        div(ng-hide='$ctrl.gridAPI.selection.getSelectedCount()')
+            span {{ $ctrl.tableTitle }}
+            grid-column-selector(grid-api='$ctrl.gridAPI')
+
+        div(ng-show='$ctrl.gridAPI.selection.getSelectedCount()')
+            grid-item-selected(grid-api='$ctrl.gridAPI')
+
+        div
+            +ignite-form-field-bsdropdown({
+                label: 'Actions',
+                name: 'action',
+                disabled: '!$ctrl.gridAPI.selection.getSelectedCount()',
+                options: '$ctrl.actionsMenu'
+            })
+
     .grid.ui-grid--ignite(
         ui-grid='$ctrl.grid'
         ui-grid-selection
@@ -46,4 +43,4 @@
     )
 
     div(ng-transclude='footerSlot' ng-hide='$ctrl.showFilterNotification')
-    footer-slot(ng-if='$ctrl.showFilterNotification' style='font-style:italic') Nothing to display. Check your filters.
\ No newline at end of file
+    footer-slot(ng-if='$ctrl.showFilterNotification' style='font-style:italic') Nothing to display. Check your filters.
diff --git a/modules/web-console/frontend/app/components/page-configure/index.js b/modules/web-console/frontend/app/components/page-configure/index.js
index 9beea7a..c1d51ee 100644
--- a/modules/web-console/frontend/app/components/page-configure/index.js
+++ b/modules/web-console/frontend/app/components/page-configure/index.js
@@ -36,7 +36,6 @@
 import projectStructurePreview from './components/modal-preview-project';
 import itemsTable from './components/pc-items-table';
 import pcUiGridFilters from './components/pc-ui-grid-filters';
-import pcFormFieldSize from './components/pc-form-field-size';
 import isInCollection from './components/pcIsInCollection';
 import pcValidation from './components/pcValidation';
 import fakeUiCanExit from './components/fakeUICanExit';
@@ -99,7 +98,6 @@
         'ui.router',
         'asyncFilter',
         uiValidate,
-        pcFormFieldSize.name,
         pcUiGridFilters.name,
         projectStructurePreview.name,
         itemsTable.name,
@@ -179,9 +177,9 @@
         ConfigEffects.connect();
     }])
     .component('pageConfigure', component)
-    .directive(isInCollection.name, isInCollection)
-    .directive(fakeUiCanExit.name, fakeUiCanExit)
-    .directive(formUICanExitGuard.name, formUICanExitGuard)
+    .directive('pcIsInCollection', isInCollection)
+    .directive('fakeUiCanExit', fakeUiCanExit)
+    .directive('formUiCanExitGuard', formUICanExitGuard)
     .factory('configSelectionManager', ConfigSelectionManager)
     .service('IgniteSummaryZipper', SummaryZipper)
     .service('IgniteConfigurationResource', ConfigurationResource)
diff --git a/modules/web-console/frontend/app/components/page-configure/services/ConfigChangesGuard.js b/modules/web-console/frontend/app/components/page-configure/services/ConfigChangesGuard.js
index cca0e9e..7e2df80 100644
--- a/modules/web-console/frontend/app/components/page-configure/services/ConfigChangesGuard.js
+++ b/modules/web-console/frontend/app/components/page-configure/services/ConfigChangesGuard.js
@@ -52,7 +52,7 @@
 }
 
 export default class ConfigChangesGuard {
-    static $inject = [Confirm.name, '$sce'];
+    static $inject = ['Confirm', '$sce'];
 
     /**
      * @param {Confirm} Confirm.
@@ -74,7 +74,7 @@
             You have unsaved changes.
             Are you sure you want to discard them?
             </p>
-            <details>
+            <details class='config-changes-guard__details'>
                 <summary>Click here to see changes</summary>
                 <div style='max-height: 400px; overflow: auto;'>${html.format(changes)}</div>                
             </details>
diff --git a/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.js b/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.js
index 2dab8a3..269eb93 100644
--- a/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.js
+++ b/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.js
@@ -27,7 +27,9 @@
                 .then(({data}) => data)
                 .catch(({data}) => Promise.reject(data));
         },
-        populate({spaces, clusters, caches, igfss, domains}) {
+        populate(data) {
+            const {spaces, clusters, caches, igfss, domains} = _.cloneDeep(data);
+
             _.forEach(clusters, (cluster) => {
                 cluster.caches = _.filter(caches, ({_id}) => _.includes(cluster.caches, _id));
 
diff --git a/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.spec.js b/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.spec.js
new file mode 100644
index 0000000..d52d94a
--- /dev/null
+++ b/modules/web-console/frontend/app/components/page-configure/services/ConfigurationResource.spec.js
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import configurationResource from './ConfigurationResource';
+
+import { suite, test } from 'mocha';
+import { assert } from 'chai';
+
+const CHECKED_CONFIGURATION = {
+    spaces: [{
+        _id: '1space',
+        name: 'Test space'
+    }],
+    clusters: [{
+        _id: '1cluster',
+        space: '1space',
+        name: 'Test cluster',
+        caches: ['1cache'],
+        models: ['1model'],
+        igfss: ['1igfs']
+    }],
+    caches: [{
+        _id: '1cache',
+        space: '1space',
+        name: 'Test cache',
+        clusters: ['1cluster'],
+        models: ['1model']
+    }],
+    domains: [{
+        _id: '1model',
+        space: '1space',
+        name: 'Test model',
+        clusters: ['1cluster'],
+        caches: ['1cache']
+    }],
+    igfss: [{
+        _id: '1igfs',
+        space: '1space',
+        name: 'Test IGFS',
+        clusters: ['1cluster']
+    }]
+};
+
+suite('ConfigurationResourceTestsSuite', () => {
+    test('ConfigurationResourceService correctly populate data', async() => {
+        const service = configurationResource(null);
+        const converted = _.cloneDeep(CHECKED_CONFIGURATION);
+        const res = await service.populate(converted);
+
+        assert.notEqual(res.clusters[0], converted.clusters[0]);
+
+        assert.deepEqual(converted.clusters[0].caches, CHECKED_CONFIGURATION.clusters[0].caches);
+        assert.deepEqual(converted.clusters[0].models, CHECKED_CONFIGURATION.clusters[0].models);
+        assert.deepEqual(converted.clusters[0].igfss, CHECKED_CONFIGURATION.clusters[0].igfss);
+
+        assert.deepEqual(converted.caches[0].clusters, CHECKED_CONFIGURATION.caches[0].clusters);
+        assert.deepEqual(converted.caches[0].models, CHECKED_CONFIGURATION.caches[0].models);
+
+        assert.deepEqual(converted.domains[0].clusters, CHECKED_CONFIGURATION.domains[0].clusters);
+        assert.deepEqual(converted.domains[0].caches, CHECKED_CONFIGURATION.domains[0].caches);
+
+        assert.deepEqual(converted.igfss[0].clusters, CHECKED_CONFIGURATION.igfss[0].clusters);
+    });
+});
diff --git a/modules/web-console/frontend/app/components/page-configure/services/PageConfigure.js b/modules/web-console/frontend/app/components/page-configure/services/PageConfigure.js
index 10200be..d81921e 100644
--- a/modules/web-console/frontend/app/components/page-configure/services/PageConfigure.js
+++ b/modules/web-console/frontend/app/components/page-configure/services/PageConfigure.js
@@ -38,7 +38,8 @@
 import {default as ConfigSelectors} from 'app/components/page-configure/store/selectors';
 
 export default class PageConfigure {
-    static $inject = [ConfigureState.name, ConfigSelectors.name];
+    static $inject = ['ConfigureState', 'ConfigSelectors'];
+
     /**
      * @param {ConfigureState} ConfigureState
      * @param {ConfigSelectors} ConfigSelectors
@@ -55,7 +56,7 @@
                 .take(1)
                 .do(() => this.ConfigureState.dispatchAction({type: 'LOAD_COMPLETE_CONFIGURATION', clusterID, isDemo}))
                 .ignoreElements(),
-            this.ConfigureState.actions$.let(ofType('LOAD_COMPLETE_CONFIGURATION_ERR')).take(1).map((e) => {throw e;}),
+            this.ConfigureState.actions$.let(ofType('LOAD_COMPLETE_CONFIGURATION_ERR')).take(1).pluck('error').map((e) => Promise.reject(e)),
             this.ConfigureState.state$
                 .let(this.ConfigSelectors.selectCompleteClusterConfiguration({clusterID, isDemo}))
                 .filter((c) => c.__isComplete)
diff --git a/modules/web-console/frontend/app/components/page-configure/states.js b/modules/web-console/frontend/app/components/page-configure/states.js
index ed43f48..3ba5bb7 100644
--- a/modules/web-console/frontend/app/components/page-configure/states.js
+++ b/modules/web-console/frontend/app/components/page-configure/states.js
@@ -24,10 +24,9 @@
 
 const idRegex = `new|[a-z0-9]+`;
 
-const shortCachesResolve = ['ConfigSelectors', 'ConfigureState', 'ConfigEffects', '$transition$', (ConfigSelectors, ConfigureState, {etp}, $transition$) => {
+const shortCachesResolve = ['ConfigSelectors', 'ConfigureState', 'ConfigEffects', '$transition$', function(ConfigSelectors, ConfigureState, {etp}, $transition$) {
     if ($transition$.params().clusterID === 'new')
         return Promise.resolve();
-
     return Observable.fromPromise($transition$.injector().getAsync('_cluster'))
         .switchMap(() => ConfigureState.state$.let(ConfigSelectors.selectCluster($transition$.params().clusterID)).take(1))
         .switchMap((cluster) => {
diff --git a/modules/web-console/frontend/app/components/page-configure/store/effects.js b/modules/web-console/frontend/app/components/page-configure/store/effects.js
index 9647461..014076b 100644
--- a/modules/web-console/frontend/app/components/page-configure/store/effects.js
+++ b/modules/web-console/frontend/app/components/page-configure/store/effects.js
@@ -85,18 +85,19 @@
 
 export default class ConfigEffects {
     static $inject = [
-        ConfigureState.name,
-        Caches.name,
-        IGFSs.name,
-        Models.name,
-        ConfigSelectors.name,
-        Clusters.name,
+        'ConfigureState',
+        'Caches',
+        'IGFSs',
+        'Models',
+        'ConfigSelectors',
+        'Clusters',
         '$state',
         'IgniteMessages',
         'IgniteConfirm',
-        Confirm.name,
-        ConfigurationDownload.name
+        'Confirm',
+        'ConfigurationDownload'
     ];
+
     /**
      * @param {ConfigureState} ConfigureState
      * @param {Caches} Caches
diff --git a/modules/web-console/frontend/app/components/page-configure/style.scss b/modules/web-console/frontend/app/components/page-configure/style.scss
index 36ae752..365e058 100644
--- a/modules/web-console/frontend/app/components/page-configure/style.scss
+++ b/modules/web-console/frontend/app/components/page-configure/style.scss
@@ -33,10 +33,10 @@
         display: flex;
         flex-direction: row;
         padding: 10px 20px 10px 30px;
-        box-shadow: 0 0px 4px 0 rgba(0, 0, 0, 0.2), 0px 3px 4px -1px rgba(0, 0, 0, 0.2);
+        box-shadow: 0 0 4px 0 rgba(0, 0, 0, 0.2), 0 3px 4px -1px rgba(0, 0, 0, 0.2);
         position: -webkit-sticky;
         position: sticky;
-        bottom: 0px;
+        bottom: 0;
         // margin: 20px -30px -30px;
         background: white;
         border-radius: 0 0 4px 4px;
@@ -103,6 +103,11 @@
             margin-left: 10px !important;
         }
     }
+
+    .form-field__label.required:after {
+        content: '*';
+        margin-left: 0.25em;
+    }
 }
 
 .pc-form-group {
@@ -130,30 +135,15 @@
         background: var(--pc-form-group-title-bg-color);
     }
 
-    &>.form-field-checkbox .ignite-form-field__control {
-        & > span {
-            position: relative;
-
-            &:after {
-                content: '';
-                display: block;
-                position: absolute;
-                background-color: var(--pc-form-group-title-bg-color);
-                z-index: -1;
-                top: 0;
-                bottom: 0;
-                left: -26px;
-                right: -5px;
-            }
-        }
-        [ignite-icon] {
-            background-color: var(--pc-form-group-title-bg-color);            
-        }
-    }
-
     &+.pc-form-group {
         padding-top: 10px;
     }
+
+    .form-field__checkbox {
+        background-color: white;
+        padding: 0 5px;
+        margin: 0 -5px;
+    }
 }
 
 .pc-form-grid-row > .pc-form-group__text-title[class*='pc-form-grid-col-'] {
@@ -322,4 +312,24 @@
     align-items: center;
     background: white;
     z-index: 2;
-}
\ No newline at end of file
+}
+
+.config-changes-guard__details {
+    cursor: pointer;
+
+    summary {
+        list-style: none;
+    }
+
+    summary::-webkit-details-marker {
+        display: none;
+    }
+
+    summary:before {
+        content: '▶ ';
+    }
+
+    &[open] summary:before {
+        content: '▼ ';
+    }
+}
diff --git a/modules/web-console/frontend/app/components/page-configure/template.pug b/modules/web-console/frontend/app/components/page-configure/template.pug
index bd299be..86180fa 100644
--- a/modules/web-console/frontend/app/components/page-configure/template.pug
+++ b/modules/web-console/frontend/app/components/page-configure/template.pug
@@ -39,7 +39,7 @@
         input(type='checkbox' ng-model='$ctrl.tooltipsVisible')
         div
 
-    ui-view.theme--ignite(
+    ui-view.theme--ignite.theme--ignite-errors-horizontal(
         ignite-loading='configuration'
         ignite-loading-text='{{ $ctrl.loadingText }}'
         ignite-loading-position='top'
diff --git a/modules/web-console/frontend/app/components/page-profile/template.pug b/modules/web-console/frontend/app/components/page-profile/template.pug
index 2ec6c90..3c5fb52 100644
--- a/modules/web-console/frontend/app/components/page-profile/template.pug
+++ b/modules/web-console/frontend/app/components/page-profile/template.pug
@@ -118,7 +118,7 @@
             .col-100
                 panel-collapsible(
                     opened='$ctrl.ui.expandedPassword'
-                    on-open='$ctrl.ui.expandedToken = false'
+                    on-open='$ctrl.ui.expandedPassword = true'
                     on-close='$ctrl.onPasswordPanelClose()'
                 )
                     panel-title
diff --git a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/controller.js b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/controller.js
index c887f4d..3ee08b3 100644
--- a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/controller.js
+++ b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/controller.js
@@ -41,7 +41,7 @@
 
 const NON_COLLOCATED_JOINS_SINCE = '1.7.0';
 
-const COLLOCATED_QUERY_SINCE = [['2.3.5', '2.4.0'], ['2.4.6', '2.5.0'], '2.5.2'];
+const COLLOCATED_QUERY_SINCE = [['2.3.5', '2.4.0'], ['2.4.6', '2.5.0'], ['2.5.1-p13', '2.6.0'], '2.7.0'];
 
 const ENFORCE_JOIN_SINCE = [['1.7.9', '1.8.0'], ['1.8.4', '1.9.0'], '1.9.1'];
 
@@ -251,16 +251,16 @@
 
 // Controller for SQL notebook screen.
 export class NotebookCtrl {
-    static $inject = ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', 'IgniteCopyToClipboard', CSV.name, 'IgniteErrorParser'];
+    static $inject = ['$rootScope', '$scope', '$http', '$q', '$timeout', '$interval', '$animate', '$location', '$anchorScroll', '$state', '$filter', '$modal', '$popover', 'IgniteLoading', 'IgniteLegacyUtils', 'IgniteMessages', 'IgniteConfirm', 'AgentManager', 'IgniteChartColors', 'IgniteNotebook', 'IgniteNodes', 'uiGridExporterConstants', 'IgniteVersion', 'IgniteActivitiesData', 'JavaTypes', 'IgniteCopyToClipboard', 'CSV', 'IgniteErrorParser', 'DemoInfo'];
 
     /**
      * @param {CSV} CSV
      */
-    constructor($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, IgniteCopyToClipboard, CSV, errorParser) {
+    constructor($root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, IgniteCopyToClipboard, CSV, errorParser, DemoInfo) {
         const $ctrl = this;
 
         this.CSV = CSV;
-        Object.assign(this, { $root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, errorParser });
+        Object.assign(this, { $root, $scope, $http, $q, $timeout, $interval, $animate, $location, $anchorScroll, $state, $filter, $modal, $popover, Loading, LegacyUtils, Messages, Confirm, agentMgr, IgniteChartColors, Notebook, Nodes, uiGridExporterConstants, Version, ActivitiesData, JavaTypes, errorParser, DemoInfo });
 
         // Define template urls.
         $ctrl.paragraphRateTemplateUrl = paragraphRateTemplateUrl;
@@ -283,7 +283,15 @@
 
         $scope.caches = [];
 
-        $scope.pageSizes = [50, 100, 200, 400, 800, 1000];
+        $scope.pageSizesOptions = [
+            {value: 50, label: '50'},
+            {value: 100, label: '100'},
+            {value: 200, label: '200'},
+            {value: 400, label: '400'},
+            {value: 800, label: '800'},
+            {value: 1000, label: '1000'}
+        ];
+
         $scope.maxPages = [
             {label: 'Unlimited', value: 0},
             {label: '1', value: 1},
@@ -980,7 +988,14 @@
                 else
                     $scope.rebuildScrollParagraphs();
             })
-            .then(() => _startWatch())
+            .then(() => {
+                if ($root.IgniteDemoMode && sessionStorage.showDemoInfo !== 'true') {
+                    sessionStorage.showDemoInfo = 'true';
+
+                    this.DemoInfo.show().then(_startWatch);
+                } else
+                    _startWatch();
+            })
             .catch(() => {
                 $scope.notebookLoadFailed = true;
 
@@ -1048,7 +1063,7 @@
             const paragraph = _newParagraph({
                 name: 'Query' + (sz === 0 ? '' : sz),
                 query: '',
-                pageSize: $scope.pageSizes[1],
+                pageSize: $scope.pageSizesOptions[1].value,
                 timeLineSpan: $scope.timeLineSpans[0],
                 result: 'none',
                 rate: {
@@ -1077,7 +1092,7 @@
             const paragraph = _newParagraph({
                 name: 'Scan' + (sz === 0 ? '' : sz),
                 query: '',
-                pageSize: $scope.pageSizes[1],
+                pageSize: $scope.pageSizesOptions[1].value,
                 timeLineSpan: $scope.timeLineSpans[0],
                 result: 'none',
                 rate: {
@@ -1556,8 +1571,8 @@
             if (!$scope.queryAvailable(paragraph))
                 return;
 
-            Notebook.save($scope.notebook)
-                .catch(Messages.showError);
+            if (!paragraph.partialQuery)
+                Notebook.save($scope.notebook).catch(Messages.showError);
 
             _cancelRefresh(paragraph);
 
@@ -1569,7 +1584,7 @@
                     const args = paragraph.queryArgs = {
                         type: 'EXPLAIN',
                         cacheName: $scope.cacheNameForSql(paragraph),
-                        query: 'EXPLAIN ' + paragraph.query,
+                        query: 'EXPLAIN ' + (paragraph.partialQuery || paragraph.query),
                         pageSize: paragraph.pageSize
                     };
 
@@ -1956,11 +1971,8 @@
         };
     }
 
-    $onInit() {
-
-    }
-
     $onDestroy() {
-        this.refresh$.unsubscribe();
+        if (this.refresh$)
+            this.refresh$.unsubscribe();
     }
 }
diff --git a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/style.scss b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/style.scss
index a5fd50a..fe0ede1 100644
--- a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/style.scss
+++ b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/style.scss
@@ -121,4 +121,56 @@
     .btn.btn-default.select-toggle.tipLabel {
         padding-right: 25px;
     }
+
+    .form-field__sensitive {
+        input[type='checkbox'] {
+            height: 0;
+        }
+
+        input:checked + span {
+            color: #0067b9;
+        }
+    }
+
+    .queries-notebook-displayed-caches {
+        max-height: 210px;
+        padding: 0 5px;
+        margin-top: 10px;
+        margin-left: -5px;
+        margin-right: -5px;
+
+        overflow-y: auto;
+    }
+}
+
+.popover.settings.refresh-rate {
+    width: 244px;
+
+    [ignite-icon] {
+        height: 12px;
+    }
+
+    .popover-title {
+        padding: 10px;
+        font-size: 14px;
+    }
+
+    .actions {
+        width: 100%;
+        text-align: right;
+
+        button {
+            margin-top: 20px;
+            margin-right: 0;
+        }
+    }
+
+    .ignite-form-field {
+        display: flex;
+        padding: 5px;
+
+        input {
+            margin-right: 10px;
+        }
+    }
 }
diff --git a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/template.tpl.pug b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/template.tpl.pug
index 713f83e..781ce51 100644
--- a/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/page-queries/components/queries-notebook/template.tpl.pug
@@ -16,6 +16,24 @@
 
 include /app/helpers/jade/mixins
 
+mixin form-field__sensitive({ label, modelFilter, modelSensitive, name, placeholder })
+    .form-field.form-field__sensitive.ignite-form-field
+        +form-field__label({ label, name })
+            +form-field__tooltip({ title: 'You can set case sensitive search' })
+        .form-field__control.form-field__control-group
+            +form-field__input({ name, model: modelFilter, placeholder })(
+                type='text'
+            )
+            label.btn-ignite.btn-ignite--secondary
+                +form-field__input({ name: `${ name } + "Sensitive"`, model: modelSensitive, placeholder })(
+                    type='checkbox'
+                )
+                span Cs
+        .form-field__errors(
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
+        )
+
 mixin btn-toolbar(btn, click, tip, focusId)
     i.btn.btn-default.fa(class=btn ng-click=click bs-tooltip='' data-title=tip ignite-on-click-focus=focusId data-trigger='hover' data-placement='bottom')
 
@@ -72,59 +90,120 @@
             input.form-control(id='paragraph-name-{{paragraph.id}}' ng-model='paragraph.editName' required ng-click='$event.stopPropagation();' ignite-on-enter='renameParagraph(paragraph, paragraph.editName)' ignite-on-escape='paragraph.edit = false')
 
 mixin query-settings
-    .panel-top-align
-        label.tipLabel(bs-tooltip data-placement='bottom' data-title='Configure periodical execution of last successfully executed query') Refresh rate:
-            button.btn.btn-default.fa.fa-clock-o.tipLabel(ng-class='{"btn-info": paragraph.rate && paragraph.rate.installed}' bs-popover data-template-url='{{ $ctrl.paragraphRateTemplateUrl }}' data-placement='left' data-auto-close='1' data-trigger='click') {{rateAsString(paragraph)}}
+    div
+        .form-field--inline(
+            bs-tooltip
+            data-placement='top'
+            data-title='Max number of rows to show in query result as one page'
+        )
+            +form-field__dropdown({
+                label: 'Rows per page:',
+                model: 'paragraph.pageSize',
+                name: '"pageSize" + paragraph.id',
+                options: 'pageSizesOptions'
+            })
 
-        label.tipLabel(bs-tooltip data-placement='bottom' data-title='Max number of rows to show in query result as one page') Page size:
-            button.btn.btn-default.select-toggle.tipLabel(ng-model='paragraph.pageSize' bs-select bs-options='item for item in pageSizes')
+        .form-field--inline(
+            bs-tooltip
+            data-placement='top'
+            data-title='Limit query max results to specified number of pages'
+        )
+            +form-field__dropdown({
+                label: 'Max pages:',
+                model: 'paragraph.maxPages',
+                name: '"maxPages" + paragraph.id',
+                options: 'maxPages'
+            })
 
-        label.tipLabel(bs-tooltip data-placement='bottom' data-title='Limit query max results to specified number of pages') Max pages:
-            button.btn.btn-default.select-toggle.tipLabel(ng-model='paragraph.maxPages' bs-select bs-options='item.value as item.label for item in maxPages')
+        .form-field--inline(
+            bs-tooltip
+            data-placement='bottom'
+            data-title='Configure periodical execution of last successfully executed query'
+        )
+            button.btn-ignite-group(
+                bs-popover
+                data-template-url='{{ $ctrl.paragraphRateTemplateUrl }}'
+                data-placement='bottom-right'
+                data-auto-close='1'
+                data-trigger='click'
+            )
+                .btn-ignite(
+                    ng-class='{\
+                        "btn-ignite--primary": paragraph.rate && paragraph.rate.installed,\
+                        "btn-ignite--secondary": !(paragraph.rate && paragraph.rate.installed),\
+                    }'
+                )
+                    svg(ignite-icon='clock')
+                    | &nbsp; {{ rateAsString(paragraph) }}
+                .btn-ignite(
+                    ng-class='{\
+                        "btn-ignite--primary": paragraph.rate && paragraph.rate.installed,\
+                        "btn-ignite--secondary": !(paragraph.rate && paragraph.rate.installed),\
+                    }'
+                )
+                    span.icon.fa.fa-caret-down
+    div
+        .row(ng-if='nonCollocatedJoinsAvailable(paragraph)')
+            +form-field__checkbox({
+                label: 'Allow non-collocated joins',
+                model: 'paragraph.nonCollocatedJoins',
+                name: '"nonCollocatedJoins" + paragraph.id',
+                tip: 'Non-collocated joins is a special mode that allow to join data across cluster without collocation.<br/>\
+                Nested joins are not supported for now.<br/>\
+                <b>NOTE</b>: In some cases it may consume more heap memory or may take a long time than collocated joins.',
+                tipOpts: { placement: 'top' }
+            })
 
-        .panel-tip-container
-            .row(ng-if='nonCollocatedJoinsAvailable(paragraph)')
-                label.tipLabel(bs-tooltip data-placement='bottom' data-title='Non-collocated joins is a special mode that allow to join data across cluster without collocation.<br/>\
-                    Nested joins are not supported for now.<br/>\
-                    <b>NOTE</b>: In some cases it may consume more heap memory or may take a long time than collocated joins.' data-trigger='hover')
-                    input(type='checkbox' ng-model='paragraph.nonCollocatedJoins')
-                    span Allow non-collocated joins
-            .row(ng-if='collocatedJoinsAvailable(paragraph)')
-                label.tipLabel(bs-tooltip data-placement='bottom' data-title='Used For Optimization Purposes Of Queries With GROUP BY Statements.<br/>\
-                    <b>NOTE:</b> Whenever Ignite executes a distributed query, it sends sub-queries to individual cluster members.<br/>\
-                    If you know in advance that the elements of your query selection are collocated together on the same node\
-                    and you group by collocated key (primary or affinity key), then Ignite can make significant performance and\
-                    network optimizations by grouping data on remote nodes.' data-trigger='hover')
-                    input(type='checkbox' ng-model='paragraph.collocated')
-                    span Collocated Query
-            .row(ng-if='enforceJoinOrderAvailable(paragraph)')
-                label.tipLabel(bs-tooltip data-placement='bottom' data-title='Enforce join order of tables in the query.<br/>\
-                    If <b>set</b>, then query optimizer will not reorder tables within join.<br/>\
-                    <b>NOTE:</b> It is not recommended to enable this property unless you have verified that\
-                    indexes are not selected in optimal order.' data-trigger='hover')
-                    input(type='checkbox' ng-model='paragraph.enforceJoinOrder')
-                    span Enforce join order
-            .row(ng-if='lazyQueryAvailable(paragraph)')
-                label.tipLabel(bs-tooltip data-placement='bottom' data-title='By default Ignite attempts to fetch the whole query result set to memory and send it to the client.<br/>\
-                    For small and medium result sets this provides optimal performance and minimize duration of internal database locks, thus increasing concurrency.<br/>\
-                    If result set is too big to fit in available memory this could lead to excessive GC pauses and even OutOfMemoryError.<br/>\
-                    Use this flag as a hint for Ignite to fetch result set lazily, thus minimizing memory consumption at the cost of moderate performance hit.' data-trigger='hover')
-                    input(type='checkbox' ng-model='paragraph.lazy')
-                    span Lazy result set
+        .row(ng-if='collocatedJoinsAvailable(paragraph)')
+            +form-field__checkbox({
+                label: 'Collocated Query',
+                model: 'paragraph.collocated',
+                name: '"collocated" + paragraph.id',
+                tip: 'Used For Optimization Purposes Of Queries With GROUP BY Statements.<br/>\
+                <b>NOTE:</b> Whenever Ignite executes a distributed query, it sends sub-queries to individual cluster members.<br/>\
+                If you know in advance that the elements of your query selection are collocated together on the same node\
+                and you group by collocated key (primary or affinity key), then Ignite can make significant performance and\
+                network optimizations by grouping data on remote nodes.',
+                tipOpts: { placement: 'top' }
+            })
+
+        .row(ng-if='enforceJoinOrderAvailable(paragraph)')
+            +form-field__checkbox({
+                label: 'Enforce join order',
+                model: 'paragraph.enforceJoinOrder',
+                name: '"enforceJoinOrder" + paragraph.id',
+                tip: 'Enforce join order of tables in the query.<br/>\
+                If <b>set</b>, then query optimizer will not reorder tables within join.<br/>\
+                <b>NOTE:</b> It is not recommended to enable this property unless you have verified that\
+                indexes are not selected in optimal order.',
+                tipOpts: { placement: 'top' }
+            })
+
+        .row(ng-if='lazyQueryAvailable(paragraph)')
+            +form-field__checkbox({
+                label: 'Lazy result set',
+                model: 'paragraph.lazy',
+                name: '"lazy" + paragraph.id',
+                tip: 'By default Ignite attempts to fetch the whole query result set to memory and send it to the client.<br/>\
+                For small and medium result sets this provides optimal performance and minimize duration of internal database locks, thus increasing concurrency.<br/>\
+                If result set is too big to fit in available memory this could lead to excessive GC pauses and even OutOfMemoryError.<br/>\
+                Use this flag as a hint for Ignite to fetch result set lazily, thus minimizing memory consumption at the cost of moderate performance hit.',
+                tipOpts: { placement: 'top' }
+            })
 
 mixin query-actions
-    button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph)')
-        div
-            i.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(false)')
-            i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(false)')
-            span.tipLabelExecute Execute
-    button.btn.btn-primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph, true)')
-        div
-            i.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(true)')
-            i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(true)')
-            span.tipLabelExecute Execute on selected node
+    button.btn-ignite.btn-ignite--primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph)')
+        span.icon-left.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(false)')
+        span.icon-left.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(false)')
+        | Execute
 
-    a.btn.btn-default(ng-disabled='!queryAvailable(paragraph)' ng-click='explain(paragraph)' data-placement='bottom' bs-tooltip='' data-title='{{queryTooltip(paragraph, "explain query")}}') Explain
+    button.btn-ignite.btn-ignite--primary(ng-disabled='!queryAvailable(paragraph)' ng-click='execute(paragraph, true)')
+        span.icon-left.fa.fa-fw.fa-play(ng-hide='paragraph.executionInProgress(true)')
+        span.icon-left.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.executionInProgress(true)')
+        | Execute on selected node
+
+    button.btn-ignite.btn-ignite--secondary(ng-disabled='!queryAvailable(paragraph)' ng-click='explain(paragraph)' data-placement='bottom' bs-tooltip='' data-title='{{queryTooltip(paragraph, "explain query")}}')
+        | Explain
 
 mixin table-result-heading-query
     .total.row
@@ -241,26 +320,44 @@
     .panel-collapse(role='tabpanel' bs-collapse-target)
         .col-sm-12.sql-controls
             .col-sm-3
-                +dropdown-required('Cache:', 'paragraph.cacheName', '"cache"', 'true', 'false', 'Choose cache', 'caches')
+                +form-field__dropdown({
+                    label: 'Cache:',
+                    model: 'paragraph.cacheName',
+                    name: '"cache"',
+                    placeholder: 'Choose cache',
+                    options: 'caches'
+                })
             .col-sm-3
-                +text-enabled('Filter:', 'paragraph.filter', '"filter"', true, false, 'Enter filter')
-                    label.btn.btn-default.ignite-form-field__btn(ng-click='paragraph.caseSensitive = !paragraph.caseSensitive')
-                        input(type='checkbox' ng-model='paragraph.caseSensitive')
-                        span(bs-tooltip data-title='Select this checkbox for case sensitive search') Cs
-            label.tipLabel(bs-tooltip data-placement='bottom' data-title='Max number of rows to show in query result as one page') Page size:
-                button.btn.btn-default.select-toggle.tipLabel(ng-model='paragraph.pageSize' bs-select bs-options='item for item in pageSizes')
+                +form-field__sensitive({
+                    label: 'Filter:',
+                    modelFilter: 'paragraph.filter',
+                    modelSensitive: 'paragraph.caseSensitive',
+                    name: '"filter"',
+                    placeholder: 'Enter filter'
+                })
+
+            .col-sm-3
+                +form-field__dropdown({
+                    label: 'Rows per page:',
+                    model: 'paragraph.pageSize',
+                    name: '"pageSize" + paragraph.id',
+                    options: 'pageSizesOptions',
+                    tip: 'Max number of rows to show in query result as one page',
+                    tipOpts: { placement: 'top' }
+                })
 
         .col-sm-12.sql-controls
-            button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph)')
-                div
-                    i.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(false)')
-                    i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(false)')
-                    span.tipLabelExecute Scan
+            div
+                button.btn-ignite.btn-ignite--primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph)')
+                    span.icon-left.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(false)')
+                    span.icon-left.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(false)')
+                    | Scan
 
-            button.btn.btn-primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph, true)')
-                i.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(true)')
-                i.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(true)')
-                span.tipLabelExecute Scan on selected node
+                button.btn-ignite.btn-ignite--primary(ng-disabled='!scanAvailable(paragraph)' ng-click='scan(paragraph, true)')
+                    span.icon-left.fa.fa-fw.fa-play(ng-hide='paragraph.checkScanInProgress(true)')
+                    span.icon-left.fa.fa-fw.fa-refresh.fa-spin(ng-show='paragraph.checkScanInProgress(true)')
+                    | Scan on selected node
+            div
 
         .col-sm-12.sql-result(ng-if='paragraph.queryExecuted() && !paragraph.scanningInProgress' ng-switch='paragraph.resultType()')
             .error(ng-switch-when='error') Error: {{paragraph.error.message}}
@@ -295,31 +392,36 @@
                     i.fa.fa-database.tipField(title='Click to show cache types metadata dialog' bs-popover data-template-url='{{ $ctrl.cacheMetadataTemplateUrl }}' data-placement='bottom-right' data-trigger='click' data-container='#{{ paragraph.id }}')
                     .input-tip
                         input.form-control(type='text' st-search='label' placeholder='Filter caches...')
-                    table.links
-                        tbody.scrollable-y(style='max-height: 15em; display: block;')
-                            tr(ng-repeat='cache in displayedCaches track by cache.name')
-                                td(style='width: 100%')
-                                    input.labelField(id='cache_{{ [paragraph.id, $index].join("_") }}' type='radio' value='{{cache.name}}' ng-model='paragraph.cacheName')
-                                    label(for='cache_{{ [paragraph.id, $index].join("_") }} ' ng-bind-html='cache.label')
+
+                    .queries-notebook-displayed-caches
+                        div(ng-repeat='cache in displayedCaches track by cache.name')
+                            +form-field__radio({
+                                label: '{{ cache.label }}',
+                                model: 'paragraph.cacheName',
+                                name: '"cache_" + [paragraph.id, $index].join("_")',
+                                value: 'cache.name'
+                            })
+
                     .settings-row
                         .row(ng-if='ddlAvailable(paragraph)')
-                            label.tipLabel.use-cache(bs-tooltip data-placement='bottom'
-                                data-title=
-                                    'Use selected cache as default schema name.<br/>\
+                            +form-field__checkbox({
+                                label: 'Use selected cache as default schema name',
+                                model: 'paragraph.useAsDefaultSchema',
+                                name: '"useAsDefaultSchema" + paragraph.id',
+                                tip: 'Use selected cache as default schema name.<br/>\
                                     This will allow to execute query on specified cache without specify schema name.<br/>\
-                                    <b>NOTE:</b> In future version of Ignite this feature will be removed.'
-                                data-trigger='hover')
-                                input(type='checkbox' ng-model='paragraph.useAsDefaultSchema')
-                                span Use selected cache as default schema name
+                                    <b>NOTE:</b> In future version of Ignite this feature will be removed.',
+                                tipOpts: { placement: 'top' }
+                            })
                 .empty-caches(ng-show='displayedCaches.length == 0 && caches.length != 0')
                     label Wrong caches filter
                 .empty-caches(ng-show='caches.length == 0')
                     label No caches
         .col-sm-12.sql-controls
-            +query-actions
+            div
+                +query-actions
 
-            .pull-right
-                +query-settings
+            +query-settings
         .col-sm-12.sql-result(ng-if='paragraph.queryExecuted()' ng-switch='paragraph.resultType()')
             .error(ng-switch-when='error')
                 label Error: {{paragraph.error.message}}
diff --git a/modules/web-console/frontend/app/components/page-queries/components/queries-notebooks-list/template.tpl.pug b/modules/web-console/frontend/app/components/page-queries/components/queries-notebooks-list/template.tpl.pug
index 614a6a6..bbf6df7 100644
--- a/modules/web-console/frontend/app/components/page-queries/components/queries-notebooks-list/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/page-queries/components/queries-notebooks-list/template.tpl.pug
@@ -27,22 +27,18 @@
 
 .queries-notebooks-list
     .panel--ignite
-        .panel-heading.ui-grid-settings.ui-grid-ignite__panel
-            .panel-title
-                div(ng-if="!$root.IgniteDemoMode")
-                    +ignite-form-field-bsdropdown({
-                        label: 'Actions',
-                        model: '$ctrl.action',
-                        name: 'action',
-                        disabled: '$ctrl.gridApi.selection.legacyGetSelectedRows().length === 0',
-                        required: false,
-                        options: '$ctrl.actionOptions'
-                    })
+        header.header-with-selector
+            div
+                span Notebooks
 
-                .ui-grid-settings--heading
-                    span Notebooks
-
-
+            div(ng-if="!$root.IgniteDemoMode")
+                +ignite-form-field-bsdropdown({
+                    label: 'Actions',
+                    model: '$ctrl.action',
+                    name: 'action',
+                    disabled: '$ctrl.gridApi.selection.legacyGetSelectedRows().length === 0',
+                    options: '$ctrl.actionOptions'
+                })
 
         .panel-collapse(ignite-loading='notebooksLoading' ignite-loading-text='Loading notebooks...')
             .grid.ui-grid--ignite#queriesNotebooksList(ui-grid='$ctrl.gridOptions' ui-grid-resize-columns ui-grid-selection ui-grid-hovering)
diff --git a/modules/web-console/frontend/app/components/page-queries/style.scss b/modules/web-console/frontend/app/components/page-queries/style.scss
index 8f13c2e..818fb1c 100644
--- a/modules/web-console/frontend/app/components/page-queries/style.scss
+++ b/modules/web-console/frontend/app/components/page-queries/style.scss
@@ -18,3 +18,7 @@
 button.select-toggle.btn-chart-column-agg-fx::after {
 	right: 0;
 }
+
+.sql-controls {
+	flex-wrap: wrap;
+}
diff --git a/modules/web-console/frontend/app/components/ui-grid-filters/directive.js b/modules/web-console/frontend/app/components/ui-grid-filters/directive.js
index c9b84af..e22530b 100644
--- a/modules/web-console/frontend/app/components/ui-grid-filters/directive.js
+++ b/modules/web-console/frontend/app/components/ui-grid-filters/directive.js
@@ -27,7 +27,8 @@
                     return;
 
                 const applyMultiselectFilter = (cd) => {
-                    cd.headerCellTemplate = template;
+                    if (!cd.headerCellTemplate)
+                        cd.headerCellTemplate = template;
 
                     cd.filter = {
                         type: uiGridConstants.filter.SELECT,
diff --git a/modules/web-console/frontend/app/components/ui-grid/component.js b/modules/web-console/frontend/app/components/ui-grid/component.js
new file mode 100644
index 0000000..f0c3b06
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/component.js
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import './style.scss';
+import template from './template.pug';
+import controller from './controller';
+
+export default {
+    template,
+    controller,
+    bindings: {
+        gridApi: '=?',
+        gridTreeView: '<?',
+        gridGrouping: '<?',
+        gridThin: '<?',
+        gridHeight: '<?',
+        tabName: '<?',
+        tableTitle: '<?',
+
+        // Input Events.
+        items: '<',
+        columnDefs: '<',
+        categories: '<?',
+        oneWaySelection: '<?',
+        rowIdentityKey: '@?',
+        selectedRows: '<?',
+        selectedRowsId: '<?',
+
+        // Output events.
+        onSelectionChange: '&?'
+    }
+};
diff --git a/modules/web-console/frontend/app/components/ui-grid/controller.js b/modules/web-console/frontend/app/components/ui-grid/controller.js
new file mode 100644
index 0000000..a640604
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/controller.js
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import debounce from 'lodash/debounce';
+import headerTemplate from 'app/primitives/ui-grid-header/index.tpl.pug';
+
+export default class IgniteUiGrid {
+    /** @type */
+    gridApi;
+
+    /** @type */
+    gridThin;
+
+    /** @type */
+    gridHeight;
+
+    /** @type */
+    items;
+
+    /** @type */
+    columnDefs;
+
+    /** @type */
+    categories;
+
+    /** @type */
+    onSelectionChange;
+
+    /** @type */
+    selectedRows;
+
+    /** @type */
+    selectedRowsId;
+
+    /** @type */
+    _selected;
+
+    static $inject = ['$scope', '$element', '$timeout', 'gridUtil'];
+
+    /**
+     * @param {ng.IScope} $scope
+     */
+    constructor($scope, $element, $timeout, gridUtil) {
+        this.$scope = $scope;
+        this.$element = $element;
+        this.$timeout = $timeout;
+        this.gridUtil = gridUtil;
+
+        this.rowIdentityKey = '_id';
+
+        this.rowHeight = 48;
+        this.headerRowHeight = 70;
+    }
+
+    $onInit() {
+        this.SCROLLBAR_WIDTH = this.gridUtil.getScrollbarWidth();
+
+        if (this.gridThin) {
+            this.rowHeight = 36;
+            this.headerRowHeight = 48;
+        }
+
+        this.grid = {
+            data: this.items,
+            columnDefs: this.columnDefs,
+            categories: this.categories,
+            rowHeight: this.rowHeight,
+            headerRowHeight: this.headerRowHeight,
+            columnVirtualizationThreshold: 30,
+            enableColumnMenus: false,
+            enableFullRowSelection: true,
+            enableFiltering: true,
+            enableRowHashing: false,
+            fastWatch: true,
+            showTreeExpandNoChildren: false,
+            modifierKeysToMultiSelect: true,
+            selectionRowHeaderWidth: 52,
+            exporterCsvColumnSeparator: ';',
+            onRegisterApi: (api) => {
+                this.gridApi = api;
+
+                api.core.on.rowsVisibleChanged(this.$scope, () => {
+                    this.adjustHeight();
+
+                    // Without property existence check non-set selectedRows or selectedRowsId
+                    // binding might cause unwanted behavior,
+                    // like unchecking rows during any items change,
+                    // even if nothing really changed.
+                    if (this._selected && this._selected.length && this.onSelectionChange) {
+                        this.applyIncomingSelectionRows(this._selected);
+
+                        // Change selected rows if filter was changed.
+                        this.onRowsSelectionChange([]);
+                    }
+                });
+
+                if (this.onSelectionChange) {
+                    api.selection.on.rowSelectionChanged(this.$scope, (row, e) => {
+                        this.onRowsSelectionChange([row], e);
+                    });
+
+                    api.selection.on.rowSelectionChangedBatch(this.$scope, (rows, e) => {
+                        this.onRowsSelectionChange(rows, e);
+                    });
+                }
+
+                api.core.on.filterChanged(this.$scope, (column) => {
+                    this.onFilterChange(column);
+                });
+
+                this.$timeout(() => {
+                    if (this.selectedRowsId) this.applyIncomingSelectionRowsId(this.selectedRowsId);
+                });
+            }
+        };
+
+        if (this.grid.categories)
+            this.grid.headerTemplate = headerTemplate;
+    }
+
+    $onChanges(changes) {
+        const hasChanged = (binding) =>
+            binding in changes && changes[binding].currentValue !== changes[binding].previousValue;
+
+        if (hasChanged('items') && this.grid)
+            this.grid.data = changes.items.currentValue;
+
+        if (hasChanged('selectedRows') && this.grid && this.grid.data && this.onSelectionChange)
+            this.applyIncomingSelectionRows(changes.selectedRows.currentValue);
+
+        if (hasChanged('selectedRowsId') && this.grid && this.grid.data)
+            this.applyIncomingSelectionRowsId(changes.selectedRowsId.currentValue);
+
+        if (hasChanged('gridHeight') && this.grid)
+            this.adjustHeight();
+    }
+
+    applyIncomingSelectionRows = (selected = []) => {
+        this.gridApi.selection.clearSelectedRows({ ignore: true });
+
+        const visibleRows = this.gridApi.core.getVisibleRows(this.gridApi.grid)
+            .map(({ entity }) => entity);
+
+        const rows = visibleRows.filter((r) =>
+            selected.map((row) => row[this.rowIdentityKey]).includes(r[this.rowIdentityKey]));
+
+        rows.forEach((r) => {
+            this.gridApi.selection.selectRow(r, { ignore: true });
+        });
+    };
+
+    applyIncomingSelectionRowsId = (selected = []) => {
+        if (this.onSelectionChange) {
+            this.gridApi.selection.clearSelectedRows({ ignore: true });
+
+            const visibleRows = this.gridApi.core.getVisibleRows(this.gridApi.grid)
+                .map(({ entity }) => entity);
+
+            const rows = visibleRows.filter((r) =>
+                selected.includes(r[this.rowIdentityKey]));
+
+            rows.forEach((r) => {
+                this.gridApi.selection.selectRow(r, { ignore: true });
+            });
+        }
+    };
+
+    onRowsSelectionChange = debounce((rows, e = {}) => {
+        if (e.ignore)
+            return;
+
+        this._selected = this.gridApi.selection.legacyGetSelectedRows();
+
+        if (this.onSelectionChange)
+            this.onSelectionChange({ $event: this._selected });
+    });
+
+    onFilterChange = debounce((column) => {
+        if (!this.gridApi.selection)
+            return;
+
+        if (this.selectedRows && this.onSelectionChange)
+            this.applyIncomingSelectionRows(this.selectedRows);
+
+        if (this.selectedRowsId)
+            this.applyIncomingSelectionRowsId(this.selectedRowsId);
+    });
+
+    adjustHeight() {
+        let height = this.gridHeight;
+
+        if (!height) {
+            const maxRowsToShow = this.maxRowsToShow || 5;
+            const headerBorder = 1;
+            const visibleRows = this.gridApi.core.getVisibleRows().length;
+            const header = this.grid.headerRowHeight + headerBorder;
+            const optionalScroll = (visibleRows ? this.gridUtil.getScrollbarWidth() : 0);
+
+            height = Math.min(visibleRows, maxRowsToShow) * this.grid.rowHeight + header + optionalScroll;
+        }
+
+        this.gridApi.grid.element.css('height', height + 'px');
+        this.gridApi.core.handleWindowResize();
+    }
+}
diff --git a/modules/web-console/frontend/app/components/ui-grid/decorator.js b/modules/web-console/frontend/app/components/ui-grid/decorator.js
new file mode 100644
index 0000000..a82f702
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/decorator.js
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+export default ['$delegate', 'uiGridSelectionService', ($delegate, uiGridSelectionService) => {
+    $delegate[0].require = ['^uiGrid', '?^igniteGridTable'];
+    $delegate[0].compile = () => ($scope, $el, $attr, [uiGridCtrl, igniteGridTable]) => {
+        const self = uiGridCtrl.grid;
+
+        $delegate[0].link($scope, $el, $attr, uiGridCtrl);
+
+        const mySelectButtonClick = (row, evt) => {
+            evt.stopPropagation();
+
+            if (evt.shiftKey)
+                uiGridSelectionService.shiftSelect(self, row, evt, self.options.multiSelect);
+            else
+                uiGridSelectionService.toggleRowSelection(self, row, evt, self.options.multiSelect, self.options.noUnselect);
+        };
+
+        if (igniteGridTable)
+            $scope.selectButtonClick = mySelectButtonClick;
+    };
+    return $delegate;
+}];
diff --git a/modules/web-console/frontend/app/components/ui-grid/index.js b/modules/web-console/frontend/app/components/ui-grid/index.js
new file mode 100644
index 0000000..fce5268
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/index.js
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import angular from 'angular';
+import component from './component';
+import decorator from './decorator';
+
+export default angular
+    .module('ignite-console.ui-grid', [])
+    .component('igniteGridTable', component)
+    .decorator('uiGridSelectionRowHeaderButtonsDirective', decorator);
diff --git a/modules/web-console/frontend/app/components/ui-grid/style.scss b/modules/web-console/frontend/app/components/ui-grid/style.scss
new file mode 100644
index 0000000..dba5d35
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/style.scss
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.ignite-grid-table,
+ignite-grid-table {
+	@import 'public/stylesheets/variables';
+
+    .ui-grid.ui-grid--ignite.ui-grid--thin {
+        // Start section row height.
+        .ui-grid-row {
+            height: 36px;
+
+            .ui-grid-cell {
+                height: 100%;
+            }
+        }
+
+        .ui-grid-cell .ui-grid-cell-contents {
+            padding: 8px 20px;
+            min-height: 35px;
+            max-height: 35px;
+        }
+
+        // Set force header height.
+        // Fix hide border bottom of pinned column without data.
+        .ui-grid-header-canvas {
+            height: 48px;
+        }
+    }
+
+    .ui-grid.ui-grid--ignite.ui-grid--thin-rows {
+        .ui-grid-row {
+            height: 36px !important;
+
+            .ui-grid-cell {
+                height: 100% !important;
+            }
+        }
+
+        .ui-grid-cell .ui-grid-cell-contents {
+            padding: 8px 20px !important;
+            min-height: 35px !important;
+            max-height: 35px !important;
+        }
+    }
+
+    .ui-grid.ui-grid--ignite:not(.ui-grid--thin) {
+        // Start section row height.
+        .ui-grid-row {
+            height: 48px;
+
+            .ui-grid-cell {
+                height: 100%;
+            }
+        }
+
+        .ui-grid-cell .ui-grid-cell-contents {
+            padding: 14px 20px;
+            min-height: 47px;
+            max-height: 47px;
+        }
+
+        // Set force header height.
+        // Fix hide border bottom of pinned column without data.
+        .ui-grid-header-canvas {
+            height: 70px;
+        }
+
+        [role="columnheader"] {
+            margin: 11px 0;
+        }
+
+        // Fix checkbox position.
+        .ui-grid-header-cell  .ui-grid-selection-row-header-buttons {
+            margin-top: 12px;
+        }
+    }
+
+    .ui-grid.ui-grid--ignite {
+        .ui-grid-header .ui-grid-tree-base-row-header-buttons.ui-grid-icon-plus-squared,
+        .ui-grid-header .ui-grid-tree-base-row-header-buttons.ui-grid-icon-minus-squared {
+            top: 14px;
+        }
+
+        [role="columnheader"] {
+            display: flex;
+            align-items: center;
+        }
+
+        .ui-grid-header--subcategories [role="columnheader"] {
+            margin: 0;
+            background-color: white;
+        }
+
+        // Removes unwanted box-shadow and border-right from checkboxes column
+        .ui-grid-pinned-container.ui-grid-pinned-container-left .ui-grid-render-container-left:before {
+            box-shadow: none;
+        }
+        .ui-grid-pinned-container.ui-grid-pinned-container-left .ui-grid-cell:last-child {
+            border-right: none;
+        }
+
+        .ui-grid-pinned-container-left .ui-grid-header--subcategories .ui-grid-header-span.ui-grid-header-cell {
+            box-shadow: none;
+        }
+
+        .ui-grid-header:not(.ui-grid-header--subcategories) .ui-grid-filters[role="columnheader"] {
+            padding-top: 6px;
+        }
+
+        // End section row height.
+        .ui-grid-header-cell:last-child .ui-grid-column-resizer.right {
+            border-right: none;
+        }
+
+        input[type="text"].ui-grid-filter-input {
+            &::placeholder {
+                color: rgba(66, 66, 66, 0.5);
+                font-weight: normal;
+                text-align: left;
+            }
+
+            &:focus {
+                border-color: $ignite-brand-success;
+                box-shadow: none;
+            }
+
+            font-family: Roboto;
+            outline: none;
+            overflow: visible;
+
+            box-sizing: border-box;
+            width: 100%;
+            max-width: initial;
+            height: 29px;
+            padding: 0 10px;
+            margin-right: 0;
+
+            border: solid 1px #c5c5c5;
+            border-radius: 4px;
+            background-color: #ffffff;
+            box-shadow: none;
+
+            color: $text-color;
+            text-align: left;
+            font-weight: normal;
+            line-height: 16px;
+        }
+    }
+}
diff --git a/modules/web-console/frontend/app/components/ui-grid/template.pug b/modules/web-console/frontend/app/components/ui-grid/template.pug
new file mode 100644
index 0000000..4a0f8b3
--- /dev/null
+++ b/modules/web-console/frontend/app/components/ui-grid/template.pug
@@ -0,0 +1,60 @@
+//-
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+div(ng-if='::$ctrl.gridTreeView')
+    .grid.ui-grid--ignite(
+        ui-grid='$ctrl.grid'
+        ui-grid-resize-columns
+        ui-grid-filters
+        ui-grid-selection
+        ui-grid-exporter
+        ui-grid-pinning
+        ui-grid-tree-view
+        ng-class='{ "ui-grid--thin": $ctrl.gridThin }'
+    )
+
+div(ng-if='::$ctrl.gridGrouping')
+    .grid.ui-grid--ignite(
+        ui-grid='$ctrl.grid'
+        ui-grid-resize-columns
+        ui-grid-filters
+        ui-grid-selection
+        ui-grid-exporter
+        ui-grid-pinning
+        ui-grid-grouping
+        ng-class='{ "ui-grid--thin": $ctrl.gridThin }'
+    )
+
+div(ng-if='::(!$ctrl.gridGrouping && !$ctrl.gridTreeView && $ctrl.onSelectionChange)')
+    .grid.ui-grid--ignite(
+        ui-grid='$ctrl.grid'
+        ui-grid-resize-columns
+        ui-grid-filters
+        ui-grid-selection
+        ui-grid-exporter
+        ui-grid-pinning
+        ng-class='{ "ui-grid--thin": $ctrl.gridThin }'
+    )
+
+div(ng-if='::(!$ctrl.gridGrouping && !$ctrl.gridTreeView && !$ctrl.onSelectionChange)')
+    .grid.ui-grid--ignite(
+        ui-grid='$ctrl.grid'
+        ui-grid-resize-columns
+        ui-grid-filters
+        ui-grid-exporter
+        ui-grid-pinning
+        ng-class='{ "ui-grid--thin": $ctrl.gridThin }'
+    )
diff --git a/modules/web-console/frontend/app/components/user-notifications/style.scss b/modules/web-console/frontend/app/components/user-notifications/style.scss
index e4fa39e..a1dd94f 100644
--- a/modules/web-console/frontend/app/components/user-notifications/style.scss
+++ b/modules/web-console/frontend/app/components/user-notifications/style.scss
@@ -21,43 +21,4 @@
 
 #user-notifications-dialog {
     min-height: 160px;
-
-    > .ignite-form-field {
-        display: flex;
-        flex-direction: column;
-
-        > .ignite-form-field__label {
-            color: $gray-light;
-            font-size: 12px;
-
-            margin-left: 10px;
-            margin-bottom: 5px;
-        }
-
-        > .ignite-form-field__control {
-            width: 100%;
-
-            .ace_editor {
-                border-radius: 4px;
-                box-shadow: inset 0 1px 3px 0 rgba(0, 0, 0, 0.5);
-                border: solid 1px $disabled-color;
-
-                margin: 0;
-
-                .ace_content {
-                    padding-left: 2px;
-                }
-            }
-        }
-    }
-}
-
-.modal-footer {
-    .show-message {
-        display: flex;
-
-        span {
-            margin-left: 5px;
-        }
-    }
 }
diff --git a/modules/web-console/frontend/app/components/user-notifications/template.tpl.pug b/modules/web-console/frontend/app/components/user-notifications/template.tpl.pug
index a8c0394..a680dea 100644
--- a/modules/web-console/frontend/app/components/user-notifications/template.tpl.pug
+++ b/modules/web-console/frontend/app/components/user-notifications/template.tpl.pug
@@ -16,7 +16,7 @@
 
 include /app/helpers/jade/mixins
 
-.modal.modal--ignite(tabindex='-1' role='dialog')
+.modal.modal--ignite.theme--ignite(tabindex='-1' role='dialog')
     .modal-dialog
         .modal-content
             .modal-header
@@ -30,18 +30,18 @@
                     | Enter the text, which will show for all users of the Web Console about an important event or
                     | warning about ongoing technical works. It will appear #[b on the yellow bar] in the header.
 
-                .ignite-form-field
-                    +ignite-form-field__label('Your notification:', 'notification', true)
-
-                    .ignite-form-field__control
-                        .input-tip
-                            div(ignite-ace='{onLoad: $ctrl.onLoad, mode: "xml"}' ng-trim='true' ng-model='$ctrl.message')
+                .form-field__ace.ignite-form-field
+                    +form-field__label({ label: 'Your notification:', name: 'notification', required: true})
+                    .form-field__control
+                        div(ignite-ace='{onLoad: $ctrl.onLoad, mode: "xml"}' ng-trim='true' ng-model='$ctrl.message')
 
             .modal-footer
-                .pull-left
-                    label.show-message
-                        input(type='checkbox' ng-model='$ctrl.isShown')
-                        span Show message
+                +form-field__checkbox({
+                    label: 'Show message',
+                    name: 'showMessages',
+                    model: '$ctrl.isShown'
+                })
 
-                button.btn-ignite.btn-ignite--link-success(id='btn-cancel' ng-click='$hide()') Cancel
-                button.btn-ignite.btn-ignite--success(id='btn-submit' ng-click='$ctrl.submit()') Submit
+                div
+                    button.btn-ignite.btn-ignite--link-success(id='btn-cancel' ng-click='$hide()') Cancel
+                    button.btn-ignite.btn-ignite--success(id='btn-submit' ng-click='$ctrl.submit()') Submit
diff --git a/modules/web-console/frontend/app/data/getting-started.json b/modules/web-console/frontend/app/data/getting-started.json
index 1802b55..58ca367 100644
--- a/modules/web-console/frontend/app/data/getting-started.json
+++ b/modules/web-console/frontend/app/data/getting-started.json
@@ -2,11 +2,11 @@
     {
         "title": "With Apache Ignite Web Console You Can",
         "message": [
-            "<div class='col-xs-4'>",
-            " <img src='/images/ignite-puzzle.png' width='80%' class='getting-started-puzzle' />",
+            "<div class='col-40 align-center'>",
+            " <img src='/images/ignite-puzzle.png' width='170px' class='getting-started-puzzle' />",
             "</div>",
-            "<div class='col-xs-8'>",
-            " <ul>",
+            "<div class='col-60'>",
+            " <ul class='align-top'>",
             "  <li>Generate cluster configuration</li>",
             "  <li>Import domain model from database</li>",
             "  <li>Configure all needed caches</li>",
@@ -20,11 +20,11 @@
     {
         "title": "Quick cluster configuration",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/cluster-quick.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Quick configuration of cluster and it's caches</li>",
             " </ul>",
             "</div>"
@@ -33,11 +33,11 @@
     {
         "title": "Clusters",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/cluster.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Configure cluster properties</li>",
             " </ul>",
             "</div>"
@@ -46,11 +46,11 @@
     {
         "title": "Domain Model",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/domains.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Import database schemas</li>",
             "  <li>Try in <span class='getting-started-demo'>Demo</span> mode</li>",
             " </ul>",
@@ -60,11 +60,11 @@
     {
         "title": "Caches",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/cache.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Configure memory settings</li>",
             " </ul>",
             "</div>"
@@ -73,11 +73,11 @@
     {
         "title": "In-memory File System",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/igfs.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Configure IGFS properties</li>",
             " </ul>",
             "</div>"
@@ -86,11 +86,11 @@
     {
         "title": "Preview configuration result",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/preview.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Preview configured project files</li>",
             "  <li>Download configured project files</li>",
             " </ul>",
@@ -100,11 +100,11 @@
     {
         "title": "SQL Queries",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/query-table.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Execute SQL Queries</li>",
             "  <li>View Execution Paln</li>",
             "  <li>View In-Memory Schema</li>",
@@ -116,11 +116,11 @@
     {
         "title": "Multicluster support",
         "message": [
-            "<div class='col-xs-7'>",
+            "<div class='col-60'>",
             " <img src='/images/multicluster.png' width='100%' />",
             "</div>",
-            "<div class='col-xs-5'>",
-            " <ul>",
+            "<div class='col-40'>",
+            " <ul class='align-top'>",
             "  <li>Execute queries on different clusters</li>",
             " </ul>",
             "</div>"
diff --git a/modules/web-console/frontend/app/directives/auto-focus.directive.js b/modules/web-console/frontend/app/directives/auto-focus.directive.js
index 326fe1f..9c17192 100644
--- a/modules/web-console/frontend/app/directives/auto-focus.directive.js
+++ b/modules/web-console/frontend/app/directives/auto-focus.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to auto-focus specified element.
-export default ['igniteAutoFocus', ['$timeout', ($timeout) => {
+export default ['igniteAutoFocus', ['$timeout', function($timeout) {
     return {
         restrict: 'AC',
         link(scope, element) {
diff --git a/modules/web-console/frontend/app/directives/bs-affix-update.directive.js b/modules/web-console/frontend/app/directives/bs-affix-update.directive.js
index 925722c..c2b1323 100644
--- a/modules/web-console/frontend/app/directives/bs-affix-update.directive.js
+++ b/modules/web-console/frontend/app/directives/bs-affix-update.directive.js
@@ -17,7 +17,7 @@
 
 import angular from 'angular';
 
-export default ['igniteBsAffixUpdate', ['$window', '$timeout', ($window, $timeout) => {
+export default ['igniteBsAffixUpdate', ['$window', '$timeout', function($window, $timeout) {
     let update = null;
 
     const link = ({$last}) => {
diff --git a/modules/web-console/frontend/app/directives/btn-ignite-link.js b/modules/web-console/frontend/app/directives/btn-ignite-link.js
index 5180c62..02aaf8f 100644
--- a/modules/web-console/frontend/app/directives/btn-ignite-link.js
+++ b/modules/web-console/frontend/app/directives/btn-ignite-link.js
@@ -15,13 +15,15 @@
  * limitations under the License.
  */
 
-export default () => ({
-    restrict: 'C',
-    link: (scope, $element) => {
-        $element.contents()
-            .filter(function() {
-                return this.nodeType === 3;
-            })
-            .wrap('<span></span>');
-    }
-});
+export default function() {
+    return {
+        restrict: 'C',
+        link: (scope, $element) => {
+            $element.contents()
+                .filter(function() {
+                    return this.nodeType === 3;
+                })
+                .wrap('<span></span>');
+        }
+    };
+}
diff --git a/modules/web-console/frontend/app/directives/centered/centered.directive.js b/modules/web-console/frontend/app/directives/centered/centered.directive.js
index 77bbb94..5e3b1c8 100644
--- a/modules/web-console/frontend/app/directives/centered/centered.directive.js
+++ b/modules/web-console/frontend/app/directives/centered/centered.directive.js
@@ -17,7 +17,7 @@
 
 import './centered.scss';
 
-export default ['centered', [() => {
+export default ['centered', [function() {
     return {
         restrict: 'E',
         transclude: true,
diff --git a/modules/web-console/frontend/app/directives/copy-to-clipboard.directive.js b/modules/web-console/frontend/app/directives/copy-to-clipboard.directive.js
index ee2110e..027cb09 100644
--- a/modules/web-console/frontend/app/directives/copy-to-clipboard.directive.js
+++ b/modules/web-console/frontend/app/directives/copy-to-clipboard.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive for copy to clipboard.
-export default ['igniteCopyToClipboard', ['IgniteCopyToClipboard', (CopyToClipboard) => {
+export default ['igniteCopyToClipboard', ['IgniteCopyToClipboard', function(CopyToClipboard) {
     return {
         restrict: 'A',
         link(scope, element, attrs) {
diff --git a/modules/web-console/frontend/app/directives/hide-on-state-change/hide-on-state-change.directive.js b/modules/web-console/frontend/app/directives/hide-on-state-change/hide-on-state-change.directive.js
index 152e942..6c8d702 100644
--- a/modules/web-console/frontend/app/directives/hide-on-state-change/hide-on-state-change.directive.js
+++ b/modules/web-console/frontend/app/directives/hide-on-state-change/hide-on-state-change.directive.js
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-export default ['hideOnStateChange', ['$transitions', ($transitions) => {
+export default ['hideOnStateChange', ['$transitions', function($transitions) {
     const link = (scope, element) => {
         $transitions.onSuccess({}, () => element.fadeOut('slow'));
     };
diff --git a/modules/web-console/frontend/app/directives/information/information.directive.js b/modules/web-console/frontend/app/directives/information/information.directive.js
index 6f304ef..3036262 100644
--- a/modules/web-console/frontend/app/directives/information/information.directive.js
+++ b/modules/web-console/frontend/app/directives/information/information.directive.js
@@ -17,7 +17,7 @@
 
 import template from './information.pug';
 
-export default ['igniteInformation', [() => {
+export default ['igniteInformation', [function() {
     return {
         scope: {
             title: '@'
diff --git a/modules/web-console/frontend/app/directives/on-click-focus.directive.js b/modules/web-console/frontend/app/directives/on-click-focus.directive.js
index 5c9ee88..593a677 100644
--- a/modules/web-console/frontend/app/directives/on-click-focus.directive.js
+++ b/modules/web-console/frontend/app/directives/on-click-focus.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to describe element that should be focused on click.
-export default ['igniteOnClickFocus', ['IgniteFocus', (Focus) => {
+export default ['igniteOnClickFocus', ['IgniteFocus', function(Focus) {
     return function(scope, elem, attrs) {
         elem.on('click', () => Focus.move(attrs.igniteOnClickFocus));
 
diff --git a/modules/web-console/frontend/app/directives/on-enter-focus-move.directive.js b/modules/web-console/frontend/app/directives/on-enter-focus-move.directive.js
index 2dd2884..75ae1e2 100644
--- a/modules/web-console/frontend/app/directives/on-enter-focus-move.directive.js
+++ b/modules/web-console/frontend/app/directives/on-enter-focus-move.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to move focus to specified element on ENTER key.
-export default ['igniteOnEnterFocusMove', ['IgniteFocus', (Focus) => {
+export default ['igniteOnEnterFocusMove', ['IgniteFocus', function(Focus) {
     return function(scope, elem, attrs) {
         elem.on('keydown keypress', (event) => {
             if (event.which === 13) {
diff --git a/modules/web-console/frontend/app/directives/on-enter.directive.js b/modules/web-console/frontend/app/directives/on-enter.directive.js
index 459220e..2ec75f9 100644
--- a/modules/web-console/frontend/app/directives/on-enter.directive.js
+++ b/modules/web-console/frontend/app/directives/on-enter.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to bind ENTER key press with some user action.
-export default ['igniteOnEnter', ['$timeout', ($timeout) => {
+export default ['igniteOnEnter', ['$timeout', function($timeout) {
     return function(scope, elem, attrs) {
         elem.on('keydown keypress', (event) => {
             if (event.which === 13) {
diff --git a/modules/web-console/frontend/app/directives/on-escape.directive.js b/modules/web-console/frontend/app/directives/on-escape.directive.js
index aa1accd..dafc176 100644
--- a/modules/web-console/frontend/app/directives/on-escape.directive.js
+++ b/modules/web-console/frontend/app/directives/on-escape.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to bind ESC key press with some user action.
-export default ['igniteOnEscape', ['$timeout', ($timeout) => {
+export default ['igniteOnEscape', ['$timeout', function($timeout) {
     return function(scope, elem, attrs) {
         elem.on('keydown keypress', (event) => {
             if (event.which === 27) {
diff --git a/modules/web-console/frontend/app/directives/restore-input-focus.directive.js b/modules/web-console/frontend/app/directives/restore-input-focus.directive.js
index 32e6622..97a1491 100644
--- a/modules/web-console/frontend/app/directives/restore-input-focus.directive.js
+++ b/modules/web-console/frontend/app/directives/restore-input-focus.directive.js
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-export default [() => {
+export default [function() {
     return ($scope, $element) => {
         $element.on('click', () => {
             $element.siblings('.input-tip').find('input').focus();
diff --git a/modules/web-console/frontend/app/directives/retain-selection.directive.js b/modules/web-console/frontend/app/directives/retain-selection.directive.js
index 74d6872..24a55d1 100644
--- a/modules/web-console/frontend/app/directives/retain-selection.directive.js
+++ b/modules/web-console/frontend/app/directives/retain-selection.directive.js
@@ -16,7 +16,7 @@
  */
 
 // Directive to workaround known issue with type ahead edit lost cursor position.
-export default ['igniteRetainSelection', ['$timeout', ($timeout) => {
+export default ['igniteRetainSelection', ['$timeout', function($timeout) {
     let promise;
 
     return function(scope, elem) {
diff --git a/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.directive.js b/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.directive.js
index 9042acb..e83c315 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-docker/ui-ace-docker.directive.js
@@ -18,7 +18,7 @@
 import template from './ui-ace-docker.pug';
 import controller from './ui-ace-docker.controller';
 
-export default ['igniteUiAceDocker', [() => {
+export default ['igniteUiAceDocker', [function() {
     const link = ($scope, $el, $attrs, [igniteUiAceTabs]) => {
         if (igniteUiAceTabs.onLoad)
             $scope.onLoad = igniteUiAceTabs.onLoad;
diff --git a/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.directive.js b/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.directive.js
index 62eb376..b4f945d 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-java/ui-ace-java.directive.js
@@ -18,7 +18,7 @@
 import template from './ui-ace-java.pug';
 import IgniteUiAceJava from './ui-ace-java.controller';
 
-export default () => {
+export default function() {
     return {
         priority: 1,
         restrict: 'E',
@@ -41,4 +41,4 @@
             ngModelCtrl: '?ngModel'
         }
     };
-};
+}
diff --git a/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.directive.js b/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.directive.js
index 8a8d047..864f705 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-pojos/ui-ace-pojos.directive.js
@@ -18,7 +18,7 @@
 import template from './ui-ace-pojos.pug';
 import controller from './ui-ace-pojos.controller';
 
-export default ['igniteUiAcePojos', [() => {
+export default ['igniteUiAcePojos', [function() {
     const link = ($scope, $el, $attrs, [igniteUiAceTabs]) => {
         if (igniteUiAceTabs.onLoad)
             $scope.onLoad = igniteUiAceTabs.onLoad;
diff --git a/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.directive.js b/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.directive.js
index 664d3a0..87083ab 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-pom/ui-ace-pom.directive.js
@@ -18,7 +18,7 @@
 import template from './ui-ace-pom.pug';
 import controller from './ui-ace-pom.controller';
 
-export default ['igniteUiAcePom', [() => {
+export default ['igniteUiAcePom', [function() {
     const link = ($scope, $el, $attrs, [igniteUiAceTabs]) => {
         if (igniteUiAceTabs.onLoad)
             $scope.onLoad = igniteUiAceTabs.onLoad;
diff --git a/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.directive.js b/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.directive.js
index 5a37b80..094028b 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-sharp/ui-ace-sharp.directive.js
@@ -15,10 +15,12 @@
  * limitations under the License.
  */
 
+import _ from 'lodash';
+
 import template from './ui-ace-sharp.pug';
 import controller from './ui-ace-sharp.controller';
 
-export default ['igniteUiAceSharp', ['IgniteSharpTransformer', (generator) => {
+export default ['igniteUiAceSharp', ['IgniteSharpTransformer', function(generator) {
     const link = (scope, $el, attrs, [ctrl, igniteUiAceTabs, formCtrl, ngModelCtrl]) => {
         if (formCtrl && ngModelCtrl)
             formCtrl.$removeControl(ngModelCtrl);
diff --git a/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.directive.js b/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.directive.js
index 8655fd1..532ba42 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-spring/ui-ace-spring.directive.js
@@ -18,7 +18,7 @@
 import template from './ui-ace-spring.pug';
 import IgniteUiAceSpring from './ui-ace-spring.controller';
 
-export default () => {
+export default function() {
     return {
         priority: 1,
         restrict: 'E',
@@ -41,4 +41,4 @@
             ngModelCtrl: '?ngModel'
         }
     };
-};
+}
diff --git a/modules/web-console/frontend/app/directives/ui-ace-tabs.directive.js b/modules/web-console/frontend/app/directives/ui-ace-tabs.directive.js
index 2b90a72..6d11f58 100644
--- a/modules/web-console/frontend/app/directives/ui-ace-tabs.directive.js
+++ b/modules/web-console/frontend/app/directives/ui-ace-tabs.directive.js
@@ -15,7 +15,9 @@
  * limitations under the License.
  */
 
-export default ['igniteUiAceTabs', [() => {
+import _ from 'lodash';
+
+export default ['igniteUiAceTabs', [function() {
     return {
         scope: true,
         restrict: 'AE',
diff --git a/modules/web-console/frontend/app/helpers/jade/form.pug b/modules/web-console/frontend/app/helpers/jade/form.pug
deleted file mode 100644
index 44eaed9..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form.pug
+++ /dev/null
@@ -1,26 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-include ./form/form-field-feedback
-include ./form/form-field-label
-include ./form/form-field-text
-include ./form/form-field-password
-include ./form/form-field-dropdown
-include ./form/form-field-datalist
-include ./form/form-field-checkbox
-include ./form/form-field-number
-include ./form/form-field-up
-include ./form/form-field-down
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-checkbox.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-checkbox.pug
deleted file mode 100644
index a8236a9..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-checkbox.pug
+++ /dev/null
@@ -1,44 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin form-field-checkbox(label, model, name, disabled, required, tip)
-        label.form-field-checkbox.ignite-form-field
-            .ignite-form-field__control
-                input(
-                    id=`{{ ${name} }}Input`
-                    name=`{{ ${name} }}`
-                    type='checkbox'
-
-                    ng-model=model
-                    ng-required=required && `${required}`
-                    ng-disabled=disabled && `${disabled}`
-                    expose-ignite-form-field-control='$input'
-                )&attributes(attributes ? attributes.attributes ? attributes.attributes : attributes : {})
-                span #{label}
-                +tooltip(tip, tipOpts, 'tipLabel')
-            .ignite-form-field__errors(
-                ng-messages=`$input.$error`
-                ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
-            )
-                if block
-                    block
-                if required
-                    +form-field-feedback(name, 'required', `${errLbl} could not be empty!`)
-
-mixin sane-form-field-checkbox({label, model, name, disabled, required, tip})
-    +form-field-checkbox(label, model, name, disabled = false, required = false, tip)&attributes(attributes)
-        if block
-            block
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-datalist.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-datalist.pug
deleted file mode 100644
index 888634b..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-datalist.pug
+++ /dev/null
@@ -1,52 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin form-field-datalist(label, model, name, disabled, required, placeholder, options, tip)
-    -var errLbl = label.substring(0, label.length - 1)
-
-    mixin form-field-input()
-        input.form-control(
-            id=`{{ ${name} }}Input`
-            name=`{{ ${name} }}`
-            placeholder=placeholder
-           
-            ng-model=model
-
-            ng-required=required && `${required}`
-            ng-disabled=disabled && `${disabled}` || `!${options}.length`
-
-            bs-typeahead
-            bs-options=`item for item in ${options}`
-            container='body'
-            data-min-length='1'
-            ignite-retain-selection
-            expose-ignite-form-field-control='$input'
-        )&attributes(attributes.attributes)
-
-    .ignite-form-field
-        +ignite-form-field__label(label, name, required, disabled)
-            +tooltip(tip, tipOpts)
-        .ignite-form-field__control
-            .input-tip
-                +form-field-input(attributes=attributes)
-        .ignite-form-field__errors(
-            ng-messages=`$input.$error`
-            ng-if=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
-        )
-            if block
-                block
-
-            +form-field-feedback(name, 'required', `${errLbl} could not be empty!`)
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-down.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-down.pug
deleted file mode 100644
index 1ced54c..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-down.pug
+++ /dev/null
@@ -1,18 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-down()
-    i.tipField.fa.fa-arrow-down(ignite-form-field-down ng-click='vm.down()')&attributes(attributes)
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug
deleted file mode 100644
index c6579e3..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-dropdown.pug
+++ /dev/null
@@ -1,60 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-dropdown(label, model, name, disabled, required, multiple, placeholder, placeholderEmpty, options, tip)
-    mixin form-field-input()
-        -var errLbl = label.substring(0, label.length - 1)
-
-        button.select-toggle.form-control(
-            type='button'
-            id=`{{ ${name} }}Input`
-            name=`{{ ${name} }}`
-
-            data-placeholder=placeholderEmpty ? `{{ ${options}.length > 0 ? '${placeholder}' : '${placeholderEmpty}' }}` : placeholder
-            
-            ng-model=model
-            ng-disabled=disabled && `${disabled}`
-            ng-required=required && `${required}`
-
-            bs-select
-            bs-options=`item.value as item.label for item in ${options}`
-            expose-ignite-form-field-control='$input'
-
-            data-multiple=multiple ? '1' : false
-
-            tabindex='0'
-        )&attributes(attributes.attributes)
-
-    .ignite-form-field.ignite-form-field-dropdown
-        +ignite-form-field__label(label, name, required, disabled)
-            +tooltip(tip, tipOpts)
-        .ignite-form-field__control
-            .input-tip
-                +form-field-input(attributes=attributes)
-        .ignite-form-field__errors(
-            ng-messages=`$input.$error`
-            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
-        )
-            if block
-                block
-
-            if required
-                +form-field-feedback(name, 'required', multiple ? 'At least one option should be selected' : 'An option should be selected')
-
-mixin sane-ignite-form-field-dropdown({label, model, name, disabled = false, required = false, multiple = false, placeholder, placeholderEmpty, options, tip})
-    +ignite-form-field-dropdown(label, model, name, disabled, required, multiple, placeholder, placeholderEmpty, options, tip)&attributes(attributes)
-        if block
-            block
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-feedback.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-feedback.pug
deleted file mode 100644
index dcdcf0e..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-feedback.pug
+++ /dev/null
@@ -1,18 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin form-field-feedback(name, error, message)
-    div(ng-message=error) #{message}
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-label.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-label.pug
deleted file mode 100644
index 2edd115..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-label.pug
+++ /dev/null
@@ -1,25 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field__label(label, name, required, disabled)
-    label.ignite-form-field__label(
-        id=`{{ ${name} }}Label`
-        for=`{{ ${name} }}Input`
-        ng-class=disabled && `{'ignite-form-field__label-disabled': ${disabled}}`
-    )
-        span(class=`{{ ${required} ? 'required' : '' }}`) !{label}
-        if block
-            block
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-number.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-number.pug
deleted file mode 100644
index 75f2a20..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-number.pug
+++ /dev/null
@@ -1,59 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-number(label, model, name, disabled, required, placeholder, min, max, step, tip)
-    -var errLbl = label.substring(0, label.length - 1)
-
-    mixin form-field-input()
-        input.form-control(
-            id=`{{ ${name} }}Input`
-            name=`{{ ${name} }}`
-            placeholder=placeholder
-            type='number'
-
-            min=min ? min : '0'
-            max=max ? max : '{{ Number.MAX_VALUE }}'
-            step=step ? step : '1'
-
-            ng-model=model
-
-            ng-required=required && `${required}`
-            ng-disabled=disabled && `${disabled}`
-            expose-ignite-form-field-control='$input'
-        )&attributes(attributes.attributes)
-
-    .ignite-form-field
-        +ignite-form-field__label(label, name, required, disabled)
-            +tooltip(tip, tipOpts)
-        .ignite-form-field__control
-            .input-tip
-                +form-field-input(attributes=attributes)
-        .ignite-form-field__errors(
-            ng-messages=`$input.$error`
-            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
-        )
-            if block
-                block
-            +form-field-feedback(name, 'required', `${errLbl} could not be empty`)
-            +form-field-feedback(name, 'min', `${errLbl} is less than allowable minimum: ${min || 0}`)
-            +form-field-feedback(name, 'max', `${errLbl} is more than allowable maximum: ${max}`)
-            +form-field-feedback(name, 'number', `Only numbers are allowed`)
-            +form-field-feedback(name, 'step', `${errLbl} step should be ${step || 1}`)
-
-mixin sane-ignite-form-field-number({label, model, name, disabled = 'false', required = false, placeholder, min = '0', max, step = '1', tip})
-    +ignite-form-field-number(label, model, name, disabled, required, placeholder, min, max, step, tip)&attributes(attributes)
-        if block
-            block
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-password.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-password.pug
deleted file mode 100644
index 3e35974..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-password.pug
+++ /dev/null
@@ -1,47 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-password-input(name, model, disabled, required, placeholder)
-    input.form-control(
-        id=`{{ ${name} }}Input`
-        name=`{{ ${name} }}`
-        placeholder=placeholder
-        type='password'
-
-        ng-model=model
-
-        ng-required=required && `${required}`
-        ng-disabled=disabled && `${disabled}`
-        expose-ignite-form-field-control='$input'
-    )&attributes(attributes ? attributes.attributes ? attributes.attributes : attributes : {})
-
-mixin ignite-form-field-password(label, model, name, disabled, required, placeholder, tip)
-    -var errLbl = label.substring(0, label.length - 1)
-
-    .ignite-form-field
-        +ignite-form-field__label(label, name, required, disabled)
-            +tooltip(tip, tipOpts)
-        .ignite-form-field__control
-            .input-tip
-                +ignite-form-field-password-input(name, model, disabled, required, placeholder)(attributes=attributes)
-        .ignite-form-field__errors(
-            ng-messages=`$input.$error`
-            ng-if=`!$input.$pristine && $input.$invalid`
-        )
-            if block
-                block
-
-            +form-field-feedback(name, 'required', `${errLbl} could not be empty!`)
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-text.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-text.pug
deleted file mode 100644
index 3d28e17..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-text.pug
+++ /dev/null
@@ -1,53 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-input(name, model, disabled, required, placeholder)
-    input.form-control(
-        id=`{{ ${name} }}Input`
-        name=`{{ ${name} }}`
-        placeholder=placeholder
-
-        ng-model=model
-
-        ng-required=required && `${required}`
-        ng-disabled=disabled && `${disabled}`
-        expose-ignite-form-field-control='$input'
-
-    )&attributes(attributes ? attributes.attributes ? attributes.attributes : attributes : {})
-
-mixin ignite-form-field-text(lbl, model, name, disabled, required, placeholder, tip)
-    -let errLbl = lbl[lbl.length - 1] === ':' ? lbl.substring(0, lbl.length - 1) : lbl
-
-    .ignite-form-field
-        +ignite-form-field__label(lbl, name, required, disabled)
-            +tooltip(tip, tipOpts)
-        .ignite-form-field__control
-            .input-tip
-                +ignite-form-field-input(name, model, disabled, required, placeholder)(attributes=attributes)
-        .ignite-form-field__errors(
-            ng-messages=`$input.$error`
-            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
-        )
-            if block
-                block
-
-            if required
-                +form-field-feedback(name, 'required', `${errLbl} could not be empty!`)
-
-mixin sane-ignite-form-field-text({label, model, name, disabled, required, placeholder, tip})
-    +ignite-form-field-text(label, model, name, disabled, required, placeholder, tip)&attributes(attributes)
-        if block
-            block
diff --git a/modules/web-console/frontend/app/helpers/jade/form/form-field-up.pug b/modules/web-console/frontend/app/helpers/jade/form/form-field-up.pug
deleted file mode 100644
index 3522fb5..0000000
--- a/modules/web-console/frontend/app/helpers/jade/form/form-field-up.pug
+++ /dev/null
@@ -1,18 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-up()
-    i.tipField.fa.fa-arrow-up.ng-scope(ignite-form-field-up ng-click='vm.up()')&attributes(attributes)
diff --git a/modules/web-console/frontend/app/helpers/jade/mixins.pug b/modules/web-console/frontend/app/helpers/jade/mixins.pug
index 62290c4..b2aa8e5 100644
--- a/modules/web-console/frontend/app/helpers/jade/mixins.pug
+++ b/modules/web-console/frontend/app/helpers/jade/mixins.pug
@@ -14,296 +14,138 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-include ./form
 include ../../primitives/btn-group/index
 include ../../primitives/datepicker/index
 include ../../primitives/timepicker/index
 include ../../primitives/dropdown/index
-include ../../primitives/tooltip/index
-include ../../primitives/radio/index
 include ../../primitives/switcher/index
 include ../../primitives/form-field/index
 
-//- Mixin for advanced options toggle.
-mixin advanced-options-toggle(click, cond, showMessage, hideMessage)
-    .advanced-options
-        i.fa(
-            ng-click=`${click}`
-            ng-class=`${cond} ? 'fa-chevron-circle-down' : 'fa-chevron-circle-right'`
-        )
-        a(ng-click=click) {{ #{cond} ? '#{hideMessage}' : '#{showMessage}' }}
-
-//- Mixin for advanced options toggle with default settings.
-mixin advanced-options-toggle-default
-    +advanced-options-toggle('toggleExpanded()', 'ui.expanded', 'Show advanced settings...', 'Hide advanced settings...')
-
-//- Mixin for main table on screen with list of items.
-mixin main-table(title, rows, focusId, click, rowTemplate, searchField)
-    .padding-bottom-dflt(ng-show=`${rows} && ${rows}.length > 0`)
-        table.links(st-table='displayedRows' st-safe-src=`${rows}`)
-            thead
-                tr
-                    th
-                        label.labelHeader.labelFormField #{title}:
-                        .col-sm-3.pull-right(style='padding: 0')
-                            input.form-control(type='text' st-search=`${searchField}` placeholder=`Filter ${title}...`)
-            tbody
-                tr
-                    td
-                        .scrollable-y(ng-show='displayedRows.length > 0' style='max-height: 200px')
-                            table
-                                tbody
-                                    tr(ng-repeat='row in displayedRows track by row._id' ignite-bs-affix-update)
-                                        td
-                                            a(ng-class='{active: row._id == selectedItem._id}' ignite-on-click-focus=focusId ng-click=click) #{rowTemplate}
-                        label.placeholder(ng-show='displayedRows.length == 0') No #{title} found
-
-//- Mixin with save, remove, clone and undo buttons.
-mixin save-remove-clone-undo-buttons(objectName)
-    -var removeTip = 'Remove current ' + objectName
-    -var cloneTip = '"Clone current ' + objectName + '"'
-    -var undoTip = '"Undo all changes for current ' + objectName + '"'
-
-    button.btn-ignite.btn-ignite--success(
-        ng-disabled='!ui.inputForm.$dirty'
-        ng-click='ui.inputForm.$dirty && saveItem()'
-    ) Save
-    button.btn-ignite.btn-ignite--success(
-        ng-show='backupItem._id && contentVisible()'
-        type='button'
-        id='clone-item'
-        ng-click='cloneItem()'
-    ) Clone
-
-    .btn-ignite-group(ng-show='backupItem._id && contentVisible()')
-        button.btn-ignite.btn-ignite--success(
-            ng-click='removeItem()'
-            type='button'
-        )
-            | Remove
-        button.btn-ignite.btn-ignite--success(
-            bs-dropdown='$ctrl.extraFormActions'
-            data-placement='top-right'
-            type='button'
-        )
-            span.icon.fa.fa-caret-up
-
-    button.btn-ignite.btn-ignite--success(
-        ng-show='contentVisible()'
-        id='undo-item'
-        ng-disabled='!ui.inputForm.$dirty'
-        ng-click='ui.inputForm.$dirty && resetAll()'
-        bs-tooltip=undoTip
-        data-placement='top'
-        data-trigger='hover'
-    )
-        i.icon.fa.fa-undo()
-
-//- Mixin for feedback on specified error.
-mixin error-feedback(visible, error, errorMessage, name)
-    i.fa.fa-exclamation-triangle.form-control-feedback(
-        ng-if=visible
-        bs-tooltip=`'${errorMessage}'`
-        ignite-error=error
-        ignite-error-message=errorMessage
-        name=name
-    )
-
-//- Mixin for feedback on unique violation.
-mixin unique-feedback(name, errorMessage)
-    +form-field-feedback(name, 'igniteUnique', errorMessage)
-
-//- Mixin for feedback on IP address violation.
-mixin ipaddress-feedback(name)
-    +form-field-feedback(name, 'ipaddress', 'Invalid address!')
-
-//- Mixin for feedback on port of IP address violation.
-mixin ipaddress-port-feedback(name)
-    +form-field-feedback(name, 'ipaddressPort', 'Invalid port!')
-
-//- Mixin for feedback on port range violation.
-mixin ipaddress-port-range-feedback(name)
-    +form-field-feedback(name, 'ipaddressPortRange', 'Invalid port range!')
-
-//- Mixin for feedback on UUID violation.
-mixin uuid-feedback(name)
-    +form-field-feedback(name, 'uuid', 'Invalid node ID!')
-
 //- Function that convert enabled state to corresponding disabled state.
 -var enabledToDisabled = function (enabled) {
 -    return (enabled === false || enabled === true) ? !enabled : '!(' + enabled + ')';
 -}
 
-//- Mixin for checkbox.
-mixin checkbox(lbl, model, name, tip)
-    +form-field-checkbox(lbl, model, name, false, false, tip)
+mixin form-field__java-class({ label, model, name, disabled, required, tip, placeholder, validationActive })
+    -var errLbl = label.substring(0, label.length - 1)
 
-//- Mixin for checkbox with enabled condition.
-mixin checkbox-enabled(lbl, model, name, enabled, tip)
-    +form-field-checkbox(lbl, model, name, enabledToDisabled(enabled), false, tip)
-
-//- Mixin for Java class name field with auto focus condition.
-mixin java-class-autofocus-placholder(lbl, model, name, enabled, required, autofocus, placeholder, tip, validationActive)
-    -var errLbl = lbl.substring(0, lbl.length - 1)
-
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), required, placeholder, tip)(
+    +form-field__text({
+        label,
+        model,
+        name,
+        disabled,
+        required,
+        placeholder: placeholder || 'Enter fully qualified class name',
+        tip
+    })(
         data-java-identifier='true'
         data-java-package-specified='true'
         data-java-keywords='true'
         data-java-built-in-class='true'
-        data-ignite-form-field-input-autofocus=autofocus
         data-validation-active=validationActive ? `{{ ${validationActive} }}` : `'always'`
     )&attributes(attributes)
         if  block
             block
 
-        +form-field-feedback(name, 'javaBuiltInClass', errLbl + ' should not be the Java built-in class!')
-        +form-field-feedback(name, 'javaKeywords', errLbl + ' could not contains reserved Java keyword!')
-        +form-field-feedback(name, 'javaPackageSpecified', errLbl + ' does not have package specified!')
-        +form-field-feedback(name, 'javaIdentifier', errLbl + ' is invalid Java identifier!')
-
-//- Mixin for Java class name field with auto focus condition.
-mixin java-class-autofocus(lbl, model, name, enabled, required, autofocus, tip, validationActive)
-    +java-class-autofocus-placholder(lbl, model, name, enabled, required, autofocus, 'Enter fully qualified class name', tip, validationActive)&attributes(attributes)
-        if  block
-            block
-
-//- Mixin for Java class name field.
-mixin java-class(lbl, model, name, enabled, required, tip, validationActive)
-    +java-class-autofocus(lbl, model, name, enabled, required, 'false', tip, validationActive)
-        if  block
-            block
+        +form-field__error({ error: 'javaBuiltInClass', message: `${ errLbl } should not be the Java built-in class!` })
+        +form-field__error({ error: 'javaKeywords', message: `${ errLbl } could not contains reserved Java keyword!` })
+        +form-field__error({ error: 'javaPackageSpecified', message: `${ errLbl } does not have package specified!` })
+        +form-field__error({ error: 'javaIdentifier', message: `${ errLbl } is invalid Java identifier!` })
 
 //- Mixin for text field with enabled condition with options.
-mixin java-class-typeahead(lbl, model, name, options, enabled, required, placeholder, tip, validationActive)
-    -var errLbl = lbl.substring(0, lbl.length - 1)
+mixin form-field__java-class--typeahead({ label, model, name, options, disabled, required, placeholder, tip, validationActive })
+    -var errLbl = label.substring(0, label.length - 1)
 
-    +form-field-datalist(lbl, model, name, enabledToDisabled(enabled), required, placeholder, options, tip)&attributes(attributes)(
+    +form-field__typeahead({
+        label,
+        model,
+        name,
+        disabled,
+        required,
+        placeholder,
+        options,
+        tip
+    })&attributes(attributes)(
         data-java-identifier='true'
         data-java-package-specified='allow-built-in'
         data-java-keywords='true'
         data-validation-active=validationActive ? `{{ ${validationActive} }}` : `'always'`
     )
-        +form-field-feedback(name, 'javaKeywords', errLbl + ' could not contains reserved Java keyword!')
-        +form-field-feedback(name, 'javaPackageSpecified', errLbl + ' does not have package specified!')
-        +form-field-feedback(name, 'javaIdentifier', errLbl + ' is invalid Java identifier!')
+        +form-field__error({ error: 'javaKeywords', message: `${ errLbl } could not contains reserved Java keyword!` })
+        +form-field__error({ error: 'javaPackageSpecified', message: `${ errLbl } does not have package specified!` })
+        +form-field__error({ error: 'javaIdentifier', message: `${ errLbl } is invalid Java identifier!` })
 
-//- Mixin for java package field with enabled condition.
-mixin java-package(lbl, model, name, enabled, required, tip)
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), required, 'Enter package name', tip)(
+
+mixin form-field__java-package({ label, model, name, disabled, required, tip, tipOpts, placeholder })
+    +form-field__text({
+        label,
+        model,
+        name,
+        disabled,
+        required,
+        tip,
+        tipOpts,
+        placeholder
+    })(
         data-java-keywords='true'
         data-java-package-name='package-only'
-    )
-        +form-field-feedback(name, 'javaPackageName', 'Package name is invalid')
-        +form-field-feedback(name, 'javaKeywords', 'Package name could not contains reserved java keyword')
-
-//- Mixin for text field with IP address check.
-mixin text-ip-address(lbl, model, name, enabled, placeholder, tip)
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), false, placeholder, tip)(data-ipaddress='true')
-        +ipaddress-feedback(name)
-
-//- Mixin for text field with IP address and port range check.
-mixin text-ip-address-with-port-range(lbl, model, name, enabled, placeholder, tip)
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), false, placeholder, tip)(data-ipaddress='true' data-ipaddress-with-port='true' data-ipaddress-with-port-range='true')
-        +ipaddress-feedback(name)
-        +ipaddress-port-feedback(name)
-        +ipaddress-port-range-feedback(name)
-
-//- Mixin for text field.
-mixin text-enabled(lbl, model, name, enabled, required, placeholder, tip)
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), required, placeholder, tip)
-        if  block
-            block
-
-//- Mixin for text field with autofocus.
-mixin text-enabled-autofocus(lbl, model, name, enabled, required, placeholder, tip)
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), required, placeholder, tip)(
-        data-ignite-form-field-input-autofocus='true'
     )&attributes(attributes)
         if  block
             block
 
-//- Mixin for text field.
-mixin text(lbl, model, name, required, placeholder, tip)
-    +ignite-form-field-text(lbl, model, name, false, required, placeholder, tip)
-        if  block
-            block
+        +form-field__error({ error: 'javaPackageName', message: 'Package name is invalid!' })
+        +form-field__error({ error: 'javaKeywords', message: 'Package name could not contains reserved java keyword!' })
+
+//- Mixin for text field with IP address check.
+mixin form-field__ip-address({ label, model, name, enabled, placeholder, tip })
+    +form-field__text({
+        label,
+        model,
+        name,
+        disabled: enabledToDisabled(enabled),
+        placeholder,
+        tip
+    })(data-ipaddress='true')
+        +form-field__error({ error: 'ipaddress', message: 'Invalid address!' })
+
+//- Mixin for text field with IP address and port range check.
+mixin form-field__ip-address-with-port-range({ label, model, name, enabled, placeholder, tip })
+    +form-field__text({
+        label,
+        model,
+        name,
+        disabled: enabledToDisabled(enabled),
+        placeholder,
+        tip
+    })(
+        data-ipaddress='true'
+        data-ipaddress-with-port='true'
+        data-ipaddress-with-port-range='true'
+    )
+        +form-field__error({ error: 'ipaddress', message: 'Invalid address!' })
+        +form-field__error({ error: 'ipaddressPort', message: 'Invalid port!' })
+        +form-field__error({ error: 'ipaddressPortRange', message: 'Invalid port range!' })
 
 //- Mixin for url field.
-mixin url(lbl, model, name, enabled, required, placeholder, tip)
-    -var errLbl = lbl.substring(0, lbl.length - 1)
+mixin form-field__url({ label, model, name, enabled, required, placeholder, tip })
+    -var errLbl = label.substring(0, label.length - 1)
 
-    +ignite-form-field-text(lbl, model, name, enabledToDisabled(enabled), required, placeholder, tip)(type='url')
-        if  block
-            block
-
-        +form-field-feedback(name, 'url', errLbl + ' should be a valid URL!')
-
-//- Mixin for password field.
-mixin password(lbl, model, name, required, placeholder, tip)
-    +ignite-form-field-password(lbl, model, name, false, required, placeholder, tip)
-        if  block
-            block
-
-//- Mixin for text field with enabled condition with options.
-mixin text-options(lbl, model, name, options, enabled, required, placeholder, tip)
-    +form-field-datalist(lbl, model, name, enabledToDisabled(enabled), required, placeholder, options, tip)
-
-//- Mixin for required numeric field.
-mixin number-required(lbl, model, name, enabled, required, placeholder, min, tip)
-    +ignite-form-field-number(lbl, model, name, enabledToDisabled(enabled), required, placeholder, min, false, false, tip)
-
-//- Mixin for required numeric field with maximum and minimum limit.
-mixin number-min-max(lbl, model, name, enabled, placeholder, min, max, tip)
-    +ignite-form-field-number(lbl, model, name, enabledToDisabled(enabled), false, placeholder, min, max, '1', tip)
-
-//- Mixin for required numeric field with maximum and minimum limit.
-mixin number-min-max-step(lbl, model, name, enabled, placeholder, min, max, step, tip)
-    +ignite-form-field-number(lbl, model, name, enabledToDisabled(enabled), false, placeholder, min, max, step, tip)
-
-//- Mixin for numeric field.
-mixin number(lbl, model, name, enabled, placeholder, min, tip)
-    +ignite-form-field-number(lbl, model, name, enabledToDisabled(enabled), false, placeholder, min, false, false, tip)
-
-//- Mixin for required dropdown field.
-mixin dropdown-required-empty(lbl, model, name, enabled, required, placeholder, placeholderEmpty, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), required, false, placeholder, placeholderEmpty, options, tip)&attributes(attributes)
-        if  block
-            block
-
-//- Mixin for required dropdown field with autofocus.
-mixin dropdown-required-empty-autofocus(lbl, model, name, enabled, required, placeholder, placeholderEmpty, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), required, false, placeholder, placeholderEmpty, options, tip)(
-        data-ignite-form-field-input-autofocus='true'
+    +form-field__text({
+        label,
+        model,
+        name,
+        disabled: enabledToDisabled(enabled),
+        required,
+        placeholder,
+        tip
+    })(
+        type='url'
     )
         if  block
             block
 
-//- Mixin for required dropdown field.
-mixin dropdown-required(lbl, model, name, enabled, required, placeholder, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), required, false, placeholder, '', options, tip)&attributes(attributes)
-        if  block
-            block
+        +form-field__error({ error: 'url', message: `${ errLbl } should be a valid URL!` })
 
-//- Mixin for required dropdown field with autofocus.
-mixin dropdown-required-autofocus(lbl, model, name, enabled, required, placeholder, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), required, false, placeholder, '', options, tip)(
-        data-ignite-form-field-input-autofocus='true'
-    )
-        if  block
-            block
-
-//- Mixin for dropdown field.
-mixin dropdown(lbl, model, name, enabled, placeholder, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), false, false, placeholder, '', options, tip)
-        if  block
-            block
-
-//- Mixin for dropdown-multiple field.
-mixin dropdown-multiple(lbl, model, name, enabled, placeholder, placeholderEmpty, options, tip)
-    +ignite-form-field-dropdown(lbl, model, name, enabledToDisabled(enabled), false, true, placeholder, placeholderEmpty, options, tip)
-        if  block
-            block
 
 mixin list-text-field({ items, lbl, name, itemName, itemsName })
     list-editable(ng-model=items)&attributes(attributes)
@@ -311,9 +153,15 @@
             | {{ $item }}
 
         list-editable-item-edit
-            +ignite-form-field-text(lbl, '$item', `"${name}"`, false, true, `Enter ${lbl.toLowerCase()}`)(
-                data-ignite-unique=items
-                data-ignite-form-field-input-autofocus='true'
+            +form-field__text({
+                label: lbl,
+                model: '$item',
+                name: `"${name}"`,
+                required: true,
+                placeholder: `Enter ${lbl.toLowerCase()}`
+            })(
+                ignite-unique=items
+                ignite-form-field-input-autofocus='true'
             )
                 if  block
                     block
@@ -325,46 +173,44 @@
                 label-multiple=itemsName
             )
 
-mixin list-java-class-field(lbl, model, name, items)
-    +ignite-form-field-text(lbl, model, name, false, true, 'Enter fully qualified class name')(
-        data-java-identifier='true'
-        data-java-package-specified='true'
-        data-java-keywords='true'
-        data-java-built-in-class='true'
+mixin list-java-class-field(label, model, name, items)
+    +form-field__text({
+        label,
+        model,
+        name,
+        required: true,
+        placeholder: 'Enter fully qualified class name'
+    })(
+        java-identifier='true'
+        java-package-specified='true'
+        java-keywords='true'
+        java-built-in-class='true'
 
-        data-ignite-unique=items
-        data-ignite-form-field-input-autofocus='true'
+        ignite-unique=items
+        ignite-form-field-input-autofocus='true'
     )
-        +form-field-feedback(name, 'javaBuiltInClass', lbl + ' should not be the Java built-in class!')
-        +form-field-feedback(name, 'javaKeywords', lbl + ' could not contains reserved Java keyword!')
-        +form-field-feedback(name, 'javaPackageSpecified', lbl + ' does not have package specified!')
-        +form-field-feedback(name, 'javaIdentifier', lbl + ' is invalid Java identifier!')
+        +form-field__error({ error: 'javaBuiltInClass', message: `${ label } should not be the Java built-in class!` })
+        +form-field__error({ error: 'javaKeywords', message: `${ label } could not contains reserved Java keyword!` })
+        +form-field__error({ error: 'javaPackageSpecified', message: `${ label } does not have package specified!` })
+        +form-field__error({ error: 'javaIdentifier', message: `${ label } is invalid Java identifier!` })
 
         if block
             block
 
-mixin list-java-package-field(lbl, model, name, items)
-    +ignite-form-field-text(lbl, model, name, false, true, 'Enter package name')(
-        data-java-keywords='true'
-        data-java-package-name='package-only'
-
-        data-ignite-unique=items
-        data-ignite-form-field-input-autofocus='true'
-    )&attributes(attributes)
-        +form-field-feedback(name, 'javaKeywords', 'Package name could not contains reserved Java keyword!')
-        +form-field-feedback(name, 'javaPackageName', 'Package name is invalid!')
-
-        if block
-            block
-
-mixin list-url-field(lbl, model, name, items)
-    +ignite-form-field-text(lbl, model, name, false, true, 'Enter URL')(
+mixin list-url-field(label, model, name, items)
+    +form-field__text({
+        label,
+        model,
+        name,
+        required: true,
+        placeholder: 'Enter URL'
+    })(
         type='url'
 
-        data-ignite-unique=items
-        data-ignite-form-field-input-autofocus='true'
+        ignite-unique=items
+        ignite-form-field-input-autofocus='true'
     )
-        +form-field-feedback(name, 'url', 'URL should be valid!')
+        +form-field__error({ error: 'url', message: 'URL should be valid!' })
 
         if block
             block
@@ -377,18 +223,24 @@
     )&attributes(attributes)
         list-editable-item-view {{ $item }}
         list-editable-item-edit(item-name='address')
-            +ignite-form-field-text('Address', 'address', '"address"', false, true, 'IP address:port')(
-                data-ipaddress='true'
-                data-ipaddress-with-port='true'
-                data-ipaddress-with-port-range=withPortRange
-                data-ignite-unique=items
-                data-ignite-form-field-input-autofocus='true'
+            +form-field__text({
+                label: 'Address',
+                model: 'address',
+                name: '"address"',
+                required: true,
+                placeholder: 'IP address:port'
+            })(
+                ipaddress='true'
+                ipaddress-with-port='true'
+                ipaddress-with-port-range=withPortRange
+                ignite-unique=items
+                ignite-form-field-input-autofocus='true'
             )
-                +unique-feedback('"address"', 'Such IP address already exists!')
-                +ipaddress-feedback('"address"')
-                +ipaddress-port-feedback('"address"')
-                +ipaddress-port-range-feedback('"address"')
-                +form-field-feedback('"address"', 'required', 'IP address:port could not be empty!')
+                +form-field__error({ error: 'igniteUnique', message: 'Such IP address already exists!' })
+                +form-field__error({ error: 'ipaddress', message: 'Invalid address!' })
+                +form-field__error({ error: 'ipaddressPort', message: 'Invalid port!' })
+                +form-field__error({ error: 'ipaddressPortRange', message: 'Invalid port range!' })
+                +form-field__error({ error: 'required', message: 'IP address:port could not be empty!' })
 
         list-editable-no-items
             list-editable-add-item-button(
@@ -397,29 +249,32 @@
                 label-single='address'
             )
 
-//- Mixin for cache mode.
-mixin cacheMode(lbl, model, name, placeholder)
-    +dropdown(lbl, model, name, 'true', placeholder,
-        '[\
+
+mixin form-field__cache-modes({ label, model, name, placeholder })
+    +form-field__dropdown({
+        label, model, name, placeholder,
+        options: '[\
             {value: "LOCAL", label: "LOCAL"},\
             {value: "REPLICATED", label: "REPLICATED"},\
             {value: "PARTITIONED", label: "PARTITIONED"}\
         ]',
-        'Cache modes:\
+        tip: 'Cache modes:\
         <ul>\
             <li>PARTITIONED - in this mode the overall key set will be divided into partitions and all partitions will be split equally between participating nodes</li>\
             <li>REPLICATED - in this mode all the keys are distributed to all participating nodes</li>\
             <li>LOCAL - in this mode caches residing on different grid nodes will not know about each other</li>\
         </ul>'
-    )
+    })&attributes(attributes)
+        if  block
+            block
 
 //- Mixin for eviction policy.
-mixin evictionPolicy(model, name, enabled, required, tip)
+mixin form-field__eviction-policy({ model, name, enabled, required, tip })
     -var kind = model + '.kind'
     -var policy = model + '[' + kind + ']'
 
     .pc-form-grid-col-60
-        +sane-ignite-form-field-dropdown({
+        +form-field__dropdown({
             label: 'Eviction policy:',
             model: kind,
             name: `${name}+"Kind"`,
@@ -431,10 +286,17 @@
         })
     .pc-form-group.pc-form-grid-row(ng-if=kind)
         .pc-form-grid-col-30
-            +number('Batch size', policy + '.batchSize', name + '+ "batchSize"', enabled, '1', '1',
-                'Number of entries to remove on shrink')
+            +form-field__number({
+                label: 'Batch size',
+                model: policy + '.batchSize',
+                name: name + '+ "batchSize"',
+                disabled: enabledToDisabled(enabled),
+                placeholder: '1',
+                min: '1',
+                tip: 'Number of entries to remove on shrink'
+            })
         .pc-form-grid-col-30
-            pc-form-field-size(
+            form-field-size(
                 label='Max memory size:'
                 ng-model=`${policy}.maxMemorySize`
                 ng-model-options='{allowInvalid: true}'
@@ -446,9 +308,9 @@
                 size-scale-label='mb'
                 size-type='bytes'
             )
-                +form-field-feedback(null, 'min', 'Either maximum memory size or maximum size should be greater than 0')
+                +form-field__error({ error: 'min', message: 'Either maximum memory size or maximum size should be greater than 0' })
         .pc-form-grid-col-60
-            +sane-ignite-form-field-number({
+            +form-field__number({
                 label: 'Max size:',
                 model: policy + '.maxSize',
                 name: name + '+ "maxSize"',
@@ -459,33 +321,7 @@
             })(
                 ng-model-options='{allowInvalid: true}'
             )
-                +form-field-feedback(null, 'min', 'Either maximum memory size or maximum size should be greater than 0')
-
-//- Mixin for clusters dropdown.
-mixin clusters(model, tip)
-    +dropdown-multiple('Clusters:',
-        model + '.clusters', '"clusters"', true, 'Choose clusters', 'No clusters configured', 'clusters', tip)
-
-//- Mixin for caches dropdown.
-mixin caches(model, tip)
-    +dropdown-multiple('Caches:',
-        model + '.caches', '"caches"', true, 'Choose caches', 'No caches configured', 'caches', tip)
-
-//- Mixin for XML, Java, .Net preview.
-mixin preview(master, generator, detail)
-    ignite-ui-ace-tabs
-        .preview-panel(ng-init='mode = "spring"')
-            .preview-legend
-                a(ng-class='{active: mode === "spring"}' ng-click='mode = "spring"') Spring
-                a(ng-class='{active: mode === "java"}' ng-click='mode = "java"') Java
-                a(ng-class='{active: mode === "csharp"}' ng-click='mode = "csharp"') C#
-                //a(ng-class='{active: mode === "app.config"}' ng-click='mode = "app.config"') app.config
-            .preview-content(ng-switch='mode')
-                ignite-ui-ace-spring(ng-switch-when='spring' data-master=master data-generator=generator ng-model='$parent.data' data-detail=detail)
-                ignite-ui-ace-java(ng-switch-when='java' data-master=master data-generator=generator ng-model='$parent.data' data-detail=detail)
-                ignite-ui-ace-sharp(ng-switch-when='csharp' data-master=master data-generator=generator ng-model='$parent.data' data-detail=detail)
-            .preview-content-empty(ng-if='!data')
-                label All Defaults
+                +form-field__error({ error: 'min', message: 'Either maximum memory size or maximum size should be greater than 0' })
 
 //- Mixin for XML and Java preview.
 mixin preview-xml-java(master, generator, detail)
@@ -503,21 +339,6 @@
             .preview-content-empty(ng-if='!data')
                 label All Defaults
 
-//- LEGACY mixin for LEGACY tables.
-mixin btn-save(show, click)
-    i.tipField.fa.fa-floppy-o(ng-show=show ng-click=click bs-tooltip='' data-title='Click icon or press [Enter] to save item' data-trigger='hover')
-
-//- LEGACY mixin for LEGACY tables.
-mixin btn-add(click, tip)
-    i.tipField.fa.fa-plus(ng-click=click bs-tooltip=tip data-trigger = 'hover')
-
-//- LEGACY mixin for LEGACY tables.
-mixin btn-remove(click, tip)
-    i.tipField.fa.fa-remove(ng-click=click bs-tooltip=tip data-trigger='hover')
-
-//- LEGACY mixin for LEGACY tables.
-mixin btn-remove-cond(cond, click, tip)
-    i.tipField.fa.fa-remove(ng-show=cond ng-click=click bs-tooltip=tip data-trigger='hover')
 
 mixin list-pair-edit({ items, keyLbl, valLbl, itemName, itemsName })
     list-editable(ng-model=items)
@@ -528,14 +349,26 @@
             - form = '$parent.form'
             .pc-form-grid-row
                 .pc-form-grid-col-30(divider='=')
-                    +ignite-form-field-text(keyLbl, '$item.name', '"name"', false, true, keyLbl)(
-                        data-ignite-unique=items
-                        data-ignite-unique-property='name'
+                    +form-field__text({
+                        label: keyLbl,
+                        model: '$item.name',
+                        name: '"name"',
+                        required: true,
+                        placeholder: keyLbl
+                    })(
+                        ignite-unique=items
+                        ignite-unique-property='name'
                         ignite-auto-focus
                     )
-                        +unique-feedback('"name"', 'Property with such name already exists!')
+                        +form-field__error({ error: 'igniteUnique', message: 'Property with such name already exists!' })
                 .pc-form-grid-col-30
-                    +ignite-form-field-text(valLbl, '$item.value', '"value"', false, true, valLbl)
+                    +form-field__text({
+                        label: valLbl,
+                        model: '$item.value',
+                        name: '"value"',
+                        required: true,
+                        placeholder: valLbl
+                    })
 
         list-editable-no-items
             list-editable-add-item-button(
@@ -544,9 +377,14 @@
                 label-multiple=itemsName
             )
 
-//- Mixin for DB dialect.
-mixin dialect(lbl, model, name, required, tipTitle, genericDialectName, placeholder)
-    +dropdown-required(lbl, model, name, 'true', required, placeholder, '[\
+mixin form-field__dialect({ label, model, name, required, tip, genericDialectName, placeholder })
+    +form-field__dropdown({
+        label,
+        model,
+        name,
+        required,
+        placeholder,
+        options: '[\
                 {value: "Generic", label: "' + genericDialectName + '"},\
                 {value: "Oracle", label: "Oracle"},\
                 {value: "DB2", label: "IBM DB2"},\
@@ -555,13 +393,14 @@
                 {value: "PostgreSQL", label: "PostgreSQL"},\
                 {value: "H2", label: "H2 database"}\
         ]',
-        tipTitle +
-        '<ul>\
-            <li>' + genericDialectName + '</li>\
-            <li>Oracle database</li>\
-            <li>IBM DB2</li>\
-            <li>Microsoft SQL Server</li>\
-            <li>MySQL</li>\
-            <li>PostgreSQL</li>\
-            <li>H2 database</li>\
-        </ul>')
+        tip: `${ tip }
+            <ul>
+                <li>${ genericDialectName }</li>
+                <li>Oracle database</li>
+                <li>IBM DB2</li>
+                <li>Microsoft SQL Server</li>
+                <li>MySQL</li>
+                <li>PostgreSQL</li>
+                <li>H2 database</li>
+            </ul>`
+    })
diff --git a/modules/web-console/frontend/app/modules/ace.module.js b/modules/web-console/frontend/app/modules/ace.module.js
index 44e51ca..4decbe1 100644
--- a/modules/web-console/frontend/app/modules/ace.module.js
+++ b/modules/web-console/frontend/app/modules/ace.module.js
@@ -21,7 +21,7 @@
 angular
     .module('ignite-console.ace', [])
     .constant('igniteAceConfig', {})
-    .directive('igniteAce', ['igniteAceConfig', (aceConfig) => {
+    .directive('igniteAce', ['igniteAceConfig', function(aceConfig) {
         if (_.isUndefined(window.ace))
             throw new Error('ignite-ace need ace to work... (o rly?)');
 
diff --git a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js
index a1c0ff9..c5fcc99 100644
--- a/modules/web-console/frontend/app/modules/agent/AgentManager.service.js
+++ b/modules/web-console/frontend/app/modules/agent/AgentManager.service.js
@@ -20,6 +20,9 @@
 
 import { BehaviorSubject } from 'rxjs/BehaviorSubject';
 import 'rxjs/add/operator/first';
+import 'rxjs/add/operator/partition';
+import 'rxjs/add/operator/takeUntil';
+import 'rxjs/add/operator/pluck';
 
 import AgentModal from './AgentModal.service';
 // @ts-ignore
@@ -39,7 +42,7 @@
 
 const IGNITE_2_0 = '2.0.0';
 const LAZY_QUERY_SINCE = [['2.1.4-p1', '2.2.0'], '2.2.1'];
-const COLLOCATED_QUERY_SINCE = [['2.3.5', '2.4.0'], ['2.4.6', '2.5.0'], '2.5.2'];
+const COLLOCATED_QUERY_SINCE = [['2.3.5', '2.4.0'], ['2.4.6', '2.5.0'], ['2.5.1-p13', '2.6.0'], '2.7.0'];
 
 // Error codes from o.a.i.internal.processors.restGridRestResponse.java
 
@@ -115,7 +118,7 @@
 }
 
 export default class AgentManager {
-    static $inject = ['$rootScope', '$q', '$transitions', 'igniteSocketFactory', AgentModal.name, 'UserNotifications', 'IgniteVersion', ClusterLoginService.name];
+    static $inject = ['$rootScope', '$q', '$transitions', 'igniteSocketFactory', 'AgentModal', 'UserNotifications', 'IgniteVersion', 'ClusterLoginService'];
 
     /** @type {ng.IScope} */
     $root;
@@ -165,6 +168,11 @@
             .distinctUntilChanged(({ cluster }) => prevCluster === cluster)
             .do(({ cluster }) => prevCluster = cluster);
 
+        this.clusterIsActive$ = this.connectionSbj
+            .map(({ cluster }) => cluster)
+            .filter((cluster) => Boolean(cluster))
+            .pluck('active');
+
         if (!this.isDemoMode()) {
             this.connectionSbj.subscribe({
                 next: ({cluster}) => {
diff --git a/modules/web-console/frontend/app/modules/agent/AgentModal.service.js b/modules/web-console/frontend/app/modules/agent/AgentModal.service.js
index 15a08a2..857a71e 100644
--- a/modules/web-console/frontend/app/modules/agent/AgentModal.service.js
+++ b/modules/web-console/frontend/app/modules/agent/AgentModal.service.js
@@ -33,7 +33,7 @@
             show: false,
             backdrop: 'static',
             keyboard: false,
-            controller: () => self,
+            controller() { return self;},
             controllerAs: 'ctrl'
         });
 
diff --git a/modules/web-console/frontend/app/modules/agent/agent.module.js b/modules/web-console/frontend/app/modules/agent/agent.module.js
index 1812af0..9189d92 100644
--- a/modules/web-console/frontend/app/modules/agent/agent.module.js
+++ b/modules/web-console/frontend/app/modules/agent/agent.module.js
@@ -26,5 +26,5 @@
     .module('ignite-console.agent', [
         clusterLogin.name
     ])
-    .service(AgentModal.name, AgentModal)
-    .service(AgentManager.name, AgentManager);
+    .service('AgentModal', AgentModal)
+    .service('AgentManager', AgentManager);
diff --git a/modules/web-console/frontend/app/modules/agent/components/cluster-login/component.js b/modules/web-console/frontend/app/modules/agent/components/cluster-login/component.js
index a8311ed..5f1d9e3 100644
--- a/modules/web-console/frontend/app/modules/agent/components/cluster-login/component.js
+++ b/modules/web-console/frontend/app/modules/agent/components/cluster-login/component.js
@@ -19,7 +19,6 @@
 import {ClusterSecrets} from '../../types/ClusterSecrets';
 
 export const component = {
-    name: 'clusterLogin',
     bindings: {
         secrets: '=',
         onLogin: '&',
diff --git a/modules/web-console/frontend/app/modules/agent/components/cluster-login/index.js b/modules/web-console/frontend/app/modules/agent/components/cluster-login/index.js
index 24cce4f..177ccda 100644
--- a/modules/web-console/frontend/app/modules/agent/components/cluster-login/index.js
+++ b/modules/web-console/frontend/app/modules/agent/components/cluster-login/index.js
@@ -20,7 +20,6 @@
 import service from './service';
 
 export default angular
-    .module('ignite-console.agent.cluster-login', [
-    ])
-    .service(service.name, service)
-    .component(component.name, component);
+    .module('ignite-console.agent.cluster-login', [])
+    .service('ClusterLoginService', service)
+    .component('clusterLogin', component);
diff --git a/modules/web-console/frontend/app/modules/agent/components/cluster-login/template.pug b/modules/web-console/frontend/app/modules/agent/components/cluster-login/template.pug
index c6bc474..19ab6f4 100644
--- a/modules/web-console/frontend/app/modules/agent/components/cluster-login/template.pug
+++ b/modules/web-console/frontend/app/modules/agent/components/cluster-login/template.pug
@@ -23,7 +23,6 @@
        form.modal-content(name=form novalidate ng-submit='$ctrl.login()')
             .modal-header
                 h4.modal-title
-                    i.icon-confirm
                     span Cluster Authentication
                 button.close(type='button' aria-label='Close' ng-click='$ctrl.onHide()')
                     svg(ignite-icon="cross")
@@ -53,5 +52,7 @@
                             autocomplete='node-password'
                         )
             .modal-footer
-                button#btn-cancel.btn-ignite.btn-ignite--link-success(type='button' ng-click='$ctrl.onHide()') Cancel
-                button#btn-login.btn-ignite.btn-ignite--success Login
+                div
+                    button#btn-cancel.btn-ignite.btn-ignite--link-success(type='button' ng-click='$ctrl.onHide()') Cancel
+                    button#btn-login.btn-ignite.btn-ignite--success Login
+
diff --git a/modules/web-console/frontend/app/modules/branding/features.directive.js b/modules/web-console/frontend/app/modules/branding/features.directive.js
index 9226a3f..d99b885 100644
--- a/modules/web-console/frontend/app/modules/branding/features.directive.js
+++ b/modules/web-console/frontend/app/modules/branding/features.directive.js
@@ -17,7 +17,7 @@
 
 const template = '<div class="features" ng-bind-html="features.html"></div>';
 
-export default ['igniteFeatures', ['IgniteBranding', (branding) => {
+export default ['igniteFeatures', ['IgniteBranding', function(branding) {
     function controller() {
         const ctrl = this;
 
diff --git a/modules/web-console/frontend/app/modules/branding/footer.directive.js b/modules/web-console/frontend/app/modules/branding/footer.directive.js
index f0b1994..7f09e11 100644
--- a/modules/web-console/frontend/app/modules/branding/footer.directive.js
+++ b/modules/web-console/frontend/app/modules/branding/footer.directive.js
@@ -17,7 +17,7 @@
 
 const template = '<div class="footer" ng-bind-html="footer.html"></div>';
 
-export default ['igniteFooter', ['IgniteBranding', (branding) => {
+export default ['igniteFooter', ['IgniteBranding', function(branding) {
     function controller() {
         const ctrl = this;
 
diff --git a/modules/web-console/frontend/app/modules/branding/header-logo.directive.js b/modules/web-console/frontend/app/modules/branding/header-logo.directive.js
index 231411b..18dff1c 100644
--- a/modules/web-console/frontend/app/modules/branding/header-logo.directive.js
+++ b/modules/web-console/frontend/app/modules/branding/header-logo.directive.js
@@ -17,7 +17,7 @@
 
 import template from './header-logo.pug';
 
-export default ['igniteHeaderLogo', ['IgniteBranding', (branding) => {
+export default ['igniteHeaderLogo', ['IgniteBranding', function(branding) {
     function controller() {
         const ctrl = this;
 
diff --git a/modules/web-console/frontend/app/modules/configuration/configuration.module.js b/modules/web-console/frontend/app/modules/configuration/configuration.module.js
index a350871..844c87f 100644
--- a/modules/web-console/frontend/app/modules/configuration/configuration.module.js
+++ b/modules/web-console/frontend/app/modules/configuration/configuration.module.js
@@ -42,10 +42,10 @@
 .module('ignite-console.configuration', [
 
 ])
-.service('IgniteConfigurationGenerator', () => IgniteConfigurationGenerator)
+.service('IgniteConfigurationGenerator', function() { return IgniteConfigurationGenerator;})
 .service('IgnitePlatformGenerator', IgnitePlatformGenerator)
-.service('SpringTransformer', () => IgniteSpringTransformer)
-.service('JavaTransformer', () => IgniteJavaTransformer)
+.service('SpringTransformer', function() { return IgniteSpringTransformer;})
+.service('JavaTransformer', function() { return IgniteJavaTransformer;})
 .service('IgniteSharpTransformer', SharpTransformer)
 .service('IgniteEventGroups', IgniteEventGroups)
 .service('IgniteClusterDefaults', IgniteClusterDefaults)
diff --git a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js
index c3efea5..2a1a506 100644
--- a/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js
+++ b/modules/web-console/frontend/app/modules/configuration/generator/ConfigurationGenerator.js
@@ -1698,7 +1698,7 @@
                 .emptyBeanProperty('service')
                 .intProperty('maxPerNodeCount')
                 .intProperty('totalCount')
-                .stringProperty('cache', 'cacheName', (_id) => _id ? _.find(caches, {_id}).name : null)
+                .stringProperty('cache', 'cacheName', (_id) => _id ? _.get(_.find(caches, {_id}), 'name', null) : null)
                 .stringProperty('affinityKey');
 
             srvBeans.push(bean);
diff --git a/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js b/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js
index 99b93cc..f4a712d4 100644
--- a/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js
+++ b/modules/web-console/frontend/app/modules/configuration/generator/PlatformGenerator.js
@@ -15,10 +15,12 @@
  * limitations under the License.
  */
 
+import _ from 'lodash';
+
 import {nonEmpty} from 'app/utils/lodashMixins';
 import { EmptyBean, Bean } from './Beans';
 
-export default ['JavaTypes', 'igniteClusterPlatformDefaults', 'igniteCachePlatformDefaults', (JavaTypes, clusterDflts, cacheDflts) => {
+export default ['JavaTypes', 'igniteClusterPlatformDefaults', 'igniteCachePlatformDefaults', function(JavaTypes, clusterDflts, cacheDflts) {
     class PlatformGenerator {
         static igniteConfigurationBean(cluster) {
             return new Bean('Apache.Ignite.Core.IgniteConfiguration', 'cfg', cluster, clusterDflts);
diff --git a/modules/web-console/frontend/app/modules/demo/Demo.module.js b/modules/web-console/frontend/app/modules/demo/Demo.module.js
index 2e1a627..96d409f 100644
--- a/modules/web-console/frontend/app/modules/demo/Demo.module.js
+++ b/modules/web-console/frontend/app/modules/demo/Demo.module.js
@@ -20,6 +20,8 @@
 import DEMO_INFO from 'app/data/demo-info.json';
 import templateUrl from 'views/templates/demo-info.tpl.pug';
 
+const DEMO_QUERY_STATE = {state: 'base.sql.notebook', params: {noteId: 'demo'}};
+
 angular
 .module('ignite-console.demo', [
     'ignite-console.socket'
@@ -34,7 +36,7 @@
         .state('demo.resume', {
             url: '/resume',
             permission: 'demo',
-            redirectTo: 'default-state',
+            redirectTo: DEMO_QUERY_STATE,
             unsaved: true,
             tfMetaTags: {
                 title: 'Demo resume'
@@ -47,11 +49,11 @@
                 const $http = trans.injector().get('$http');
 
                 return $http.post('/api/v1/demo/reset')
-                    .then(() => 'default-state')
+                    .then(() => DEMO_QUERY_STATE)
                     .catch((err) => {
                         trans.injector().get('IgniteMessages').showError(err);
 
-                        return 'default-state';
+                        return DEMO_QUERY_STATE;
                     });
             },
             unsaved: true,
@@ -78,7 +80,7 @@
         return {enabled};
     }];
 }])
-.factory('demoInterceptor', ['Demo', (Demo) => {
+.factory('demoInterceptor', ['Demo', function(Demo) {
     const isApiRequest = (url) => /\/api\/v1/ig.test(url);
 
     return {
@@ -90,7 +92,7 @@
         }
     };
 }])
-.controller('demoController', ['$scope', '$state', '$window', 'IgniteConfirm', ($scope, $state, $window, Confirm) => {
+.controller('demoController', ['$scope', '$state', '$window', 'IgniteConfirm', function($scope, $state, $window, Confirm) {
     const _openTab = (stateName) => $window.open($state.href(stateName), '_blank');
 
     $scope.startDemo = () => {
@@ -119,7 +121,7 @@
         return items;
     }];
 }])
-.service('DemoInfo', ['$rootScope', '$modal', '$state', '$q', 'igniteDemoInfo', 'AgentManager', ($rootScope, $modal, $state, $q, igniteDemoInfo, agentMgr) => {
+.service('DemoInfo', ['$rootScope', '$modal', '$state', '$q', 'igniteDemoInfo', 'AgentManager', function($rootScope, $modal, $state, $q, igniteDemoInfo, agentMgr) {
     const scope = $rootScope.$new();
 
     let closePromise = null;
@@ -138,27 +140,14 @@
         backdrop: 'static'
     });
 
+    scope.downloadAgentHref = '/api/v1/downloads/agent';
+
     scope.close = () => {
         dialog.hide();
 
         closePromise && closePromise.resolve();
     };
 
-    scope.downloadAgent = () => {
-        const lnk = document.createElement('a');
-
-        lnk.setAttribute('href', '/api/v1/agent/downloads/agent');
-        lnk.setAttribute('target', '_self');
-        lnk.setAttribute('download', null);
-        lnk.style.display = 'none';
-
-        document.body.appendChild(lnk);
-
-        lnk.click();
-
-        document.body.removeChild(lnk);
-    };
-
     return {
         show: () => {
             closePromise = $q.defer();
diff --git a/modules/web-console/frontend/app/modules/dialog/dialog.factory.js b/modules/web-console/frontend/app/modules/dialog/dialog.factory.js
index 599433a..749f833 100644
--- a/modules/web-console/frontend/app/modules/dialog/dialog.factory.js
+++ b/modules/web-console/frontend/app/modules/dialog/dialog.factory.js
@@ -17,7 +17,7 @@
 
 import templateUrl from './dialog.tpl.pug';
 
-export default ['IgniteDialog', ['$modal', ($modal) => {
+export default ['IgniteDialog', ['$modal', function($modal) {
     const defaults = {
         templateUrl,
         show: false
diff --git a/modules/web-console/frontend/app/modules/form/field/down.directive.js b/modules/web-console/frontend/app/modules/form/field/down.directive.js
deleted file mode 100644
index c957e97..0000000
--- a/modules/web-console/frontend/app/modules/form/field/down.directive.js
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export default ['igniteFormFieldDown', ['$tooltip', ($tooltip) => {
-    const controller = ['$element', function($element) {
-        const ctrl = this;
-
-        this.$onInit = () => {
-            $tooltip($element, { title: 'Move item down' });
-
-            ctrl.down = () => {
-                const i = ctrl.models.indexOf(ctrl.model);
-
-                ctrl.models.splice(i, 1);
-                ctrl.models.splice(i + 1, 0, ctrl.model);
-            };
-        };
-
-
-    }];
-
-    return {
-        restrict: 'A',
-        bindToController: {
-            model: '=ngModel',
-            models: '=models'
-        },
-        controller,
-        controllerAs: 'vm'
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/field/feedback.scss b/modules/web-console/frontend/app/modules/form/field/feedback.scss
deleted file mode 100644
index 08d0aef..0000000
--- a/modules/web-console/frontend/app/modules/form/field/feedback.scss
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@import "../../../../public/stylesheets/variables";
-
-.form-field-feedback {
-    position: relative;
-    width: 0;
-    height: 28px;
-    float: right;
-    z-index: 2;
-
-    color: $brand-primary;
-    line-height: $input-height;
-    pointer-events: initial;
-    text-align: center;
-
-    &:before {
-        position: absolute;
-        right: 0;
-        width: 38px;
-    }
-}
diff --git a/modules/web-console/frontend/app/modules/form/field/field.scss b/modules/web-console/frontend/app/modules/form/field/field.scss
deleted file mode 100644
index 5717766..0000000
--- a/modules/web-console/frontend/app/modules/form/field/field.scss
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@import "../../../../public/stylesheets/variables";
-
-.indexField {
-    float: left;
-    line-height: 28px;
-    margin-right: 5px;
-    color: $brand-primary;
-}
-
-.form-field-save {
-    position: relative;
-    width: 0;
-    height: 28px;
-    float: right;
-    z-index: 2;
-
-    line-height: $input-height;
-    pointer-events: initial;
-    text-align: center;
-
-    &:before {
-        position: absolute;
-        right: 0;
-        width: 38px;
-    }    
-}
diff --git a/modules/web-console/frontend/app/modules/form/field/form-control-feedback.directive.js b/modules/web-console/frontend/app/modules/form/field/form-control-feedback.directive.js
deleted file mode 100644
index 797ba69..0000000
--- a/modules/web-console/frontend/app/modules/form/field/form-control-feedback.directive.js
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export default ['formFieldFeedback', [() => {
-    const link = ($scope, $element, $attrs, [form]) => {
-        let name = $scope.name;
-
-        if (_.isNil(name))
-            name = $attrs.name;
-
-        const err = $attrs.igniteError;
-        const msg = $attrs.igniteErrorMessage;
-
-        if (name && err && msg) {
-            form.$errorMessages = form.$errorMessages || {};
-            form.$errorMessages[name] = form.$errorMessages[name] || {};
-            form.$errorMessages[name][err] = msg;
-        }
-    };
-
-    return {
-        restrict: 'C',
-        link,
-        require: ['^form']
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/field/input/text.scss b/modules/web-console/frontend/app/modules/form/field/input/text.scss
deleted file mode 100644
index 658d740..0000000
--- a/modules/web-console/frontend/app/modules/form/field/input/text.scss
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-.checkbox label .input-tip {
-	position: initial;
-    overflow: visible;
-}
-
-.input-tip .fa-floppy-o {
-	position: absolute;
-    top: 0;
-    right: 0;
-    z-index: 2;
-
-    width: 34px;
-    height: 34px;
-
-    text-align: center;
-
-    display: inline-block;
-    line-height: 28px;
-    pointer-events: initial;
-}
-
-.input-tip .form-control-feedback {
-    height: auto;
-}
diff --git a/modules/web-console/frontend/app/modules/form/field/label.directive.js b/modules/web-console/frontend/app/modules/form/field/label.directive.js
deleted file mode 100644
index 94f7889..0000000
--- a/modules/web-console/frontend/app/modules/form/field/label.directive.js
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export default ['igniteFormFieldLabel', [() => {
-    return {
-        restrict: 'E',
-        compile() {
-            return {
-                post($scope, $element, $attrs, [field], $transclude) {
-                    $transclude($scope, function(clone) {
-                        const text = clone.text();
-
-                        if (/(.*):$/.test(text))
-                            field.name = /(.*):$/.exec(text)[1];
-
-                        const $label = $element.parent().parent().find('.group-legend > label, .ignite-field > label');
-
-                        if ($label[0] && $element[0].id) {
-                            const id = $element[0].id;
-
-                            $label[0].id = id.indexOf('+') >= 0 ? $scope.$eval(id) : id;
-                        }
-
-                        $label.append(clone);
-                    });
-                }
-            };
-        },
-        replace: true,
-        transclude: true,
-        require: ['?^igniteFormField']
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/field/tooltip.directive.js b/modules/web-console/frontend/app/modules/form/field/tooltip.directive.js
deleted file mode 100644
index 9e764bc..0000000
--- a/modules/web-console/frontend/app/modules/form/field/tooltip.directive.js
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const template = '<i class="tipField icon-help"></i>';
-
-export default ['igniteFormFieldTooltip', ['$tooltip', ($tooltip) => {
-    const link = ($scope, $element, $attrs, [field], $transclude) => {
-        const content = Array.prototype.slice
-            .apply($transclude($scope))
-            .reduce((html, el) => html += el.outerHTML || el.textContent || el, '');
-
-        $tooltip($element, { title: content });
-
-        if (field)
-            $element.attr('id', field.for + 'Tooltip');
-
-        // TODO cleanup css styles.
-        if ($element.hasClass('tipLabel'))
-            $element.removeClass('tipField');
-
-        if ($element.parent('label').length)
-            $element.addClass('tipLabel').removeClass('tipField');
-    };
-
-    return {
-        priority: 1,
-        restrict: 'E',
-        scope: {},
-        template,
-        link,
-        replace: true,
-        transclude: true,
-        require: ['?^igniteFormField']
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/field/up.directive.js b/modules/web-console/frontend/app/modules/form/field/up.directive.js
deleted file mode 100644
index 6f87180..0000000
--- a/modules/web-console/frontend/app/modules/form/field/up.directive.js
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-export default ['igniteFormFieldUp', ['$tooltip', ($tooltip) => {
-    const controller = ['$element', function($element) {
-        const ctrl = this;
-
-        this.$onInit = () => {
-            $tooltip($element, { title: 'Move item up' });
-
-            this.up = () => {
-                const idx = ctrl.models.indexOf(ctrl.model);
-
-                ctrl.models.splice(idx, 1);
-                ctrl.models.splice(idx - 1, 0, ctrl.model);
-            };
-        };
-    }];
-
-    return {
-        restrict: 'A',
-        bindToController: {
-            model: '=ngModel',
-            models: '=models'
-        },
-        controller,
-        controllerAs: 'vm'
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/form.module.js b/modules/web-console/frontend/app/modules/form/form.module.js
index 59fedff..ac15c6a 100644
--- a/modules/web-console/frontend/app/modules/form/form.module.js
+++ b/modules/web-console/frontend/app/modules/form/form.module.js
@@ -17,23 +17,9 @@
 
 import angular from 'angular';
 
-// Fields styles.
-import './field/field.scss';
-import './field/feedback.scss';
-import './field/input/text.scss';
-
-// Panel.
-import igniteFormPanelChevron from './panel/chevron.directive';
-
 // Field.
-import igniteFormFieldLabel from './field/label.directive';
-import igniteFormFieldTooltip from './field/tooltip.directive';
 import placeholder from './field/bs-select-placeholder.directive';
 
-// Group.
-import igniteFormGroupAdd from './group/add.directive';
-import igniteFormGroupTooltip from './group/tooltip.directive';
-
 // Validators.
 import ipaddress from './validator/ipaddress.directive';
 import javaKeywords from './validator/java-keywords.directive';
@@ -48,25 +34,14 @@
 
 // Helpers.
 import igniteFormFieldInputAutofocus from './field/input/autofocus.directive';
-import igniteFormControlFeedback from './field/form-control-feedback.directive';
-import igniteFormFieldUp from './field/up.directive';
-import igniteFormFieldDown from './field/down.directive';
-
 import IgniteFormGUID from './services/FormGUID.service.js';
 
 angular
 .module('ignite-console.Form', [
 
 ])
-// Panel.
-.directive(...igniteFormPanelChevron)
 // Field.
-.directive(...igniteFormFieldLabel)
-.directive(...igniteFormFieldTooltip)
 .directive(...placeholder)
-// Group.
-.directive(...igniteFormGroupAdd)
-.directive(...igniteFormGroupTooltip)
 // Validators.
 .directive(...ipaddress)
 .directive(...javaKeywords)
@@ -80,9 +55,6 @@
 .directive(...uuid)
 // Helpers.
 .directive(...igniteFormFieldInputAutofocus)
-.directive(...igniteFormControlFeedback)
-.directive(...igniteFormFieldUp)
-.directive(...igniteFormFieldDown)
 
 // Generator of globally unique identifier.
 .service('IgniteFormGUID', IgniteFormGUID);
diff --git a/modules/web-console/frontend/app/modules/form/group/add.directive.js b/modules/web-console/frontend/app/modules/form/group/add.directive.js
deleted file mode 100644
index 71070cc..0000000
--- a/modules/web-console/frontend/app/modules/form/group/add.directive.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const template = '<i class="group-legend-btn fa fa-plus"></i>';
-
-export default ['igniteFormGroupAdd', ['$tooltip', ($tooltip) => {
-    return {
-        restrict: 'E',
-        scope: {},
-        template,
-        link($scope, $el, $attr, $ctrl, $transclude) {
-            $transclude((clone) => {
-                const title = Array.from(clone)
-                    .reduce((html, el) => html += el.outerHTML || el.textContent || el, '');
-                const legend = $el.closest('.group').find('.group-legend');
-
-                $tooltip($el, {title});
-                legend.append($el);
-            });
-        },
-        replace: true,
-        transclude: true
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js b/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js
deleted file mode 100644
index 4190dee..0000000
--- a/modules/web-console/frontend/app/modules/form/group/tooltip.directive.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const template = '<i class="group-legend-btn icon-help"></i>';
-
-export default ['igniteFormGroupTooltip', ['$tooltip', ($tooltip) => {
-    return {
-        restrict: 'E',
-        scope: {},
-        template,
-        link($scope, $el, $attr, $ctrl, $transclude) {
-            $transclude((clone) => {
-                const title = Array.from(clone)
-                    .reduce((html, el) => html += el.outerHTML || el.textContent || el, '');
-                const legend = $el.closest('.group').find('.group-legend');
-
-                $tooltip($el, {title});
-                legend.append($el);
-            });
-        },
-        replace: true,
-        transclude: true
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/panel/chevron.directive.js b/modules/web-console/frontend/app/modules/form/panel/chevron.directive.js
deleted file mode 100644
index f5ad957..0000000
--- a/modules/web-console/frontend/app/modules/form/panel/chevron.directive.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const template = `<img ng-src="{{ isOpen ? '/images/collapse.svg' : '/images/expand.svg' }}" style='width:13px;height:13px;' />`;
-
-export default ['igniteFormPanelChevron', ['$timeout', ($timeout) => {
-    const controller = [() => {}];
-
-    const link = ($scope, $element, $attrs, [bsCollapseCtrl]) => {
-        const $target = $element.parent().parent().find('[bs-collapse-target]');
-
-        const listener = function() {
-            const index = bsCollapseCtrl.$targets.reduce((acc, el, i) => {
-                if (el[0] === $target[0])
-                    acc.push(i);
-
-                return acc;
-            }, [])[0];
-
-            $scope.isOpen = false;
-
-            const active = bsCollapseCtrl.$activeIndexes();
-
-            if ((active instanceof Array) && active.indexOf(index) !== -1 || active === index)
-                $scope.isOpen = true;
-        };
-
-        bsCollapseCtrl.$viewChangeListeners.push(listener);
-        $timeout(listener);
-    };
-
-    return {
-        restrict: 'E',
-        scope: {},
-        link,
-        template,
-        controller,
-        // replace: true,
-        // transclude: true,
-        require: ['^bsCollapse']
-    };
-}]];
diff --git a/modules/web-console/frontend/app/modules/form/services/FormGUID.service.js b/modules/web-console/frontend/app/modules/form/services/FormGUID.service.js
index b886851..a018500 100644
--- a/modules/web-console/frontend/app/modules/form/services/FormGUID.service.js
+++ b/modules/web-console/frontend/app/modules/form/services/FormGUID.service.js
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-export default [() => {
+export default [function() {
     let guid = 0;
 
     return () => `form-field-${guid++}`;
diff --git a/modules/web-console/frontend/app/modules/getting-started/GettingStarted.provider.js b/modules/web-console/frontend/app/modules/getting-started/GettingStarted.provider.js
index 2608bd2..09b65f4 100644
--- a/modules/web-console/frontend/app/modules/getting-started/GettingStarted.provider.js
+++ b/modules/web-console/frontend/app/modules/getting-started/GettingStarted.provider.js
@@ -54,11 +54,15 @@
         const scope = $root.$new();
 
         scope.ui = {
-            showGettingStarted: false
+            dontShowGettingStarted: false
         };
 
         function _fillPage() {
-            scope.title = _model[_page].title;
+            if (_page === 0)
+                scope.title = `${_model[_page].title}`;
+            else
+                scope.title = `${_page}. ${_model[_page].title}`;
+
             scope.message = _model[_page].message.join(' ');
         }
 
@@ -82,7 +86,7 @@
 
         scope.close = () => {
             try {
-                localStorage.showGettingStarted = scope.ui.showGettingStarted;
+                localStorage.showGettingStarted = !scope.ui.dontShowGettingStarted;
             }
             catch (ignore) {
                 // No-op.
@@ -94,14 +98,14 @@
         return {
             tryShow: (force) => {
                 try {
-                    scope.ui.showGettingStarted = _.isNil(localStorage.showGettingStarted)
-                        || localStorage.showGettingStarted === 'true';
+                    scope.ui.dontShowGettingStarted = !(_.isNil(localStorage.showGettingStarted)
+                        || localStorage.showGettingStarted === 'true');
                 }
                 catch (ignore) {
                     // No-op.
                 }
 
-                if (force || scope.ui.showGettingStarted) {
+                if (force || !scope.ui.dontShowGettingStarted) {
                     _page = 0;
 
                     _fillPage();
diff --git a/modules/web-console/frontend/app/modules/loading/loading.service.js b/modules/web-console/frontend/app/modules/loading/loading.service.js
index bdc80b8..a5c9e7e 100644
--- a/modules/web-console/frontend/app/modules/loading/loading.service.js
+++ b/modules/web-console/frontend/app/modules/loading/loading.service.js
@@ -15,7 +15,7 @@
  * limitations under the License.
  */
 
-export default ['IgniteLoading', [() => {
+export default ['IgniteLoading', [function() {
     const _overlays = {};
 
     const start = (key) => {
diff --git a/modules/web-console/frontend/app/modules/nodes/nodes-dialog.tpl.pug b/modules/web-console/frontend/app/modules/nodes/nodes-dialog.tpl.pug
index 22f3b1d..7bd8c7a 100644
--- a/modules/web-console/frontend/app/modules/nodes/nodes-dialog.tpl.pug
+++ b/modules/web-console/frontend/app/modules/nodes/nodes-dialog.tpl.pug
@@ -14,7 +14,7 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.modal.modal--ignite.theme--ignite.ignite-nodes-dialog(tabindex='-1' role='dialog')
+.modal.modal--ignite.ignite-nodes-dialog(tabindex='-1' role='dialog')
     .modal-dialog.modal-dialog--adjust-height
         form.modal-content
             .modal-header
@@ -30,12 +30,13 @@
                             span Cache Nodes
                             span.badge.badge--blue {{ $ctrl.data.length }}
 
-                .panel--ignite
+                .panel--ignite.panel--ignite__without-border
                     .grid.ui-grid--ignite(ui-grid='$ctrl.gridOptions' ui-grid-resize-columns ui-grid-selection ui-grid-pinning ui-grid-hovering)
 
             .modal-footer
-                button.pull-left.btn-ignite(disabled)
+                div
                     grid-item-selected(class='pull-left' grid-api='$ctrl.gridApi')
 
-                button.btn-ignite.btn-ignite--link-success(id='confirm-btn-close' ng-click='$cancel()') Cancel
-                button.btn-ignite.btn-ignite--success(id='confirm-btn-confirm' ng-click='$ok($ctrl.selected)' ng-disabled='$ctrl.selected.length === 0') Select node
+                div
+                    button.btn-ignite.btn-ignite--link-success(id='confirm-btn-close' ng-click='$cancel()') Cancel
+                    button.btn-ignite.btn-ignite--success(id='confirm-btn-confirm' ng-click='$ok($ctrl.selected)' ng-disabled='$ctrl.selected.length === 0') Select node
diff --git a/modules/web-console/frontend/app/modules/socket.module.js b/modules/web-console/frontend/app/modules/socket.module.js
index 17856c4..3f50295 100644
--- a/modules/web-console/frontend/app/modules/socket.module.js
+++ b/modules/web-console/frontend/app/modules/socket.module.js
@@ -32,7 +32,7 @@
     };
 
     this.$get = ['socketFactory', function(socketFactory) {
-        return () => {
+        return function() {
             const ioSocket = io.connect(_options);
 
             return socketFactory({ioSocket});
diff --git a/modules/web-console/frontend/app/modules/states/logout.state.js b/modules/web-console/frontend/app/modules/states/logout.state.js
index 9f9c7c6..a46f871 100644
--- a/modules/web-console/frontend/app/modules/states/logout.state.js
+++ b/modules/web-console/frontend/app/modules/states/logout.state.js
@@ -25,7 +25,7 @@
     $stateProvider.state('logout', {
         url: '/logout',
         permission: 'logout',
-        controller: ['Auth', (Auth) => Auth.logout()],
+        controller: ['Auth', function(Auth) {Auth.logout();}],
         tfMetaTags: {
             title: 'Logout'
         }
diff --git a/modules/web-console/frontend/app/modules/user/user.module.js b/modules/web-console/frontend/app/modules/user/user.module.js
index 889ee5e..24dfc8f 100644
--- a/modules/web-console/frontend/app/modules/user/user.module.js
+++ b/modules/web-console/frontend/app/modules/user/user.module.js
@@ -26,7 +26,7 @@
     'ignite-console.config',
     'ignite-console.core'
 ])
-.factory('sessionRecoverer', ['$injector', '$q', ($injector, $q) => {
+.factory('sessionRecoverer', ['$injector', '$q', function($injector, $q) {
     return {
         responseError: (response) => {
             // Session has expired
diff --git a/modules/web-console/frontend/app/primitives/btn/index.scss b/modules/web-console/frontend/app/primitives/btn/index.scss
index 277b1ae..4401a81 100644
--- a/modules/web-console/frontend/app/primitives/btn/index.scss
+++ b/modules/web-console/frontend/app/primitives/btn/index.scss
@@ -271,7 +271,7 @@
 .btn-ignite--secondary {
     background-color: white;
     color: #424242;
-    border: 1px solid #dedede;
+    border: 1px solid #c5c5c5;
     padding: $btn-content-padding-with-border;
 
     &:hover, &.hover,
@@ -319,6 +319,9 @@
         $line-color: $ignite-brand-success;
         border-right-color: change-color($line-color, $saturation: 63%, $lightness: 33%);
     }
+    .btn-ignite.btn-ignite--secondary + .btn-ignite.btn-ignite--secondary {
+        border-left: 0;
+    }
 
     &[disabled] .btn-ignite.btn-ignite--primary {
         border-right-color: change-color($ignite-brand-primary, $lightness: 83%);
diff --git a/modules/web-console/frontend/app/primitives/checkbox/index.scss b/modules/web-console/frontend/app/primitives/checkbox/index.scss
index d1e1e83..847e33c 100644
--- a/modules/web-console/frontend/app/primitives/checkbox/index.scss
+++ b/modules/web-console/frontend/app/primitives/checkbox/index.scss
@@ -15,26 +15,6 @@
  * limitations under the License.
  */
 
-input[type='checkbox'] {
-    background-image: url(/images/checkbox.svg);
-    width: 12px !important;
-    height: 12px !important;
-    -webkit-appearance: none;
-    -moz-appearance: none;
-    appearance: none;
-    background-repeat: no-repeat;
-    background-size: 100%;
-    padding: 0;
-    border: none;
-
-    &:checked {
-        background-image: url(/images/checkbox-active.svg);
-    }
-    &:disabled {
-        opacity: 0.5;
-    }
-}
-
 .theme--ignite {
     .form-field-checkbox {
         z-index: 2;
diff --git a/modules/web-console/frontend/app/primitives/datepicker/index.pug b/modules/web-console/frontend/app/primitives/datepicker/index.pug
index 7120111..28cd1a0 100644
--- a/modules/web-console/frontend/app/primitives/datepicker/index.pug
+++ b/modules/web-console/frontend/app/primitives/datepicker/index.pug
@@ -14,9 +14,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin ignite-form-field-datepicker(label, model, name, mindate, maxdate, minview = 1, format = 'MMM yyyy', disabled, required, placeholder, tip)
-    mixin form-field-input()
-        input.form-control(
+mixin form-field__datepicker({ label, model, name, mindate, maxdate, minview = 1, format = 'MMM yyyy', disabled, required, placeholder, tip })
+    mixin __form-field__datepicker()
+        input(
             id=`{{ ${name} }}Input`
             name=`{{ ${name} }}`
 
@@ -41,19 +41,25 @@
 
             tabindex='0'
 
-            onkeydown='return false'
+            onkeydown='return false',
+            ng-ref='$input'
+            ng-ref-read='ngModel'
         )&attributes(attributes.attributes)
 
-    .datepicker--ignite.ignite-form-field
-        if name
-            +ignite-form-field__label(label, name, required)
+    .form-field.form-field__datepicker.ignite-form-field(id=`{{ ${name} }}Field`)
+        +form-field__label({ label, name, required, disabled })
+            +form-field__tooltip({ title: tip, options: tipOpts })
 
-        .ignite-form-field__control
-            if tip
-                i.tipField.icon-help(bs-tooltip='' data-title=tip)
+        .form-field__control
+            - attributes.type='button'
+            +__form-field__datepicker(attributes=attributes)
 
+        .form-field__errors(
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
+        )
             if block
                 block
 
-            .input-tip
-                +form-field-input(attributes=attributes)
+            if required
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/datepicker/index.scss b/modules/web-console/frontend/app/primitives/datepicker/index.scss
index b0c13e6..c8edab5 100644
--- a/modules/web-console/frontend/app/primitives/datepicker/index.scss
+++ b/modules/web-console/frontend/app/primitives/datepicker/index.scss
@@ -39,58 +39,3 @@
         }
     }
 }
-
-.datepicker--ignite {
-    $height: 36px;
-
-    display: inline-block;
-    width: auto;
-
-    font-size: 14px;
-
-    label.ignite-form-field__label {
-        width: auto;
-        max-width: initial;
-
-        font-size: inherit;
-        line-height: $height;
-    }
-
-    .ignite-form-field__control {
-        @import "./../../../public/stylesheets/variables.scss";
-
-        width: auto;
-
-        input {
-            width: auto;
-            height: $height;
-            min-width: 70px;
-            max-width: 70px;
-            padding: 0;
-            padding-left: 5px;
-
-            cursor: pointer;
-            color: transparent;
-            font-size: inherit;
-            line-height: $height;
-            text-align: left;
-            text-shadow: 0 0 0 $ignite-brand-success;
-
-            border: none;
-            box-shadow: none;
-
-            &:hover, &:focus {
-                text-shadow: 0 0 0 change-color($ignite-brand-success, $lightness: 26%);
-            }
-        }
-    }
-}
-
-.theme--ignite {
-    .datepicker--ignite {
-        display: block;
-        width: 100%;
-
-        font-size: 14px;
-    }
-}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/primitives/file/index.pug b/modules/web-console/frontend/app/primitives/file/index.pug
deleted file mode 100644
index 7bdd3cc..0000000
--- a/modules/web-console/frontend/app/primitives/file/index.pug
+++ /dev/null
@@ -1,37 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin ignite-form-field-file(label, model, name, disabled, required, options, tip)
-    .file--ignite.ignite-form-field(ng-class='{ "choosen": `${model}` }')
-        label.ignite-form-field__label(for=`{{ ${name} }}Input`)
-            span Folder name:
-        .ignite-form-field__control
-            label.btn-ignite.btn-ignite--primary(for=`{{ ${name} }}Input`)
-                | Choose Destination Folder
-            label.tipField.link-primary(for=`{{ ${name} }}Input`) Change folder
-
-            +tooltip(tip, tipOpts)
-
-            if block
-                block
-
-            .input-tip
-                input(
-                    id=`{{ ${name} }}Input`
-                    type='file'
-                    ng-model=model
-                )&attributes(attributes)
-                | {{ `${model}` }}
diff --git a/modules/web-console/frontend/app/primitives/file/index.scss b/modules/web-console/frontend/app/primitives/file/index.scss
deleted file mode 100644
index dbd32a4..0000000
--- a/modules/web-console/frontend/app/primitives/file/index.scss
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-.file--ignite {
-    label {
-        cursor: pointer;
-    }
-
-    &.choosen {
-        .ignite-form-field__control {
-            .btn-ignite {
-                display: none;
-            }
-
-            .tipField {
-                display: block;
-            }
-        }
-    }
-
-    &:not(.choosen) {
-        .ignite-form-field__label {
-            font-size: 0;
-            line-height: 0;
-            margin-bottom: 0px;
-        }
-    }
-
-    .ignite-form-field__control {
-        display: flex;
-
-        input[type='file'] {
-            position: absolute;
-            opacity: 0;
-            z-index: -1;
-        }
-
-        .tipField {
-            display: none;
-        }
-
-        .input-tip {
-            order: -1;
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/primitives/form-field/checkbox.pug b/modules/web-console/frontend/app/primitives/form-field/checkbox.pug
index b498cbd..88b8f5a 100644
--- a/modules/web-console/frontend/app/primitives/form-field/checkbox.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/checkbox.pug
@@ -14,9 +14,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__checkbox({ label, model, name, disabled, required, tip })
+mixin form-field__checkbox({ label, model, name, disabled, required, tip, tipOpts })
     .form-field.form-field__checkbox(id=`{{ ${name} }}Field`)
-        +form-field__label({ label, name, required })
+        +form-field__label({ label, name, required, disabled })
             +form-field__tooltip({ title: tip, options: tipOpts })
 
         .form-field__control
diff --git a/modules/web-console/frontend/app/primitives/form-field/dropdown.pug b/modules/web-console/frontend/app/primitives/form-field/dropdown.pug
index de83bf9..96d8482 100644
--- a/modules/web-console/frontend/app/primitives/form-field/dropdown.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/dropdown.pug
@@ -14,8 +14,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__dropdown({ label, model, name, disabled, required, multiple, placeholder, placeholderEmpty, options, tip })
-    -var errLbl = label.substring(0, label.length - 1)
+mixin form-field__dropdown({ label, model, name, disabled, required, multiple, placeholder, placeholderEmpty, options, optionLabel = 'label', tip, tipOpts })
+    -var errLbl = label ? label.substring(0, label.length - 1) : 'Field';
+
     mixin __form-field__input()
         button.select-toggle(
             type='button'
@@ -27,27 +28,30 @@
             ng-model=model
             ng-disabled=disabled && `${disabled}`
             ng-required=required && `${required}`
+            ng-ref='$input'
+            ng-ref-read='ngModel'
 
             bs-select
-            bs-options=`item.value as item.label for item in ${options}`
+            bs-options=`item.value as item.${optionLabel} for item in ${options}`
 
             data-multiple=multiple ? '1' : false
 
             tabindex='0'
         )&attributes(attributes.attributes)
 
-    .form-field(id=`{{ ${name} }}Field`)
-        +form-field__label({ label, name, required })
+    .form-field.form-field__dropdown.ignite-form-field(id=`{{ ${name} }}Field`)
+        +form-field__label({ label, name, required, disabled })
             +form-field__tooltip({ title: tip, options: tipOpts })
 
         .form-field__control
             +__form-field__input(attributes=attributes)
 
         .form-field__errors(
-            ng-messages=`(${form}[${name}].$dirty || ${form}[${name}].$touched || ${form}[${name}].$submitted) && ${form}[${name}].$invalid ? ${form}[${name}].$error : {}`
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
         )
             if block
                 block
 
             if required
-                +form-field__error({ name, error: 'required', message: `${errLbl} could not be empty!` })
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/form-field/email.pug b/modules/web-console/frontend/app/primitives/form-field/email.pug
index 5fccdb2..b68a520 100644
--- a/modules/web-console/frontend/app/primitives/form-field/email.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/email.pug
@@ -32,6 +32,6 @@
                 block
 
             if required
-                +form-field__error({ name, error: 'required', message: `${errLbl} could not be empty!` })
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
 
-            +form-field__error({ name, error: 'email', message: `${errLbl} has invalid format!` })
+            +form-field__error({ error: 'email', message: `${errLbl} has invalid format!` })
diff --git a/modules/web-console/frontend/app/primitives/form-field/error.pug b/modules/web-console/frontend/app/primitives/form-field/error.pug
index 34b03c2..9b44c3c 100644
--- a/modules/web-console/frontend/app/primitives/form-field/error.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/error.pug
@@ -16,6 +16,7 @@
 
 mixin form-field__error({ error, message })
     .form-field__error(ng-message=error)
+        div #{ message }
         div(
             bs-tooltip=''
             data-title=message
diff --git a/modules/web-console/frontend/app/primitives/form-field/index.pug b/modules/web-console/frontend/app/primitives/form-field/index.pug
index 9b67d6c..5a54f21 100644
--- a/modules/web-console/frontend/app/primitives/form-field/index.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/index.pug
@@ -25,3 +25,5 @@
 include ./phone
 include ./dropdown
 include ./checkbox
+include ./typeahead
+include ./radio
diff --git a/modules/web-console/frontend/app/primitives/form-field/index.scss b/modules/web-console/frontend/app/primitives/form-field/index.scss
index 070dfe7..0330e58 100644
--- a/modules/web-console/frontend/app/primitives/form-field/index.scss
+++ b/modules/web-console/frontend/app/primitives/form-field/index.scss
@@ -194,11 +194,24 @@
             font-style: normal;
             color: $gray-light;
         }
+
+        svg {
+            flex: 0 0 auto;
+            margin-left: 4px;
+        }
+
+        [ignite-icon='info'] {
+            position: relative;
+            top: 1px;
+
+            color: $ignite-brand-success;
+        }
     }
 
     &__control {
         overflow: visible;
         display: flex;
+        flex-direction: row;
         width: 100%;
 
         & > input::placeholder,
@@ -226,6 +239,7 @@
             box-shadow: none;
 
             color: $text-color;
+            font-size: 14px;
             text-align: left;
             line-height: 16px;
 
@@ -264,6 +278,27 @@
         }
     }
 
+    &__control-group {
+        input {
+            min-width: 0;
+            margin-right: -1px;
+
+            border-top-right-radius: 0 !important;
+            border-bottom-right-radius: 0 !important;
+
+            &:focus {
+                z-index: 1;
+            }
+        }
+
+        input + * {
+            border-top-left-radius: 0 !important;
+            border-bottom-left-radius: 0 !important;
+            flex: 0 0 auto;
+            width: 60px !important;
+        }
+    }
+
     &__errors {
         position: absolute;
         right: 0;
@@ -302,7 +337,11 @@
             width: 38px;
         }
 
-        div {
+        div:first-child {
+            display: none;
+        }
+
+        [bs-tooltip] {
             z-index: 1;
             position: absolute;
             top: 0;
@@ -311,7 +350,7 @@
             height: 36px;
         }
 
-        svg {
+        [ignite-icon] {
             position: absolute;
             top: 10px;
             right: 0;
@@ -324,14 +363,93 @@
     }
 }
 
+.theme--ignite-errors-horizontal {
+    .form-field__control {
+        // Reset offset to appearance of input for invalid password
+        & > input[type='email'].ng-invalid.ng-touched,
+        & > input[type='text'].ng-invalid.ng-touched,
+        & > input[type='password'].ng-invalid.ng-touched {
+            padding-right: 0;
+        }
+        // Reset offset to appearance of dropdown for invalid data
+        & > button.select-toggle.ng-invalid.ng-touched {
+            &:after {
+                right: 10px;
+            }
+        }
+    }
+
+    .form-field__errors {
+        position: relative;
+        
+        padding: 5px 10px 0px;
+
+        color: $ignite-brand-primary;
+        font-size: 12px;
+        line-height: 14px;
+
+        &:empty {
+            display: none;
+        }
+
+        [ng-message] + [ng-message] {
+            margin-top: 10px;
+        }
+    }
+
+    .form-field__error {
+        float: none;
+        width: auto;
+        height: auto;
+
+        text-align: left;
+        line-height: 14px;
+
+        div:first-child {
+            display: block;
+        }
+
+        [bs-tooltip],
+        [ignite-icon] {
+            display: none;
+        }
+    }
+
+    .form-field__error + .form-field__error {
+        margin-top: 10px;
+    }
+
+    .form-field__checkbox {
+        flex-wrap: wrap;
+
+        .form-field__errors {
+            margin-left: -10px;
+            flex-basis: 100%;
+
+            .form-field__error {
+                width: auto;
+
+                div {
+                    width: auto;
+                }
+            }
+        }
+    }
+}
+
+.form-field__radio,
 .form-field__checkbox {
     $errorSize: 16px;
-    display: flex;
+    display: inline-flex;
+    width: auto;
+
+    сursor: pointer;
 
     .form-field {
         &__label {
             order: 1;
             margin: 0;
+            cursor: pointer;
         }
 
         &__control {
@@ -376,6 +494,132 @@
     }
 }
 
+.form-field__radio {
+    .form-field__control {
+        padding: 2px 0;
+    }
+
+    .form-field__control > input[type='radio'] {
+        -webkit-appearance: none;
+
+        width: 13px;
+        height: 13px;
+        padding: 0;
+
+        background: white;
+        border: none;
+        border-radius: 50%;
+        box-shadow: inset 0 0 0 1px rgb(197, 197, 197);
+
+        &:focus {
+            outline: none;
+            border: none;
+            box-shadow: 0 0 0 2px rgba(0, 103, 185, .3),
+                  inset 0 0 0 1px rgb(197, 197, 197);
+        }
+
+        &:checked {
+            border: none;
+            box-shadow: inset 0 0 0 5px rgba(0, 103, 185, 1); 
+
+            &:focus {
+                box-shadow: 0 0 0 2px rgba(0, 103, 185, .3),
+                      inset 0 0 0 5px rgba(0, 103, 185, 1); 
+            }
+        }
+    }
+}
+
+.form-field__checkbox {
+    .form-field__control > input[type='checkbox'] {
+        border-radius: 2px;
+
+        background-image: url(/images/checkbox.svg);
+        width: 12px !important;
+        height: 12px !important;
+        -webkit-appearance: none;
+        -moz-appearance: none;
+        appearance: none;
+        background-repeat: no-repeat;
+        background-size: 100%;
+        padding: 0;
+        border: none;
+
+        &:checked {
+            background-image: url(/images/checkbox-active.svg);
+        }
+
+        &:disabled {
+            opacity: 0.5;
+        }
+
+        &:focus {
+            outline: none;
+            box-shadow: 0 0 0 2px rgba(0, 103, 185, .3);
+        }
+    }
+}
+
+.form-field--inline {
+    display: inline-block;
+    width: auto;
+
+    .form-field {
+        display: flex;
+        align-items: baseline;
+    }
+
+    .form-field__label {
+        white-space: nowrap;
+    }
+
+    form-field-size,
+    .form-field__text {
+        .form-field__control {
+            margin-left: 10px;
+        }
+    }
+
+    .form-field__dropdown,
+    .form-field__datepicker,
+    .form-field__timepicker {
+        .form-field__control {
+            width: auto;
+
+            input,
+            button {
+                color: transparent;
+
+                text-shadow: 0 0 0 $ignite-brand-success;
+
+                border: none;
+                box-shadow: none;
+
+                background: linear-gradient(to right, rgb(0, 103, 185), transparent) 0px 25px / 0px, 
+                            linear-gradient(to right, rgb(0, 103, 185) 70%, transparent 0%) 0% 0% / 8px 1px repeat-x,
+                            0% 0% / 0px, 0% 0% / 4px;
+                background-size: 0, 8px 1px, 0, 0;
+                background-position: 1px 25px;
+
+                padding-left: 0px;
+                padding-right: 0px;
+                margin-left: 10px;
+                margin-right: 10px;
+
+                &:hover, &:focus {
+                    text-shadow: 0 0 0 change-color($ignite-brand-success, $lightness: 26%);
+                }
+            }
+        }
+    }
+
+    .form-field__dropdown {
+        button::after {
+            display: none;
+        }
+    }
+}
+
 .form-field__password {
     // Validation error notification will overlap with visibility button if it's not moved more to the left
     input[type='password'].ng-invalid.ng-touched,
@@ -400,10 +644,44 @@
     }
 }
 
+.form-field__dropdown {
+    .form-field__control {
+        > button:not(.btn-ignite) {
+            padding-top: 10px;
+        }
+    }
+}
+
+.form-field__ace {
+    .ace_editor {
+        width: 100%;
+        min-height: 70px;
+        margin: 0;
+
+        border: solid 1px #c5c5c5;
+        border-radius: 4px;
+        background-color: #ffffff;
+        box-shadow: none;
+
+        .ace_content {
+            padding-left: 2px;
+        }
+
+        &.ace_focus {
+            border-color: $ignite-brand-success;
+            box-shadow: none;
+        }
+    }
+}
+
+.form-field.ignite-form-field label.required {
+    margin-left: 0 !important;
+}
+
 .form-fieldset {
     padding: 10px;
 
-    border: 1px solid hsla(0,0%,77%,.5);
+    border: 1px solid hsla(0, 0%, 77%, .5);
     border-radius: 4px;
 
     legend {
diff --git a/modules/web-console/frontend/app/primitives/form-field/input.pug b/modules/web-console/frontend/app/primitives/form-field/input.pug
index 0fee77b..0551101 100644
--- a/modules/web-console/frontend/app/primitives/form-field/input.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/input.pug
@@ -14,9 +14,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__input({ name, model, disabled, required, placeholder })
+mixin form-field__input({ name, model, disabled, required, placeholder, namePostfix = '' })
     input(
-        id=`{{ ${name} }}Input`
+        id=`{{ ${name} }}${ namePostfix }Input`
         name=`{{ ${name} }}`
         placeholder=placeholder
 
@@ -24,5 +24,6 @@
 
         ng-required=required && `${required}`
         ng-disabled=disabled && `${disabled}`
-        ng-focus='tableReset()'
+        ng-ref='$input'
+        ng-ref-read='ngModel'
     )&attributes(attributes ? attributes.attributes ? attributes.attributes : attributes : {})
diff --git a/modules/web-console/frontend/app/primitives/form-field/label.pug b/modules/web-console/frontend/app/primitives/form-field/label.pug
index d725f9d..74ddecb 100644
--- a/modules/web-console/frontend/app/primitives/form-field/label.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/label.pug
@@ -14,17 +14,18 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__label({ label, name, required, optional, disabled })
-    -var colon = label[label.length-1] === ':' ? ':' : '';
-    - label = label[label.length-1] === ':' ? label.substring(0, label.length - 1) : label
-    - optional = optional ? ' <i>(optional)</i>' : '';
+mixin form-field__label({ label, name, required, optional, disabled, namePostfix = '' })
+    if label
+        -var colon = label[label.length-1] === ':' ? ':' : '';
+        - label = label[label.length-1] === ':' ? label.substring(0, label.length - 1) : label
+        - optional = optional ? ' <i>(optional)</i>' : '';
 
-    label.form-field__label(
-        id=name && `{{ ${name} }}Label`
-        for=name && `{{ ${name} }}Input`
-        class=`{{ ${required} ? 'required' : '' }}`
-        ng-disabled=disabled && `${disabled}`
-    )
-        span !{label}!{optional}!{colon}
-        if block
-            block
+        label.form-field__label(
+            id=name && `{{ ${name} }}Label`
+            for=name && `{{ ${name} }}${ namePostfix }Input`
+            class=`{{ ${required} ? 'required' : '' }}`
+            ng-disabled=disabled && `${disabled}`
+        )
+            span !{label}!{optional}!{colon}
+            if block
+                block
diff --git a/modules/web-console/frontend/app/primitives/form-field/number.pug b/modules/web-console/frontend/app/primitives/form-field/number.pug
index 11f8e22..7755415 100644
--- a/modules/web-console/frontend/app/primitives/form-field/number.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/number.pug
@@ -14,23 +14,24 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__number({ label, model, name, disabled, required, placeholder, tip, min, max, step, postfix })
+mixin form-field__number({ label, model, name, disabled, required, placeholder, tip, min, max, step = '1', postfix })
     -var errLbl = label.substring(0, label.length - 1)
 
-    .form-field
-        +form-field__label({ label, name, required })
+    .form-field.ignite-form-field
+        +form-field__label({ label, name, required, disabled })
             +form-field__tooltip({ title: tip, options: tipOpts })
 
         .form-field__control(class=postfix && 'form-field__control--postfix' data-postfix=postfix)
             - attributes.type = 'number'
             - attributes.min = min ? min : '0'
             - attributes.max = max ? max : '{{ Number.MAX_VALUE }}'
-            - attributes.step = step ? step : '1'
+            - attributes.step = step
             +form-field__input({ name, model, disabled, required, placeholder })(attributes=attributes)
 
         .form-field__errors(
             data-postfix=postfix
-            ng-messages=`(${form}[${name}].$dirty || ${form}[${name}].$touched || ${form}[${name}].$submitted) && ${form}[${name}].$invalid ? ${form}[${name}].$error : {}`
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
         )
             if block
                 block
@@ -38,10 +39,10 @@
             if required
                 +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
 
-            if min
-                +form-field__error({ error: 'min', message: `${errLbl} is less than allowable minimum: ${ min || 0 }`})
+            +form-field__error({ error: 'min', message: `${errLbl} is less than allowable minimum: ${ min || 0 }`})
 
-            if max
-                +form-field__error({ error: 'max', message: `${errLbl} is more than allowable maximum: ${ max }`})
+            +form-field__error({ error: 'max', message: `${errLbl} is more than allowable maximum: ${ max }`})
+
+            +form-field__error({ error: 'step', message: `${errLbl} step should be ${step || 1}` })
 
             +form-field__error({ error: 'number', message: 'Only numbers allowed' })
diff --git a/modules/web-console/frontend/app/primitives/form-field/password.pug b/modules/web-console/frontend/app/primitives/form-field/password.pug
index 40e1aa9..6b9818b 100644
--- a/modules/web-console/frontend/app/primitives/form-field/password.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/password.pug
@@ -42,6 +42,6 @@
                 block
 
             if required
-                +form-field__error({ name, error: 'required', message: `${errLbl} could not be empty!` })
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
                 
-            +form-field__error({ name, error: 'mismatch', message: `Password does not match the confirm password!` })
+            +form-field__error({ error: 'mismatch', message: `Password does not match the confirm password!` })
diff --git a/modules/web-console/frontend/app/primitives/form-field/phone.pug b/modules/web-console/frontend/app/primitives/form-field/phone.pug
index b65c5d2..8b50301 100644
--- a/modules/web-console/frontend/app/primitives/form-field/phone.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/phone.pug
@@ -32,4 +32,4 @@
                 block
 
             if required
-                +form-field__error({ name, error: 'required', message: `${errLbl} could not be empty!` })
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/form-field/radio.pug b/modules/web-console/frontend/app/primitives/form-field/radio.pug
new file mode 100644
index 0000000..57ae097
--- /dev/null
+++ b/modules/web-console/frontend/app/primitives/form-field/radio.pug
@@ -0,0 +1,32 @@
+//-
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+mixin form-field__radio({ label, model, name, value, disabled, required, tip })
+    .form-field.form-field__radio
+        +form-field__label({ label, name, required, disabled, namePostfix: value })
+            +form-field__tooltip({ title: tip, options: tipOpts })
+
+        .form-field__control
+            - attributes.type='radio'
+            - attributes['ng-value'] = value
+            +form-field__input({ name, model, disabled, required, placeholder, namePostfix: value })(attributes=attributes)
+
+        .form-field__errors(
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
+        )
+            if block
+                block
diff --git a/modules/web-console/frontend/app/primitives/form-field/text.pug b/modules/web-console/frontend/app/primitives/form-field/text.pug
index 8f63f19..ab8d14d 100644
--- a/modules/web-console/frontend/app/primitives/form-field/text.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/text.pug
@@ -15,9 +15,9 @@
     limitations under the License.
 
 mixin form-field__text({ label, model, name, disabled, required, placeholder, tip })
-    -var errLbl = label.substring(0, label.length - 1)
+    -let errLbl = label[label.length - 1] === ':' ? label.substring(0, label.length - 1) : label
 
-    .form-field(id=`{{ ${name} }}Field`)
+    .form-field.form-field__text.ignite-form-field(id=`{{ ${name} }}Field`)
         +form-field__label({ label, name, required, disabled })
             +form-field__tooltip({ title: tip, options: tipOpts })
 
@@ -26,10 +26,11 @@
             +form-field__input({ name, model, disabled, required, placeholder })(attributes=attributes)
 
         .form-field__errors(
-            ng-messages=`(${form}[${name}].$dirty || ${form}[${name}].$touched || ${form}[${name}].$submitted) && ${form}[${name}].$invalid ? ${form}[${name}].$error : {}`
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
         )
             if block
                 block
 
             if required
-                +form-field__error({ name, error: 'required', message: `${errLbl} could not be empty!` })
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/form-field/tooltip.pug b/modules/web-console/frontend/app/primitives/form-field/tooltip.pug
index 08ffd83..34376fd 100644
--- a/modules/web-console/frontend/app/primitives/form-field/tooltip.pug
+++ b/modules/web-console/frontend/app/primitives/form-field/tooltip.pug
@@ -14,5 +14,13 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin form-field__tooltip({ title, options })
-    +tooltip(title, options)
+mixin form-field__tooltip({ title, options = { placement: 'auto' }})
+    if title
+        svg(
+            ignite-icon='info'
+            bs-tooltip=''
+
+            data-title=title
+            data-container=options && options.container || false
+            data-placement=options && options.placement || false
+        )&attributes(attributes)
diff --git a/modules/web-console/frontend/app/primitives/form-field/typeahead.pug b/modules/web-console/frontend/app/primitives/form-field/typeahead.pug
new file mode 100644
index 0000000..b2c62ae
--- /dev/null
+++ b/modules/web-console/frontend/app/primitives/form-field/typeahead.pug
@@ -0,0 +1,55 @@
+//-
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+
+         http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+mixin form-field__typeahead({ label, model, name, disabled, required, placeholder, options, tip })
+    -var errLbl = label.substring(0, label.length - 1)
+    mixin __form-field__typeahead()
+        input(
+            id=`{{ ${name} }}Input`
+            name=`{{ ${name} }}`
+            placeholder=placeholder
+           
+            ng-model=model
+
+            ng-required=required && `${required}`
+            ng-disabled=disabled && `${disabled}` || `!${options}.length`
+
+            bs-typeahead
+            bs-options=`item for item in ${options}`
+            container='body'
+            data-min-length='1'
+            ignite-retain-selection
+            ng-ref='$input'
+            ng-ref-read='ngModel'
+        )&attributes(attributes.attributes)
+
+    .form-field.form-field__typeahead.ignite-form-field(id=`{{ ${name} }}Field`)
+        +form-field__label({ label, name, required, disabled })
+            +form-field__tooltip({ title: tip, options: tipOpts })
+
+        .form-field__control
+            - attributes.type='text'
+            +__form-field__typeahead(attributes=attributes)
+
+        .form-field__errors(
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
+        )
+            if block
+                block
+
+            if required
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/index.js b/modules/web-console/frontend/app/primitives/index.js
index f9d8591..a9ff053 100644
--- a/modules/web-console/frontend/app/primitives/index.js
+++ b/modules/web-console/frontend/app/primitives/index.js
@@ -28,7 +28,7 @@
 import './ui-grid-header/index.scss';
 import './ui-grid-settings/index.scss';
 import './page/index.scss';
-import './radio/index.scss';
+import './spinner-circle/index.scss';
 import './switcher/index.scss';
 import './form-field/index.scss';
 import './typography/index.scss';
diff --git a/modules/web-console/frontend/app/primitives/modal/index.scss b/modules/web-console/frontend/app/primitives/modal/index.scss
index 802a241..2be2ecd 100644
--- a/modules/web-console/frontend/app/primitives/modal/index.scss
+++ b/modules/web-console/frontend/app/primitives/modal/index.scss
@@ -67,14 +67,6 @@
     }
 }
 
-.modal .modal-content {
-    background-color: $gray-lighter;
-
-    .input-tip {
-        padding-top: 1px;
-    }
-}
-
 .modal .modal-content .modal-header {
     background-color: $ignite-background-color;
     text-align: center;
@@ -130,7 +122,7 @@
         opacity: 1;
         background: none;
         color: $gray-light;
-        
+
         [ignite-icon] {
             height: 12px;
         }
@@ -182,12 +174,22 @@
         }
 
         .modal-body {
-            margin: 0;
             padding: 5px 20px;
+            margin: 0;
 
             .input-tip {
                 padding-top: 0;
             }
+
+            .modal-body--inner-content {
+                max-height: 300px;
+                overflow-x: auto;
+
+                p {
+                    text-align: left;
+                    white-space: nowrap;
+                }
+            }
         }
 
         .modal-footer {
@@ -223,3 +225,26 @@
         }
     }
 }
+
+.modal-footer {
+    display: flex;
+    align-items: center;
+
+    > div {
+        display: flex;
+        flex: 1;
+
+        &:last-child {
+            flex: 1;
+            justify-content: flex-end;
+
+            &.modal-footer--no-grow {
+                flex-grow: 0;
+            }
+        }
+    }
+}
+
+.modal--wide .modal-dialog {
+    width: 900px;
+}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/primitives/panel/index.scss b/modules/web-console/frontend/app/primitives/panel/index.scss
index 5dab39f..6afcd5c 100644
--- a/modules/web-console/frontend/app/primitives/panel/index.scss
+++ b/modules/web-console/frontend/app/primitives/panel/index.scss
@@ -33,11 +33,17 @@
 
             background-color: initial;
             font-size: 16px;
-            line-height: 36px;
 
             &:hover {
                 text-decoration: none;
             }
+
+            h5 {
+                margin: 0;
+                font-size: 16px;
+                font-weight: normal;
+                line-height: 36px;
+            }
         }
 
         & > hr {
@@ -72,6 +78,18 @@
                 }
             }
         }
+
+        & > header.header-with-selector {
+            margin: 0;
+            padding: 14px 20px;
+            min-height: 65px;
+
+            border-bottom: 1px solid #ddd;
+
+            sub {
+                bottom: 0;
+            }
+        }
     }
 
     &--collapse {
@@ -106,3 +124,11 @@
         }
     }
 }
+
+// Adding top border for panels in modals
+.modal-body {
+    .panel--ignite:not(.panel--ignite__without-border) {
+        border-top: 1px solid #d4d4d4;
+        border-radius: 4px 4px 0 0 ;
+    }
+}
diff --git a/modules/web-console/frontend/app/primitives/radio/index.pug b/modules/web-console/frontend/app/primitives/radio/index.pug
deleted file mode 100644
index f47fd17..0000000
--- a/modules/web-console/frontend/app/primitives/radio/index.pug
+++ /dev/null
@@ -1,37 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin form-field-radio(label, model, name, value, disabled, required, tip)
-    .radio--ignite.ignite-form-field
-        label(id=`{{ ${name} }}Label`)
-            .input-tip
-                if block
-                    block
-                else
-                    input(
-                        id=`{{ ${name} }}Input`
-                        name=`{{ ${name} }}`
-                        type='radio'
-
-                        ng-model=model
-                        ng-value=value
-                        ng-required=required && `${required}`
-                        ng-disabled=disabled && `${disabled}`
-                    )
-                    div
-            span #{label}
-
-            +tooltip(tip, tipOpts)
diff --git a/modules/web-console/frontend/app/primitives/radio/index.scss b/modules/web-console/frontend/app/primitives/radio/index.scss
deleted file mode 100644
index ff9b5b3..0000000
--- a/modules/web-console/frontend/app/primitives/radio/index.scss
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-@import '../../../public/stylesheets/variables';
-
-.radio--ignite {
-    label {
-        padding-left: 20px;
-
-        line-height: 20px;
-        vertical-align: middle;
-
-        cursor: pointer;
-
-        .input-tip {
-            float: left;
-            width: 14px;
-            margin-left: -20px;
-
-            input[type="radio"] {
-                position: relative;
-                left: -20px;
-
-                & + div {
-                    position: absolute;
-                    top: 50%;
-
-                    width: 14px;
-                    height: 14px;
-                    margin-top: -8px;
-
-                    border-radius: 50%;
-                    box-shadow: inset 0 1px 3px 0 rgba(0, 0, 0, 0.35);
-                }
-
-                &:checked + div {
-                    background-color: #0098ff;
-                    box-shadow: none;
-
-                    &:before {
-                        content: '';
-
-                        position: absolute;
-                        top: 50%;
-                        left: 50%;
-
-                        width: 4px;
-                        height: 4px;
-                        margin-top: -2px;
-                        margin-left: -2px;
-
-                        border-radius: 50%;
-                        background-color: #ffffff;
-                        box-shadow: 0 1px 1px 0 rgba(12, 50, 76, 0.3);
-                    }
-                }
-            }
-        }
-    }
-
-    & + .radio--ignite {
-        margin-left: 45px;
-    }
-}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/primitives/spinner-circle/index.scss b/modules/web-console/frontend/app/primitives/spinner-circle/index.scss
new file mode 100644
index 0000000..88152fa
--- /dev/null
+++ b/modules/web-console/frontend/app/primitives/spinner-circle/index.scss
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+$color-inactive-primary: #C5C5C5;
+$color-inactive-secondary: #FFFFFF;
+$color-active-primary: #EE2B27;
+$color-active-secondary: #FF8485;
+
+.spinner-circle {
+  display: inline-block;
+
+  &:before {
+    content: '';
+
+    display: block;
+
+    width: 20px;
+    height: 20px;
+
+    border-width: 1px;
+    border-style: solid;
+    border-radius: 50%;
+    border-color: $color-inactive-primary;
+    border-left-color: $color-active-primary;
+  }
+}
+
+.spinner-circle:before {
+  border-left-width: 2px;
+  border-left-color: $color-active-primary;
+
+  animation-name: spinner-circle--animation;
+  animation-duration: 1s;
+  animation-iteration-count: infinite;
+  animation-timing-function: linear;
+}
+
+@keyframes spinner-circle--animation {
+  0% {
+    transform: rotate(0deg);
+  }
+  100% {
+    transform: rotate(360deg);
+  }
+}
diff --git a/modules/web-console/frontend/app/primitives/timepicker/index.pug b/modules/web-console/frontend/app/primitives/timepicker/index.pug
index 54ce8c1..5e3936c 100644
--- a/modules/web-console/frontend/app/primitives/timepicker/index.pug
+++ b/modules/web-console/frontend/app/primitives/timepicker/index.pug
@@ -14,10 +14,9 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-mixin ignite-form-field-timepicker(label, model, name, mindate, maxdate, disabled, required, placeholder, tip)
-
-    mixin form-field-input()
-        input.form-control(
+mixin form-field__timepicker({ label, model, name, mindate, maxdate, disabled, required, placeholder, tip })
+    mixin __form-field__timepicker()
+        input(
             id=`{{ ${name} }}Input`
             name=`{{ ${name} }}`
 
@@ -35,23 +34,29 @@
             data-arrow-behavior='picker'
 
             data-placement='bottom'
-            data-container=`.timepicker--ignite`
+            data-container='body'
 
             tabindex='0'
 
-            onkeydown="return false"
+            onkeydown='return false'
+            ng-ref='$input'
+            ng-ref-read='ngModel'
         )&attributes(attributes.attributes)
 
-    .timepicker--ignite.ignite-form-field
-        if name
-            +ignite-form-field__label(label, name, required)
+    .form-field.form-field__timepicker.ignite-form-field(id=`{{ ${name} }}Field`)
+        +form-field__label({ label, name, required, disabled })
+            +form-field__tooltip({ title: tip, options: tipOpts })
 
-        .ignite-form-field__control
-            if tip
-                i.tipField.icon-help(bs-tooltip='' data-title=tip)
+        .form-field__control
+            - attributes.type='button'
+            +__form-field__timepicker(attributes=attributes)
 
+        .form-field__errors(
+            ng-messages=`$input.$error`
+            ng-show=`($input.$dirty || $input.$touched || $input.$submitted) && $input.$invalid`
+        )
             if block
                 block
 
-            .input-tip
-                +form-field-input(attributes=attributes)
+            if required
+                +form-field__error({ error: 'required', message: `${errLbl} could not be empty!` })
diff --git a/modules/web-console/frontend/app/primitives/timepicker/index.scss b/modules/web-console/frontend/app/primitives/timepicker/index.scss
index 5be534e..643c741 100644
--- a/modules/web-console/frontend/app/primitives/timepicker/index.scss
+++ b/modules/web-console/frontend/app/primitives/timepicker/index.scss
@@ -17,6 +17,7 @@
 
 .timepicker.dropdown-menu {
     padding: 0 4px;
+    line-height: 30px;
 
     button {
         outline: none;
@@ -27,113 +28,62 @@
         height: 100%;
         padding: 6px;
     }
+
+    thead, tfoot {
+        th {
+            text-align: center;
+            line-height: 0;
+        }
+
+        .btn.btn-default {
+            float: none !important;
+            display: inline-block;
+            margin-right: 0;
+
+            &:active {
+                box-shadow: none;
+                background: none;
+            }
+
+            &:before {
+                content: '';
+
+                display: block;
+                width: 10px;
+                height: 10px;
+
+                border: 2px solid #757575;
+                transform: rotate(45deg);
+            }
+
+            &:hover {
+                background: none;
+            }
+        }
+    }
+
+    thead {
+        th {
+            padding-top: 10px;
+        }
+
+        .btn.btn-default {
+            &:before {
+                border-width: 2px 0 0 2px;
+            }
+        }
+    }
+
+    tfoot {
+        th {
+            padding-top: 2px;
+            padding-bottom: 10px;
+        }
+
+        .btn.btn-default {
+            &:before {
+                border-width: 0 2px 2px 0;
+            }
+        }
+    }
 }
-
-.timepicker--ignite {
-    $height: 36px;
-
-    display: inline-block;
-    width: auto;
-
-    font-size: 14px;
-
-    label.ignite-form-field__label {
-        width: auto;
-        max-width: initial;
-
-        font-size: inherit;
-        line-height: $height;
-    }
-
-    .ignite-form-field__control {
-        @import "./../../../public/stylesheets/variables.scss";
-
-        width: auto;
-
-        input {
-            width: auto;
-            height: $height;
-            min-width: 40px;
-            max-width: 70px;
-            padding: 0;
-            padding-left: 5px;
-
-            cursor: pointer;
-            color: transparent;
-            font-size: inherit;
-            line-height: $height;
-            text-align: left;
-            text-shadow: 0 0 0 $ignite-brand-success;
-
-            border: none;
-            box-shadow: none;
-            background: none;
-
-            &:hover, &:focus {
-                text-shadow: 0 0 0 change-color($ignite-brand-success, $lightness: 26%);
-            }
-        }
-    }
-
-    .dropdown-menu {
-        line-height: 30px;
-
-        thead, tfoot {
-            th {
-                text-align: center;
-                line-height: 0;
-            }
-
-            .btn.btn-default {
-                float: none !important;
-                display: inline-block;
-                margin-right: 0;
-
-                &:active {
-                    box-shadow: none;
-                    background: none;
-                }
-
-                &:before {
-                    content: '';
-
-                    display: block;
-                    width: 10px;
-                    height: 10px;
-
-                    border: 2px solid #757575;
-                    transform: rotate(45deg);
-                }
-
-                &:hover {
-                    background: none;
-                }
-            }
-        }
-
-        thead {
-            th {
-                padding-top: 10px;
-            }
-
-            .btn.btn-default {
-                &:before {
-                    border-width: 2px 0 0 2px;
-                }
-            }
-        }
-
-        tfoot {
-            th {
-                padding-top: 2px;
-                padding-bottom: 10px;
-            }
-
-            .btn.btn-default {
-                &:before {
-                    border-width: 0 2px 2px 0;
-                }
-            }
-        }
-    }
-}
\ No newline at end of file
diff --git a/modules/web-console/frontend/app/primitives/tooltip/index.pug b/modules/web-console/frontend/app/primitives/tooltip/index.pug
deleted file mode 100644
index ea6a344..0000000
--- a/modules/web-console/frontend/app/primitives/tooltip/index.pug
+++ /dev/null
@@ -1,27 +0,0 @@
-//-
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
-
-         http://www.apache.org/licenses/LICENSE-2.0
-
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
-
-mixin tooltip(title, options, tipClass = 'tipField')
-    if title
-        svg(
-            ignite-icon='info'
-            bs-tooltip=''
-
-            data-title=title
-            data-container=options && options.container || false
-            data-placement=options && options.placement || false
-            class=`${tipClass}`
-        )
diff --git a/modules/web-console/frontend/app/primitives/ui-grid/index.scss b/modules/web-console/frontend/app/primitives/ui-grid/index.scss
index c450dad..274e21d 100644
--- a/modules/web-console/frontend/app/primitives/ui-grid/index.scss
+++ b/modules/web-console/frontend/app/primitives/ui-grid/index.scss
@@ -208,10 +208,6 @@
                 }
 
                 .ui-grid-header--subcategories {
-                    .ui-grid-tree-base-row-header-buttons {
-                        margin-top: 10px;
-                    }
-
                     .ui-grid-selection-row-header-buttons {
                         margin-top: 12px;
 
@@ -449,7 +445,7 @@
                     z-index: 1;
 
                     width: 4px;
-                    height: 46px;
+                    height: 47px;
 
                     background: #0067b9;
                     box-shadow: 0 -1px 0 0 rgba(0, 0, 0, .3), 0 -1px 0 0 rgba(0, 103, 185, 1);
diff --git a/modules/web-console/frontend/app/services/AngularStrapSelect.decorator.js b/modules/web-console/frontend/app/services/AngularStrapSelect.decorator.js
index 32fa167..44ed8ed 100644
--- a/modules/web-console/frontend/app/services/AngularStrapSelect.decorator.js
+++ b/modules/web-console/frontend/app/services/AngularStrapSelect.decorator.js
@@ -23,7 +23,7 @@
  * If this problem will be fixed in AngularStrap we can remove this delegate.
  */
 export default angular.module('mgcrea.ngStrap.select')
-    .decorator('$select', ['$delegate', ($delegate) => {
+    .decorator('$select', ['$delegate', function($delegate) {
         function SelectFactoryDecorated(element, controller, config) {
             const delegate = $delegate(element, controller, config);
 
diff --git a/modules/web-console/frontend/app/services/AngularStrapTooltip.decorator.js b/modules/web-console/frontend/app/services/AngularStrapTooltip.decorator.js
index fa59f32..f1f8673 100644
--- a/modules/web-console/frontend/app/services/AngularStrapTooltip.decorator.js
+++ b/modules/web-console/frontend/app/services/AngularStrapTooltip.decorator.js
@@ -26,7 +26,7 @@
     /**
      * Don't hide tooltip when mouse move from element to tooltip.
      */
-    .decorator('$tooltip', ['$delegate', ($delegate) => {
+    .decorator('$tooltip', ['$delegate', function($delegate) {
         function TooltipFactoryDecorated(element, config) {
             let tipElementEntered = false;
 
diff --git a/modules/web-console/frontend/app/services/Clusters.js b/modules/web-console/frontend/app/services/Clusters.js
index 0228a77..e0a2ec7 100644
--- a/modules/web-console/frontend/app/services/Clusters.js
+++ b/modules/web-console/frontend/app/services/Clusters.js
@@ -266,7 +266,7 @@
             }
         },
         evictionThreshold: {
-            step: 0.05,
+            step: 0.001,
             max: 0.999,
             min: 0.5,
             default: 0.9
diff --git a/modules/web-console/frontend/app/services/Confirm.service.js b/modules/web-console/frontend/app/services/Confirm.service.js
index c2eaf35..8c88a40 100644
--- a/modules/web-console/frontend/app/services/Confirm.service.js
+++ b/modules/web-console/frontend/app/services/Confirm.service.js
@@ -39,7 +39,7 @@
                 templateUrl,
                 backdrop: true,
                 onBeforeHide: () => reject(new CancellationError()),
-                controller: ['$scope', ($scope) => {
+                controller: ['$scope', function($scope) {
                     $scope.yesNo = yesNo;
                     $scope.content = content;
                     $scope.confirmCancel = $scope.confirmNo = () => {
@@ -57,7 +57,7 @@
 }
 
 // Confirm popup service.
-export default ['IgniteConfirm', ['$rootScope', '$q', '$modal', '$animate', ($root, $q, $modal, $animate) => {
+export default ['IgniteConfirm', ['$rootScope', '$q', '$modal', '$animate', function($root, $q, $modal, $animate) {
     const scope = $root.$new();
 
     const modal = $modal({templateUrl, scope, show: false, backdrop: true});
diff --git a/modules/web-console/frontend/app/services/CopyToClipboard.service.js b/modules/web-console/frontend/app/services/CopyToClipboard.service.js
index df0bb8a..52285a6 100644
--- a/modules/web-console/frontend/app/services/CopyToClipboard.service.js
+++ b/modules/web-console/frontend/app/services/CopyToClipboard.service.js
@@ -16,7 +16,7 @@
  */
 
 // Service to copy some value to OS clipboard.
-export default ['IgniteCopyToClipboard', ['$window', 'IgniteMessages', ($window, Messages) => {
+export default ['IgniteCopyToClipboard', ['$window', 'IgniteMessages', function($window, Messages) {
     const body = angular.element($window.document.body);
 
     const textArea = angular.element('<textarea/>');
diff --git a/modules/web-console/frontend/app/services/Focus.service.js b/modules/web-console/frontend/app/services/Focus.service.js
index a07e181..a285fbc 100644
--- a/modules/web-console/frontend/app/services/Focus.service.js
+++ b/modules/web-console/frontend/app/services/Focus.service.js
@@ -16,7 +16,7 @@
  */
 
 // Service to transfer focus for specified element.
-export default ['IgniteFocus', ['$timeout', ($timeout) => {
+export default ['IgniteFocus', ['$timeout', function($timeout) {
     return {
         move(id) {
             // Timeout makes sure that is invoked after any other event has been triggered.
diff --git a/modules/web-console/frontend/app/services/FormUtils.service.js b/modules/web-console/frontend/app/services/FormUtils.service.js
index 2c81c57..cda68f7 100644
--- a/modules/web-console/frontend/app/services/FormUtils.service.js
+++ b/modules/web-console/frontend/app/services/FormUtils.service.js
@@ -16,7 +16,7 @@
  */
 import _ from 'lodash';
 
-export default ['IgniteFormUtils', ['$window', 'IgniteFocus', '$rootScope', ($window, Focus, $rootScope) => {
+export default ['IgniteFormUtils', ['$window', 'IgniteFocus', '$rootScope', function($window, Focus, $rootScope) {
     function ensureActivePanel(ui, pnl, focusId) {
         if (ui && ui.loadPanel) {
             const collapses = $('[bs-collapse-target]');
@@ -427,14 +427,6 @@
         ensureActivePanel(panels, id, focusId) {
             ensureActivePanel(panels, id, focusId);
         },
-        confirmUnsavedChanges(dirty, selectFunc) {
-            if (dirty) {
-                if ($window.confirm('You have unsaved changes.\n\nAre you sure you want to discard them?'))
-                    selectFunc();
-            }
-            else
-                selectFunc();
-        },
         saveBtnTipText(dirty, objectName) {
             if (dirty)
                 return 'Save ' + objectName;
diff --git a/modules/web-console/frontend/app/services/LegacyTable.service.js b/modules/web-console/frontend/app/services/LegacyTable.service.js
index 38b041a..2d795ce 100644
--- a/modules/web-console/frontend/app/services/LegacyTable.service.js
+++ b/modules/web-console/frontend/app/services/LegacyTable.service.js
@@ -17,7 +17,7 @@
 
 // TODO: Refactor this service for legacy tables with more than one input field.
 export default ['IgniteLegacyTable',
-    ['IgniteLegacyUtils', 'IgniteFocus', 'IgniteErrorPopover', (LegacyUtils, Focus, ErrorPopover) => {
+    ['IgniteLegacyUtils', 'IgniteFocus', 'IgniteErrorPopover', function(LegacyUtils, Focus, ErrorPopover) {
         function _model(item, field) {
             let path = field.path;
 
diff --git a/modules/web-console/frontend/app/services/LegacyUtils.service.js b/modules/web-console/frontend/app/services/LegacyUtils.service.js
index a169343..1aea642 100644
--- a/modules/web-console/frontend/app/services/LegacyUtils.service.js
+++ b/modules/web-console/frontend/app/services/LegacyUtils.service.js
@@ -18,7 +18,7 @@
 import saver from 'file-saver';
 
 // TODO: Refactor this service for legacy tables with more than one input field.
-export default ['IgniteLegacyUtils', ['IgniteErrorPopover', (ErrorPopover) => {
+export default ['IgniteLegacyUtils', ['IgniteErrorPopover', function(ErrorPopover) {
     function isDefined(v) {
         return !_.isNil(v);
     }
diff --git a/modules/web-console/frontend/app/services/Messages.service.js b/modules/web-console/frontend/app/services/Messages.service.js
index 1337e24..b5e1b3c 100644
--- a/modules/web-console/frontend/app/services/Messages.service.js
+++ b/modules/web-console/frontend/app/services/Messages.service.js
@@ -18,7 +18,7 @@
 import {CancellationError} from 'app/errors/CancellationError';
 
 // Service to show various information and error messages.
-export default ['IgniteMessages', ['$alert', 'IgniteErrorParser', ($alert, errorParser) => {
+export default ['IgniteMessages', ['$alert', 'IgniteErrorParser', function($alert, errorParser) {
     // Common instance of alert modal.
     let msgModal;
 
diff --git a/modules/web-console/frontend/app/services/ModelNormalizer.service.js b/modules/web-console/frontend/app/services/ModelNormalizer.service.js
index 4c7052b..a617784 100644
--- a/modules/web-console/frontend/app/services/ModelNormalizer.service.js
+++ b/modules/web-console/frontend/app/services/ModelNormalizer.service.js
@@ -16,7 +16,7 @@
  */
 
 // Service to normalize objects for dirty checks.
-export default ['IgniteModelNormalizer', () => {
+export default ['IgniteModelNormalizer', function() {
     /**
      * Normalize object for dirty checks.
      *
diff --git a/modules/web-console/frontend/app/services/UnsavedChangesGuard.service.js b/modules/web-console/frontend/app/services/UnsavedChangesGuard.service.js
deleted file mode 100644
index 91244b0..0000000
--- a/modules/web-console/frontend/app/services/UnsavedChangesGuard.service.js
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-const MSG = 'You have unsaved changes.\n\nAre you sure you want to discard them?';
-
-// Service that show confirmation about unsaved changes on user change location.
-export default ['IgniteUnsavedChangesGuard', ['$rootScope', ($root) => {
-    return {
-        install(scope, customDirtyCheck = () => scope.ui.inputForm.$dirty) {
-            scope.$on('$destroy', () => window.onbeforeunload = null);
-
-            const unbind = $root.$on('$stateChangeStart', (event) => {
-                if (_.get(scope, 'ui.inputForm', false) && customDirtyCheck()) {
-                    if (!confirm(MSG)) // eslint-disable-line no-alert
-                        event.preventDefault();
-                    else
-                        unbind();
-                }
-            });
-
-            window.onbeforeunload = () => _.get(scope, 'ui.inputForm.$dirty', false) ? MSG : null;
-        }
-    };
-}]];
diff --git a/modules/web-console/frontend/app/style.scss b/modules/web-console/frontend/app/style.scss
new file mode 100644
index 0000000..c7eb726
--- /dev/null
+++ b/modules/web-console/frontend/app/style.scss
@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+.flex-full-height > [ui-view] {
+	display: flex;
+    flex-direction: column;
+    flex: 1 0 auto;
+}
diff --git a/modules/web-console/frontend/app/vendor.js b/modules/web-console/frontend/app/vendor.js
index 84dea92e..6961e96 100644
--- a/modules/web-console/frontend/app/vendor.js
+++ b/modules/web-console/frontend/app/vendor.js
@@ -15,7 +15,6 @@
  * limitations under the License.
  */
 
-import 'babel-polyfill';
 import 'jquery';
 import 'angular';
 import 'angular-acl';
diff --git a/modules/web-console/frontend/package.json b/modules/web-console/frontend/package.json
index 2702494..33f397f 100644
--- a/modules/web-console/frontend/package.json
+++ b/modules/web-console/frontend/package.json
@@ -5,9 +5,9 @@
   "private": true,
   "main": "index.js",
   "scripts": {
-    "start": "webpack-dev-server --config ./webpack/webpack.dev.babel.js",
+    "start": "webpack-dev-server --config ./webpack/webpack.dev.js",
     "dev": "npm start",
-    "build": "webpack --config ./webpack/webpack.prod.babel.js",
+    "build": "webpack --config ./webpack/webpack.prod.js",
     "test": "karma start ./test/karma.conf.js",
     "test-watch": "npm test -- --no-single-run",
     "eslint": "eslint --format node_modules/eslint-friendly-formatter app/ -- --eff-by-issue"
@@ -27,25 +27,26 @@
     "win32"
   ],
   "dependencies": {
-    "@uirouter/angularjs": "1.0.18",
+    "@babel/plugin-transform-parameters": "7.0.0",
+    "@uirouter/angularjs": "1.0.20",
     "@uirouter/core": "5.0.19",
     "@uirouter/rx": "0.4.1",
     "@uirouter/visualizer": "4.0.2",
-    "angular": "1.7.2",
+    "angular": "1.7.4",
     "angular-acl": "0.1.10",
-    "angular-animate": "1.7.2",
-    "angular-aria": "1.6.6",
-    "angular-cookies": "1.7.2",
+    "angular-animate": "1.7.4",
+    "angular-aria": "1.7.4",
+    "angular-cookies": "1.7.4",
     "angular-drag-and-drop-lists": "1.4.0",
     "angular-gridster": "0.13.14",
-    "angular-messages": "1.6.9",
+    "angular-messages": "1.7.4",
     "angular-motion": "0.4.4",
     "angular-nvd3": "1.0.9",
-    "angular-sanitize": "1.7.2",
-    "angular-smart-table": "2.1.8",
+    "angular-sanitize": "1.7.4",
+    "angular-smart-table": "2.1.11",
     "angular-socket-io": "0.7.0",
     "angular-strap": "2.3.12",
-    "angular-translate": "2.16.0",
+    "angular-translate": "2.18.1",
     "angular-tree-control": "0.2.28",
     "angular-ui-carousel": "0.1.10",
     "angular-ui-grid": "4.6.1",
@@ -55,60 +56,58 @@
     "browser-update": "3.1.13",
     "bson-objectid": "1.1.5",
     "chart.js": "2.7.2",
-    "chartjs-plugin-streaming": "1.5.0",
+    "chartjs-plugin-streaming": "1.6.1",
     "file-saver": "1.3.3",
     "font-awesome": "4.7.0",
     "jquery": "3.2.1",
     "json-bigint": "0.2.3",
     "jsondiffpatch": "0.2.5",
     "jszip": "3.1.5",
-    "lodash": "4.17.10",
+    "lodash": "4.17.11",
     "natural-compare-lite": "1.4.0",
     "nvd3": "1.8.6",
     "outdent": "0.5.0",
     "pako": "1.0.6",
     "roboto-font": "0.1.0",
-    "rxjs": "5.4.2",
+    "rxjs": "5.5.11",
     "socket.io-client": "1.7.3",
     "tf-metatags": "2.0.0"
   },
   "devDependencies": {
-    "@types/angular": "1.6.48",
+    "@babel/core": "7.0.1",
+    "@babel/plugin-proposal-class-properties": "7.0.0",
+    "@babel/plugin-proposal-object-rest-spread": "7.0.0",
+    "@babel/plugin-syntax-dynamic-import": "7.0.0",
+    "@babel/preset-env": "7.0.0",
+    "@types/angular": "1.6.51",
     "@types/angular-animate": "1.5.10",
     "@types/angular-mocks": "1.5.12",
     "@types/angular-strap": "2.3.1",
     "@types/chai": "4.1.4",
-    "@types/copy-webpack-plugin": "4.4.1",
+    "@types/copy-webpack-plugin": "4.4.2",
     "@types/karma": "1.7.4",
     "@types/lodash": "4.14.110",
     "@types/mini-css-extract-plugin": "0.2.0",
     "@types/mocha": "2.2.48",
+    "@types/node": "10.5.1",
     "@types/sinon": "4.0.0",
     "@types/socket.io-client": "1.4.32",
     "@types/ui-grid": "0.0.38",
-    "@types/webpack": "4.4.5",
+    "@types/webpack": "4.4.11",
     "@types/webpack-merge": "4.1.3",
-    "angular-mocks": "1.6.9",
+    "angular-mocks": "1.7.4",
     "app-root-path": "2.0.1",
-    "babel-core": "6.26.0",
-    "babel-eslint": "7.2.3",
-    "babel-loader": "7.1.4",
-    "babel-plugin-add-module-exports": "0.2.1",
-    "babel-plugin-transform-object-rest-spread": "6.26.0",
-    "babel-plugin-transform-runtime": "6.23.0",
-    "babel-polyfill": "6.26.0",
-    "babel-preset-es2015": "6.24.1",
-    "babel-preset-stage-1": "6.24.1",
-    "babel-runtime": "6.26.0",
+    "babel-eslint": "8.2.6",
+    "babel-loader": "8.0.2",
     "bootstrap-sass": "3.3.7",
     "chai": "4.1.0",
     "chalk": "2.1.0",
     "copy-webpack-plugin": "4.5.2",
     "css-loader": "0.28.7",
-    "eslint": "4.3.0",
+    "eslint": "4.19.1",
     "eslint-friendly-formatter": "3.0.0",
-    "eslint-loader": "1.9.0",
-    "eslint-plugin-babel": "4.1.1",
+    "eslint-loader": "2.1.0",
+    "eslint-plugin-babel": "5.2.0",
     "expose-loader": "0.7.5",
     "file-loader": "1.1.11",
     "glob": "7.1.2",
@@ -119,17 +118,17 @@
     "jasmine-core": "2.6.4",
     "json-loader": "0.5.7",
     "karma": "2.0.0",
-    "karma-babel-preprocessor": "6.0.1",
+    "karma-babel-preprocessor": "8.0.0-beta.0",
     "karma-chrome-launcher": "2.2.0",
     "karma-mocha": "1.3.0",
     "karma-mocha-reporter": "2.2.3",
     "karma-teamcity-reporter": "1.0.0",
-    "karma-webpack": "4.0.0-beta.0",
-    "mini-css-extract-plugin": "0.4.1",
+    "karma-webpack": "4.0.0-rc.2",
+    "mini-css-extract-plugin": "0.4.2",
     "mocha": "3.4.2",
     "mocha-teamcity-reporter": "1.1.1",
     "node-fetch": "1.7.3",
-    "node-sass": "4.9.0",
+    "node-sass": "4.9.3",
     "progress": "2.0.0",
     "progress-bar-webpack-plugin": "1.11.0",
     "pug-html-loader": "1.1.0",
@@ -139,14 +138,13 @@
     "sinon": "2.3.8",
     "slash": "1.0.0",
     "style-loader": "0.19.0",
-    "svg-sprite-loader": "3.9.0",
+    "svg-sprite-loader": "3.9.2",
     "teamcity-service-messages": "0.1.9",
-    "type-detect": "4.0.3",
-    "uglifyjs-webpack-plugin": "1.2.4",
-    "webpack": "4.12.0",
-    "webpack-cli": "2.0.14",
-    "webpack-dev-server": "3.1.4",
-    "webpack-merge": "4.1.3",
+    "uglifyjs-webpack-plugin": "1.3.0",
+    "webpack": "4.19.1",
+    "webpack-cli": "3.1.0",
+    "webpack-dev-server": "3.1.8",
+    "webpack-merge": "4.1.4",
     "worker-loader": "2.0.0",
     "yargs": "9.0.1"
   }
diff --git a/modules/web-console/frontend/public/images/icons/index.js b/modules/web-console/frontend/public/images/icons/index.js
index 24ffac1..0951d32 100644
--- a/modules/web-console/frontend/public/images/icons/index.js
+++ b/modules/web-console/frontend/public/images/icons/index.js
@@ -15,31 +15,31 @@
  * limitations under the License.
  */
 
-export alert from './alert.icon.svg';
-export attention from './attention.icon.svg';
-export check from './check.icon.svg';
-export checkmark from './checkmark.icon.svg';
-export clock from './clock.icon.svg';
-export collapse from './collapse.icon.svg';
-export connectedClusters from './connectedClusters.icon.svg';
-export copy from './copy.icon.svg';
-export cross from './cross.icon.svg';
-export csv from './csv.icon.svg';
-export download from './download.icon.svg';
-export exclamation from './exclamation.icon.svg';
-export exit from './exit.icon.svg';
-export expand from './expand.icon.svg';
-export eyeClosed from './eyeClosed.icon.svg';
-export eyeOpened from './eyeOpened.icon.svg';
-export filter from './filter.icon.svg';
-export gear from './gear.icon.svg';
-export home from './home.icon.svg';
-export info from './info.icon.svg';
-export lockClosed from './lockClosed.icon.svg';
-export lockOpened from './lockOpened.icon.svg';
-export manual from './manual.icon.svg';
-export plus from './plus.icon.svg';
-export refresh from './refresh.icon.svg';
-export search from './search.icon.svg';
-export sort from './sort.icon.svg';
-export structure from './structure.icon.svg';
+export {default as alert} from './alert.icon.svg';
+export {default as attention} from './attention.icon.svg';
+export {default as check} from './check.icon.svg';
+export {default as checkmark} from './checkmark.icon.svg';
+export {default as clock} from './clock.icon.svg';
+export {default as collapse} from './collapse.icon.svg';
+export {default as connectedClusters} from './connectedClusters.icon.svg';
+export {default as copy} from './copy.icon.svg';
+export {default as cross} from './cross.icon.svg';
+export {default as csv} from './csv.icon.svg';
+export {default as download} from './download.icon.svg';
+export {default as exclamation} from './exclamation.icon.svg';
+export {default as exit} from './exit.icon.svg';
+export {default as expand} from './expand.icon.svg';
+export {default as eyeClosed} from './eyeClosed.icon.svg';
+export {default as eyeOpened} from './eyeOpened.icon.svg';
+export {default as filter} from './filter.icon.svg';
+export {default as gear} from './gear.icon.svg';
+export {default as home} from './home.icon.svg';
+export {default as info} from './info.icon.svg';
+export {default as lockClosed} from './lockClosed.icon.svg';
+export {default as lockOpened} from './lockOpened.icon.svg';
+export {default as manual} from './manual.icon.svg';
+export {default as plus} from './plus.icon.svg';
+export {default as refresh} from './refresh.icon.svg';
+export {default as search} from './search.icon.svg';
+export {default as sort} from './sort.icon.svg';
+export {default as structure} from './structure.icon.svg';
diff --git a/modules/web-console/frontend/public/stylesheets/style.scss b/modules/web-console/frontend/public/stylesheets/style.scss
index be288dc..a00a585 100644
--- a/modules/web-console/frontend/public/stylesheets/style.scss
+++ b/modules/web-console/frontend/public/stylesheets/style.scss
@@ -496,62 +496,40 @@
         border-top: 1px solid $ignite-border-color;
     }
 
+    .btn-ignite-group {
+        padding: 0;
+        border: none;
+        margin-right: 0;
+        background: transparent;
+    }
+
     .sql-controls {
-        position: relative;
-        top: -1px;
+        display: flex;
+        justify-content: space-between;
         border-top: 1px solid #ddd;
 
-        padding: 10px 10px;
+        & > div {
+            display: flex;
+            padding: 10px;
+            align-items: flex-start;
 
-        input[type="checkbox"] {
-            line-height: 20px;
-            margin-right: 5px;
-        }
-
-        label {
-            line-height: 28px;
-            vertical-align: middle;
-        }
-
-        .btn {
-            line-height: 20px;
-        }
-
-        .ignite-form-field {
-            margin-right: 10px;
-
-            .ignite-form-field__label {
-                float: left;
-                width: auto;
-                margin-right: 5px;
-                line-height: 28px;
+            &:nth-child(2) {
+                flex: 1;
+                justify-content: flex-end;
             }
 
-            .ignite-form-field__label + div {
-                display: block;
-                float: none;
-                width: auto;
+            &:last-child {
+                flex-direction: column;
+                flex-basis: 25%;
             }
         }
 
-        .tipLabel .btn {
-            float: right;
+        button + button {
+            margin-left: 20px;
         }
 
-        .pull-right {
-            margin-left: 10px;
-
-            .ignite-form-field {
-                margin-right: -24px;
-
-                label {
-                    margin-left: 5px;
-                }
-            }
-        }
-
-        .col-sm-3 + .tipLabel {
-            margin-left: 0;
+        .form-field--inline + .form-field--inline {
+            margin-left: 20px;
         }
     }
 
@@ -621,14 +599,6 @@
     font-size: $font-size-large;
     line-height: 24px;
 
-    label {
-        overflow: hidden;
-        text-overflow: ellipsis;
-        white-space: nowrap;
-        max-width: calc(100% - 85px);
-        cursor: pointer;
-    }
-
     .btn-group {
         vertical-align: top;
         margin-left: 10px;
@@ -1782,7 +1752,20 @@
 }
 
 .getting-started {
+    min-height: 240px;
     margin: 15px 15px 300px;
+
+    ul {
+        line-height: 20px;
+    }
+
+    [class*="col-"] {
+        align-self: flex-start !important;
+    }
+
+    .align-center {
+        justify-content: center !important;
+    }
 }
 
 .getting-started-demo {
@@ -2114,6 +2097,7 @@
 
     div {
         display: flex;
+        align-items: center;
 
         &:nth-child(2) {
             .btn-ignite {
diff --git a/modules/web-console/frontend/test/karma.conf.babel.js b/modules/web-console/frontend/test/karma.conf.babel.js
deleted file mode 100644
index dcf6cb0..0000000
--- a/modules/web-console/frontend/test/karma.conf.babel.js
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import path from 'path';
-
-import testCfg from '../webpack/webpack.test';
-
-export default (/** @type {import('karma').Config} */ config) => {
-    config.set({
-        // Base path that will be used to resolve all patterns (eg. files, exclude).
-        basePath: path.resolve('./'),
-
-        // Frameworks to use available frameworks: https://npmjs.org/browse/keyword/karma-adapter
-        frameworks: ['mocha'],
-
-        // List of files / patterns to load in the browser.
-        files: [
-            'node_modules/babel-polyfill/dist/polyfill.js',
-            'node_modules/angular/angular.js',
-            'node_modules/angular-mocks/angular-mocks.js',
-            'app/**/*.spec.js',
-            'test/**/*.test.js'
-        ],
-
-        plugins: [
-            require('karma-chrome-launcher'),
-            require('karma-teamcity-reporter'),
-            require('karma-mocha-reporter'),
-            require('karma-webpack'),
-            require('karma-mocha')
-        ],
-
-        // Preprocess matching files before serving them to the browser
-        // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor.
-        preprocessors: {
-            '+(app|test)/**/*.js': ['webpack']
-        },
-
-        webpack: testCfg,
-
-        webpackMiddleware: {
-            noInfo: true
-        },
-
-        // Test results reporter to use
-        // possible values: 'dots', 'progress'
-        // available reporters: https://npmjs.org/browse/keyword/karma-reporter.
-        reporters: [process.env.TEST_REPORTER || 'mocha'],
-
-        mochaReporter: {
-            showDiff: true
-        },
-
-        // web server port
-        port: 9876,
-
-        // enable / disable colors in the output (reporters and logs)
-        colors: true,
-
-        // level of logging
-        // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
-        logLevel: config.LOG_INFO,
-
-        // enable / disable watching file and executing tests whenever any file changes
-        autoWatch: true,
-
-        // start these browsers
-        // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
-        browsers: ['ChromeHeadlessNoSandbox'],
-        customLaunchers: {
-            ChromeHeadlessNoSandbox: {
-                base: 'ChromeHeadless',
-                flags: ['--no-sandbox']
-            },
-            ChromeDebug: {
-                base: 'Chrome',
-                flags: [
-                    '--start-maximized',
-                    '--auto-open-devtools-for-tabs'
-                ],
-                debug: true
-            }
-        },
-
-        // Continuous Integration mode
-        // if true, Karma captures browsers, runs the tests and exits
-        singleRun: true,
-
-        // Concurrency level
-        // how many browser should be started simultaneous
-        concurrency: Infinity,
-
-        client: {
-            mocha: {
-                ui: 'tdd'
-            }
-        }
-    });
-};
diff --git a/modules/web-console/frontend/test/karma.conf.js b/modules/web-console/frontend/test/karma.conf.js
index e354482..b8c0886 100644
--- a/modules/web-console/frontend/test/karma.conf.js
+++ b/modules/web-console/frontend/test/karma.conf.js
@@ -15,5 +15,98 @@
  * limitations under the License.
  */
 
-require('babel-core/register');
-module.exports = require('./karma.conf.babel.js');
+const path = require('path');
+
+const testCfg = require('../webpack/webpack.test');
+
+module.exports = (/** @type {import('karma').Config} */ config) => {
+    config.set({
+        // Base path that will be used to resolve all patterns (eg. files, exclude).
+        basePath: path.resolve('./'),
+
+        // Frameworks to use available frameworks: https://npmjs.org/browse/keyword/karma-adapter
+        frameworks: ['mocha'],
+
+        // List of files / patterns to load in the browser.
+        files: [
+            'node_modules/angular/angular.js',
+            'node_modules/angular-mocks/angular-mocks.js',
+            'app/**/*.spec.js',
+            'test/**/*.test.js'
+        ],
+
+        plugins: [
+            require('karma-chrome-launcher'),
+            require('karma-teamcity-reporter'),
+            require('karma-mocha-reporter'),
+            require('karma-webpack'),
+            require('karma-mocha')
+        ],
+
+        // Preprocess matching files before serving them to the browser
+        // available preprocessors: https://npmjs.org/browse/keyword/karma-preprocessor.
+        preprocessors: {
+            '+(app|test)/**/*.js': ['webpack']
+        },
+
+        webpack: testCfg,
+
+        webpackMiddleware: {
+            noInfo: true
+        },
+
+        // Test results reporter to use
+        // possible values: 'dots', 'progress'
+        // available reporters: https://npmjs.org/browse/keyword/karma-reporter.
+        reporters: [process.env.TEST_REPORTER || 'mocha'],
+
+        mochaReporter: {
+            showDiff: true
+        },
+
+        // web server port
+        port: 9876,
+
+        // enable / disable colors in the output (reporters and logs)
+        colors: true,
+
+        // level of logging
+        // possible values: config.LOG_DISABLE || config.LOG_ERROR || config.LOG_WARN || config.LOG_INFO || config.LOG_DEBUG
+        logLevel: config.LOG_INFO,
+
+        // enable / disable watching file and executing tests whenever any file changes
+        autoWatch: true,
+
+        // start these browsers
+        // available browser launchers: https://npmjs.org/browse/keyword/karma-launcher
+        browsers: ['ChromeHeadlessNoSandbox'],
+        customLaunchers: {
+            ChromeHeadlessNoSandbox: {
+                base: 'ChromeHeadless',
+                flags: ['--no-sandbox']
+            },
+            ChromeDebug: {
+                base: 'Chrome',
+                flags: [
+                    '--start-maximized',
+                    '--auto-open-devtools-for-tabs'
+                ],
+                debug: true
+            }
+        },
+
+        // Continuous Integration mode
+        // if true, Karma captures browsers, runs the tests and exits
+        singleRun: true,
+
+        // Concurrency level
+        // how many browser should be started simultaneous
+        concurrency: Infinity,
+
+        client: {
+            mocha: {
+                ui: 'tdd'
+            }
+        }
+    });
+};
diff --git a/modules/web-console/frontend/views/sql/paragraph-rate.tpl.pug b/modules/web-console/frontend/views/sql/paragraph-rate.tpl.pug
index 2933a3e..bd65208 100644
--- a/modules/web-console/frontend/views/sql/paragraph-rate.tpl.pug
+++ b/modules/web-console/frontend/views/sql/paragraph-rate.tpl.pug
@@ -14,18 +14,33 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.popover.settings(tabindex='-1' style='width: 200px')
-    .arrow
-    h3.popover-title(style='color: black') Refresh rate
-    button.close(id='paragraph-rate-close' ng-click='$hide()') &times;
+.popover.settings.refresh-rate(tabindex='-1')
+    h3.popover-title Refresh rate
+    button.close(ng-click='$hide()')
+        svg(ignite-icon='cross')
     .popover-content
-        form(name='popoverForm' novalidate)
-            .form-group(style='padding: 5px')
-                .col-sm-4
-                    input.form-control(id='paragraph-rate' ng-init='value = paragraph.rate.value' ng-model='value' type='number' min='1' required ignite-auto-focus)
-                .col-sm-8(style='padding-left: 5px')
-                    button.form-control.select-toggle(id='paragraph-unit' ng-init='unit = paragraph.rate.unit' ng-model='unit' required placeholder='Time unit' bs-select bs-options='item.value as item.label for item in timeUnit' tabindex='0')
-            .form-actions(style='margin-top: 30px; padding: 5px')
-                button.btn.btn-primary(id='paragraph-rate-start' ng-disabled='popoverForm.$invalid' ng-click='startRefresh(paragraph, value, unit); $hide()') Start
-                button.btn.btn-primary.btn-default(id='paragraph-rate-stop' ng-click='stopRefresh(paragraph); $hide()' ng-disabled='!paragraph.rate.installed') Stop
+        form.theme--ignite(name='popoverForm' novalidate ng-init='refreshRate = {}')
+            .form-field.form-field__text.ignite-form-field
+                .form-field__control
+                    input(ng-init='refreshRate.value = paragraph.rate.value' ng-model='refreshRate.value' type='number' min='1' required ignite-auto-focus)
+                .form-field__control
+                    button.select-toggle(ng-init='refreshRate.unit = paragraph.rate.unit'  ng-model='refreshRate.unit' required placeholder='Time unit' bs-select bs-options='item.value as item.label for item in timeUnit' tabindex='0')
+            .form-field.form-field__text.ignite-form-field
+                .actions
+                    button.btn-ignite.btn-ignite--primary(
+                        ng-disabled='popoverForm.$invalid'
+                        ng-click='startRefresh(paragraph, refreshRate.value, refreshRate.unit); $hide()'
+                        ng-hide='paragraph.rate.installed'
+                    ) Start
+
+                    button.btn-ignite.btn-ignite--primary(
+                        ng-disabled='popoverForm.$invalid || (refreshRate.unit === paragraph.rate.unit && refreshRate.value === paragraph.rate.value)'
+                        ng-click='startRefresh(paragraph, refreshRate.value, refreshRate.unit); $hide()'
+                        ng-hide='!paragraph.rate.installed'
+                    ) Start new
+
+                    button.btn-ignite.btn-ignite--primary(
+                        ng-click='stopRefresh(paragraph); $hide()'
+                        ng-hide='!paragraph.rate.installed'
+                    ) Stop
 
diff --git a/modules/web-console/frontend/views/templates/agent-download.tpl.pug b/modules/web-console/frontend/views/templates/agent-download.tpl.pug
index 4da9fad..46acfed 100644
--- a/modules/web-console/frontend/views/templates/agent-download.tpl.pug
+++ b/modules/web-console/frontend/views/templates/agent-download.tpl.pug
@@ -21,7 +21,6 @@
         .modal-content(ng-switch-when='agentMissing')
             .modal-header.header
                 h4.modal-title
-                    i.fa.fa-download
                     span Connection to Ignite Web Agent is not established
             .modal-body.agent-download
                 p Please download and run #[a(href='/api/v1/downloads/agent' target='_self') ignite-web-agent] to use this functionality:
@@ -45,8 +44,9 @@
                     )
 
             .modal-footer
-                button.btn-ignite.btn-ignite--link-success(ng-click='ctrl.back()') {{::ctrl.backText}}
-                a.btn-ignite.btn-ignite--success(href='/api/v1/downloads/agent' target='_self') Download agent
+                div
+                    button.btn-ignite.btn-ignite--link-success(ng-click='ctrl.back()') {{::ctrl.backText}}
+                    a.btn-ignite.btn-ignite--success(href='/api/v1/downloads/agent' target='_self') Download agent
 
         .modal-content(ng-switch-when='nodeMissing')
             .modal-header.header
@@ -65,4 +65,5 @@
                         li Refer to #[b README.txt] in the ignite-web-agent folder for more information.
 
             .modal-footer
-                button.btn-ignite.btn-ignite--link-success(ng-click='ctrl.back()') {{::ctrl.backText}}
+                div
+                    button.btn-ignite.btn-ignite--link-success(ng-click='ctrl.back()') {{::ctrl.backText}}
diff --git a/modules/web-console/frontend/views/templates/batch-confirm.tpl.pug b/modules/web-console/frontend/views/templates/batch-confirm.tpl.pug
index ad8741b..874979b 100644
--- a/modules/web-console/frontend/views/templates/batch-confirm.tpl.pug
+++ b/modules/web-console/frontend/views/templates/batch-confirm.tpl.pug
@@ -18,7 +18,7 @@
     .modal-dialog
         .modal-content
             .modal-header
-                h4.modal-title 
+                h4.modal-title
                     svg(ignite-icon='attention')
                     | Confirmation
                 button.close(type='button' aria-label='Close' ng-click='cancel()')
diff --git a/modules/web-console/frontend/views/templates/confirm.tpl.pug b/modules/web-console/frontend/views/templates/confirm.tpl.pug
index 9f5e2bb..581aa37 100644
--- a/modules/web-console/frontend/views/templates/confirm.tpl.pug
+++ b/modules/web-console/frontend/views/templates/confirm.tpl.pug
@@ -18,17 +18,17 @@
     .modal-dialog.modal-dialog--adjust-height
         .modal-content
             .modal-header
-                h4.modal-title 
-                    i.icon-confirm
+                h4.modal-title
                     span Confirmation
                 button.close(type='button' aria-label='Close' ng-click='confirmCancel()')
                      svg(ignite-icon="cross")
             .modal-body(ng-show='content')
                 p(ng-bind-html='content')
             .modal-footer
-                button#confirm-btn-cancel.btn-ignite.btn-ignite--link-success(ng-click='confirmCancel()') Cancel
+                div
+                    button#confirm-btn-cancel.btn-ignite.btn-ignite--link-success(ng-click='confirmCancel()') Cancel
 
-                button#confirm-btn-no.btn-ignite.btn-ignite--link-success(ng-if='yesNo' ng-click='confirmNo()') No
-                button#confirm-btn-yes.btn-ignite.btn-ignite--success(ignite-auto-focus ng-if='yesNo' ng-click='confirmYes()') Yes
+                    button#confirm-btn-no.btn-ignite.btn-ignite--link-success(ng-if='yesNo' ng-click='confirmNo()') No
+                    button#confirm-btn-yes.btn-ignite.btn-ignite--success(ignite-auto-focus ng-if='yesNo' ng-click='confirmYes()') Yes
 
-                button#confirm-btn-ok.btn-ignite.btn-ignite--success(ignite-auto-focus ng-if='!yesNo' ng-click='confirmYes()') Confirm
+                    button#confirm-btn-ok.btn-ignite.btn-ignite--success(ignite-auto-focus ng-if='!yesNo' ng-click='confirmYes()') Confirm
diff --git a/modules/web-console/frontend/views/templates/demo-info.tpl.pug b/modules/web-console/frontend/views/templates/demo-info.tpl.pug
index 62642b3..437c8a0 100644
--- a/modules/web-console/frontend/views/templates/demo-info.tpl.pug
+++ b/modules/web-console/frontend/views/templates/demo-info.tpl.pug
@@ -14,34 +14,40 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.modal.center(role='dialog')
+.modal.modal--ignite.theme--ignite.center(role='dialog')
     .modal-dialog
         .modal-content
             #errors-container.modal-header.header
-                button.close(ng-click='close()' aria-hidden='true') &times;
+                button.close(ng-click='close()' aria-hidden='true')
+                    svg(ignite-icon="cross")
                 h4.modal-title
-                    i.fa.fa-info-circle
+                    svg(ignite-icon="attention")
                     | {{title}}
             .modal-body
                 div(ng-bind-html='message')
+
                 div(ng-hide='hasAgents')
-                    p &nbsp;
-                    div
-                        h4
-                            i.fa.fa-download.fa-cursor-default
-                            | &nbsp;How To Start Demo
-                        ul
-                            li
-                                a(ng-click='downloadAgent()') #[b Download]
-                                | &nbsp; and unzip ignite-web-agent archive
-                            li #[b Run] shell file ignite-web-agent.{sh|bat}
-                div(ng-show='hasAgents')
+                    h4
+                        i.fa.fa-download.fa-cursor-default
+                        | &nbsp;How To Start Demo
+                    ul
+                        li
+                            a(ng-href='{{downloadAgentHref}}' target='_self') #[b Download]
+                            | &nbsp; and unzip ignite-web-agent archive
+                        li #[b Run] shell file ignite-web-agent.{sh|bat}
+
+                div(ng-hide='!hasAgents')
                     h4
                         i.fa.fa-star-o.fa-cursor-default
                         | &nbsp;Start Demo
                     ul
                         li Web Agent is already started
                         li Close dialog and try Web Console
+
             .modal-footer
-                button.btn.btn-default(ng-class='hasAgents ? "btn-primary" : "btn-default"' ng-click='close()') Close
-                a.btn.btn-primary(ng-hide='hasAgents' href='/api/v1/downloads/agent' target='_self') Download agent
+                .ng-animate-disabled(ng-if='!hasAgents')
+                    button.btn-ignite.btn-ignite--link-success(ng-click='close()') Close
+                    a.btn-ignite.btn-ignite--success(ng-href='{{downloadAgentHref}}' target='_self') Download agent
+
+                .ng-animate-disabled(ng-if='hasAgents')
+                    button.btn-ignite.btn-ignite--success(ng-click='close()') Close
diff --git a/modules/web-console/frontend/views/templates/getting-started.tpl.pug b/modules/web-console/frontend/views/templates/getting-started.tpl.pug
index 3a89035..8b0a03c 100644
--- a/modules/web-console/frontend/views/templates/getting-started.tpl.pug
+++ b/modules/web-console/frontend/views/templates/getting-started.tpl.pug
@@ -14,21 +14,26 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.modal.center(role='dialog')
+include /app/helpers/jade/mixins
+
+.modal.modal--ignite.theme--ignite.center(role='dialog')
     .modal-dialog
         .modal-content
             #errors-container.modal-header.header
-                button.close(ng-click='close()' aria-hidden='true') &times;
-                h4.modal-title 
-                    i.fa.fa-book
+                button.close(ng-click='close()' aria-hidden='true')
+                    svg(ignite-icon="cross")
+                h4.modal-title
                     | {{title}}
-            .getting-started
-                .col-xs-12(ng-bind-html='message')
+            .modal-body
+                .getting-started.row(ng-bind-html='message')
             .modal-footer
-                .checkbox
-                    label
-                        input(type='checkbox' ng-model='ui.showGettingStarted')
-                        | Show getting started on next login
-                a.btn.btn-primary(ng-disabled='isFirst()' ng-click='!isFirst() && prev()') Prev
-                a.btn.btn-primary(ng-disabled='isLast()' ng-click='!isLast() && next()') Next
-                a.btn.btn-primary(ng-click='close()') Close
+                div
+                    +form-field__checkbox({
+                        label: 'Do not show this window again',
+                        model: 'ui.dontShowGettingStarted',
+                        name: '"dontShowGettingStarted"'
+                    })
+                div
+                    a.btn-ignite.btn-ignite--link-success(ng-disabled='isFirst()' ng-click='!isFirst() && prev()') Prev
+                    a.btn-ignite.btn-ignite--link-success(ng-disabled='isLast()' ng-click='!isLast() && next()') Next
+                    a.btn-ignite.btn-ignite--success(ng-click='close()') Close
diff --git a/modules/web-console/frontend/views/templates/message.tpl.pug b/modules/web-console/frontend/views/templates/message.tpl.pug
index 3cdb3c8..bfe4c18 100644
--- a/modules/web-console/frontend/views/templates/message.tpl.pug
+++ b/modules/web-console/frontend/views/templates/message.tpl.pug
@@ -14,16 +14,19 @@
     See the License for the specific language governing permissions and
     limitations under the License.
 
-.modal(tabindex='-1' role='dialog')
+.modal.modal--ignite.theme--ignite.center(tabindex='-1' role='dialog')
     .modal-dialog
         .modal-content
             .modal-header
-                button.close(ng-click='$hide()' aria-hidden='true') &times;
+                button.close(ng-click='$hide()' aria-hidden='true')
+                    svg(ignite-icon="cross")
                 h4.modal-title
-                    i.fa.fa-info-circle
+                    svg(ignite-icon="attention")
                     | {{title}}
-            .modal-body(ng-show='content' style='overflow: auto; max-height: 300px;')
-                p(ng-bind-html='content.join("<br/>")' style='text-align: left; white-space: nowrap;')
+            .modal-body(ng-show='content')
+                .modal-body--inner-content
+                    p(ng-bind-html='content.join("<br/>")')
             .modal-footer
-                .pull-left(ng-show='meta') {{meta}}
-                button.btn.btn-primary(id='confirm-btn-confirm' ng-click='$hide()') Ok
+                div(ng-show='meta') {{meta}}
+                div
+                    button.btn-ignite.btn-ignite--success(ng-click='$hide()') Ok
diff --git a/modules/web-console/frontend/webpack/webpack.common.js b/modules/web-console/frontend/webpack/webpack.common.js
index f2901e1..d8e4ce2 100644
--- a/modules/web-console/frontend/webpack/webpack.common.js
+++ b/modules/web-console/frontend/webpack/webpack.common.js
@@ -15,22 +15,17 @@
  * limitations under the License.
  */
 
-import path from 'path';
-import webpack from 'webpack';
+const path = require('path');
+const webpack = require('webpack');
 
-import transformRuntime from 'babel-plugin-transform-runtime';
-import presetEs2015 from 'babel-preset-es2015';
-import presetStage1 from 'babel-preset-stage-1';
+const CopyWebpackPlugin = require('copy-webpack-plugin');
+const HtmlWebpackPlugin = require('html-webpack-plugin');
+const ProgressBarPlugin = require('progress-bar-webpack-plugin');
 
-import CopyWebpackPlugin from 'copy-webpack-plugin';
-import HtmlWebpackPlugin from 'html-webpack-plugin';
-import ProgressBarPlugin from 'progress-bar-webpack-plugin';
-
-import eslintFormatter from 'eslint-friendly-formatter';
+const eslintFormatter = require('eslint-friendly-formatter');
 
 const basedir = path.join(__dirname, '../');
 const contentBase = path.join(basedir, 'public');
-const node_modules = path.join(basedir, 'node_modules');
 const app = path.join(basedir, 'app');
 
 /** @type {webpack.Configuration} */
@@ -53,7 +48,6 @@
 
     // Resolves modules.
     resolve: {
-        modules: [node_modules],
         // A list of module source folders.
         alias: {
             app,
@@ -62,21 +56,13 @@
         }
     },
 
-    // Resolve loader use postfix.
-    resolveLoader: {
-        modules: [
-            node_modules
-        ],
-        moduleExtensions: ['-loader']
-    },
-
     module: {
         rules: [
             // Exclude tpl.pug files to import in bundle.
             {
                 test: /^(?:(?!tpl\.pug$).)*\.pug$/, // TODO: check this regexp for correct.
                 use: {
-                    loader: 'pug-html',
+                    loader: 'pug-html-loader',
                     options: {
                         basedir
                     }
@@ -87,8 +73,8 @@
             {
                 test: /\.tpl\.pug$/,
                 use: [
-                    'file?exports=false&name=assets/templates/[name].[hash].html',
-                    `pug-html?exports=false&basedir=${basedir}`
+                    'file-loader?exports=false&name=assets/templates/[name].[hash].html',
+                    `pug-html-loader?exports=false&basedir=${basedir}`
                 ]
             },
             { test: /\.worker\.js$/, use: { loader: 'worker-loader' } },
@@ -97,10 +83,8 @@
                 enforce: 'pre',
                 exclude: [/node_modules/],
                 use: [{
-                    loader: 'eslint',
+                    loader: 'eslint-loader',
                     options: {
-                        failOnWarning: false,
-                        failOnError: false,
                         formatter: eslintFormatter,
                         context: process.cwd()
                     }
@@ -108,25 +92,13 @@
             },
             {
                 test: /\.js$/,
-                exclude: [node_modules],
-                use: [{
-                    loader: 'babel-loader',
-                    options: {
-                        cacheDirectory: true,
-                        plugins: [
-                            transformRuntime
-                        ],
-                        presets: [
-                            presetEs2015,
-                            presetStage1
-                        ]
-                    }
-                }]
+                exclude: /node_modules/,
+                use: 'babel-loader'
             },
             {
                 test: /\.(ttf|eot|svg|woff(2)?)(\?v=[\d.]+)?(\?[a-z0-9#-]+)?$/,
                 exclude: [contentBase, /\.icon\.svg$/],
-                use: 'file?name=assets/fonts/[name].[ext]'
+                use: 'file-loader?name=assets/fonts/[name].[ext]'
             },
             {
                 test: /\.icon\.svg$/,
@@ -141,11 +113,11 @@
             {
                 test: /.*\.url\.svg$/,
                 include: [contentBase],
-                use: 'file?name=assets/fonts/[name].[ext]'
+                use: 'file-loader?name=assets/fonts/[name].[ext]'
             },
             {
                 test: /\.(jpe?g|png|gif)$/i,
-                use: 'file?name=assets/images/[name].[hash].[ext]'
+                use: 'file-loader?name=assets/images/[name].[hash].[ext]'
             },
             {
                 test: require.resolve('jquery'),
@@ -169,17 +141,6 @@
 
     // Load plugins.
     plugins: [
-        new webpack.LoaderOptionsPlugin({
-            options: {
-                pug: {
-                    basedir
-                },
-                eslint: {
-                    configFile: path.join(basedir, '.eslintrc')
-                },
-                target: 'web'
-            }
-        }),
         new webpack.ProvidePlugin({
             $: 'jquery',
             'window.jQuery': 'jquery',
@@ -198,4 +159,4 @@
     ]
 };
 
-export default config;
+module.exports = config;
diff --git a/modules/web-console/frontend/webpack/webpack.dev.babel.js b/modules/web-console/frontend/webpack/webpack.dev.babel.js
deleted file mode 100644
index c5950ee..0000000
--- a/modules/web-console/frontend/webpack/webpack.dev.babel.js
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import merge from 'webpack-merge';
-
-import path from 'path';
-
-import commonCfg from './webpack.common';
-
-const MiniCssExtractPlugin = require('mini-css-extract-plugin');
-
-const backendPort = process.env.BACKEND_PORT || 3000;
-const devServerPort = process.env.PORT || 9000;
-const devServerHost = process.env.HOST || '0.0.0.0';
-
-export default merge(commonCfg, {
-    mode: 'development',
-    devtool: 'source-map',
-    watch: true,
-    module: {
-        exprContextCritical: false,
-        rules: [
-            {
-                test: /\.css$/,
-                use: ['style', 'css']
-            },
-            {
-                test: /\.scss$/,
-                use: [
-                    MiniCssExtractPlugin.loader, // style-loader does not work with styles in IgniteModules
-                    {
-                        loader: 'css-loader',
-                        options: {
-                            sourceMap: true
-                        }
-                    },
-                    {
-                        loader: 'sass-loader',
-                        options: {
-                            sourceMap: true,
-                            includePaths: [ path.join(__dirname, '../') ]
-                        }
-                    }
-                ]
-            }
-        ]
-    },
-    plugins: [
-        new MiniCssExtractPlugin({filename: 'assets/css/[name].css'})
-    ],
-    devServer: {
-        compress: true,
-        historyApiFallback: true,
-        disableHostCheck: true,
-        contentBase: path.resolve('build'),
-        inline: true,
-        proxy: {
-            '/socket.io': {
-                target: `http://localhost:${backendPort}`,
-                ws: true
-            },
-            '/agents': {
-                target: `http://localhost:${backendPort}`,
-                ws: true
-            },
-            '/api/*': {
-                target: `http://localhost:${backendPort}`
-            }
-        },
-        watchOptions: {
-            aggregateTimeout: 1000,
-            poll: 2000
-        },
-        stats: 'errors-only',
-        host: devServerHost,
-        port: devServerPort
-    }
-});
diff --git a/modules/web-console/frontend/webpack/webpack.dev.js b/modules/web-console/frontend/webpack/webpack.dev.js
new file mode 100644
index 0000000..3af6377
--- /dev/null
+++ b/modules/web-console/frontend/webpack/webpack.dev.js
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const merge = require('webpack-merge');
+
+const path = require('path');
+
+const commonCfg = require('./webpack.common');
+
+const MiniCssExtractPlugin = require('mini-css-extract-plugin');
+
+const backendPort = process.env.BACKEND_PORT || 3000;
+const devServerPort = process.env.PORT || 9000;
+const devServerHost = process.env.HOST || '0.0.0.0';
+
+module.exports = merge(commonCfg, {
+    mode: 'development',
+    devtool: 'source-map',
+    watch: true,
+    module: {
+        exprContextCritical: false,
+        rules: [
+            {
+                test: /\.css$/,
+                use: ['style-loader', 'css-loader']
+            },
+            {
+                test: /\.scss$/,
+                use: [
+                    MiniCssExtractPlugin.loader, // style-loader does not work with styles in IgniteModules
+                    {
+                        loader: 'css-loader',
+                        options: {
+                            sourceMap: true
+                        }
+                    },
+                    {
+                        loader: 'sass-loader',
+                        options: {
+                            sourceMap: true,
+                            includePaths: [ path.join(__dirname, '../') ]
+                        }
+                    }
+                ]
+            }
+        ]
+    },
+    plugins: [
+        new MiniCssExtractPlugin({filename: 'assets/css/[name].css'})
+    ],
+    devServer: {
+        compress: true,
+        historyApiFallback: true,
+        disableHostCheck: true,
+        contentBase: path.resolve('build'),
+        inline: true,
+        proxy: {
+            '/socket.io': {
+                target: `http://localhost:${backendPort}`,
+                ws: true
+            },
+            '/agents': {
+                target: `http://localhost:${backendPort}`,
+                ws: true
+            },
+            '/api/*': {
+                target: `http://localhost:${backendPort}`
+            }
+        },
+        watchOptions: {
+            aggregateTimeout: 1000,
+            poll: 2000
+        },
+        stats: 'errors-only',
+        host: devServerHost,
+        port: devServerPort
+    }
+});
diff --git a/modules/web-console/frontend/webpack/webpack.prod.babel.js b/modules/web-console/frontend/webpack/webpack.prod.babel.js
deleted file mode 100644
index a5aa1c6..0000000
--- a/modules/web-console/frontend/webpack/webpack.prod.babel.js
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import path from 'path';
-import merge from 'webpack-merge';
-
-const MiniCssExtractPlugin = require('mini-css-extract-plugin');
-import UglifyJSPlugin from 'uglifyjs-webpack-plugin';
-
-import commonCfg from './webpack.common';
-
-const basedir = path.join(__dirname, '../');
-
-export default merge(commonCfg, {
-    bail: true, // Cancel build on error.
-    mode: 'production',
-    module: {
-        rules: [
-            {
-                test: /\.css$/,
-                use: [MiniCssExtractPlugin.loader, 'css-loader']
-            },
-            {
-                test: /\.scss$/,
-                use: [MiniCssExtractPlugin.loader, 'css-loader', {
-                    loader: 'sass',
-                    options: {
-                        includePaths: [basedir]
-                    }
-                }]
-            }
-        ]
-    },
-    plugins: [
-        new MiniCssExtractPlugin({filename: 'assets/css/[name].[hash].css'})
-    ],
-    optimization: {
-        minimizer: [
-            new UglifyJSPlugin({
-                uglifyOptions: {
-                    keep_fnames: true,
-                    keep_classnames: true
-                }
-            })
-        ]
-    }
-});
diff --git a/modules/web-console/frontend/webpack/webpack.prod.js b/modules/web-console/frontend/webpack/webpack.prod.js
new file mode 100644
index 0000000..fa6374e
--- /dev/null
+++ b/modules/web-console/frontend/webpack/webpack.prod.js
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+const path = require('path');
+const merge = require('webpack-merge');
+
+const MiniCssExtractPlugin = require('mini-css-extract-plugin');
+const UglifyJSPlugin = require('uglifyjs-webpack-plugin');
+
+const commonCfg = require('./webpack.common');
+
+const basedir = path.join(__dirname, '../');
+
+module.exports = merge(commonCfg, {
+    bail: true, // Cancel build on error.
+    mode: 'production',
+    module: {
+        rules: [
+            {
+                test: /\.css$/,
+                use: [MiniCssExtractPlugin.loader, 'css-loader']
+            },
+            {
+                test: /\.scss$/,
+                use: [MiniCssExtractPlugin.loader, 'css-loader', {
+                    loader: 'sass-loader',
+                    options: {
+                        includePaths: [basedir]
+                    }
+                }]
+            }
+        ]
+    },
+    plugins: [
+        new MiniCssExtractPlugin({filename: 'assets/css/[name].[hash].css'})
+    ],
+    optimization: {
+        minimizer: [
+            new UglifyJSPlugin({
+                uglifyOptions: {
+                    keep_fnames: true,
+                    keep_classnames: true
+                }
+            })
+        ]
+    }
+});
diff --git a/modules/web-console/frontend/webpack/webpack.test.js b/modules/web-console/frontend/webpack/webpack.test.js
index 2ade625..c6d90df 100644
--- a/modules/web-console/frontend/webpack/webpack.test.js
+++ b/modules/web-console/frontend/webpack/webpack.test.js
@@ -15,11 +15,10 @@
  * limitations under the License.
  */
 
-import merge from 'webpack-merge';
+const merge = require('webpack-merge');
+const commonCfg = require('./webpack.common');
 
-import commonCfg from './webpack.common';
-
-export default merge(commonCfg, {
+module.exports = merge(commonCfg, {
     mode: 'development',
     cache: true,
     node: {
diff --git a/modules/web/src/test/java/org/apache/ignite/internal/websession/WebSessionSelfTest.java b/modules/web/src/test/java/org/apache/ignite/internal/websession/WebSessionSelfTest.java
index e566624..269b273 100644
--- a/modules/web/src/test/java/org/apache/ignite/internal/websession/WebSessionSelfTest.java
+++ b/modules/web/src/test/java/org/apache/ignite/internal/websession/WebSessionSelfTest.java
@@ -65,6 +65,12 @@
     /** Servers count in load test. */
     private static final int SRV_CNT = 3;
 
+    /** Session invalidated marker. Added to HTTP response to indicate that session invalidation was successful. */
+    public static final String INVALIDATED = "invalidated";
+
+    /** Session invalidated failed marker fot HTTP reponse. */
+    public static final String FAILED = "failed";
+
     /**
      * @return Name of the cache for this test.
      */
@@ -675,7 +681,13 @@
 
                 assertEquals("true", reqSesValid);
 
-                assertEquals("invalidated", rdr.readLine());
+                StringBuilder sb = new StringBuilder();
+                String line;
+
+                while ((line = rdr.readLine()) != null)
+                    sb.append(line);
+
+                assertTrue(sb.toString().contains(INVALIDATED));
             }
         }
         finally {
@@ -1075,10 +1087,11 @@
 
                 try {
                     req.getSession().invalidate();
-                    res.getWriter().println("invalidated");
+
+                    res.getWriter().println(INVALIDATED);
                 }
                 catch (Exception ignored) {
-                    res.getWriter().println("failed");
+                    res.getWriter().println(FAILED);
                 }
 
                 res.getWriter().flush();
@@ -1120,10 +1133,10 @@
                 res.getWriter().flush();
 
             } else if (req.getPathInfo().equals("/simple")) {
-                HttpSession session = req.getSession();
-                X.println(">>>", "Request session simple: " + session.getId(), ">>>");
+                HttpSession ses = req.getSession();
+                X.println(">>>", "Request session simple: " + ses.getId(), ">>>");
 
-                res.getWriter().write(session.getId());
+                res.getWriter().write(ses.getId());
 
                 res.getWriter().flush();
             }
@@ -1138,11 +1151,11 @@
                     X.printerrln("Login failed due to exception.", e);
                 }
 
-                HttpSession session = req.getSession();
+                HttpSession ses = req.getSession();
 
-                X.println(">>>", "Logged In session: " + session.getId(), ">>>");
+                X.println(">>>", "Logged In session: " + ses.getId(), ">>>");
 
-                res.getWriter().write(session.getId());
+                res.getWriter().write(ses.getId());
 
                 res.getWriter().flush();
             }
@@ -1248,9 +1261,11 @@
 
         /** {@inheritDoc} */
         @Override public int hashCode() {
-            int result = val != null ? val.hashCode() : 0;
-            result = 31 * result + (keepBinaryFlag ? 1 : 0);
-            return result;
+            int res = val != null ? val.hashCode() : 0;
+
+            res = 31 * res + (keepBinaryFlag ? 1 : 0);
+
+            return res;
         }
     }
 }
diff --git a/modules/yardstick/config/benchmark-atomic-sequence.properties b/modules/yardstick/config/benchmark-atomic-sequence.properties
new file mode 100644
index 0000000..c7ef074
--- /dev/null
+++ b/modules/yardstick/config/benchmark-atomic-sequence.properties
@@ -0,0 +1,84 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms6g \
+-Xmx6g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=ThroughputLatencyProbe,PercentileProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# Comma-separated list of the hosts to run BenchmarkServers on. 2 nodes on local host are enabled by default.
+SERVER_HOSTS=localhost,localhost,localhost
+
+# Comma-separated list of the hosts to run BenchmarkDrivers on. 1 node on local host is enabled by default.
+DRIVER_HOSTS=localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Warmup.
+w=60
+
+# Duration.
+d=300
+
+# Threads count.
+t=64
+
+# Batch size
+bs=10000
+
+# Backups count.
+b=1
+
+# Run configuration.
+# Note that each benchmark is set to run for 300 seconds (5 min) with warm-up set to 60 seconds (1 minute).
+CONFIGS="\
+-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -bs ${bs} -b ${b} -w ${w} -d ${d} -t ${t} -dn IgniteAtomicSequenceAddAndGetBenchmark -sn IgniteNode -nn ${nodesNum} -ds ${ver}atomic-sequence-addAndGet-benchmark-${bs}-${b}-backup, \
+-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -bs ${bs} -b ${b} -w ${w} -d ${d} -t ${t} -dn IgniteAtomicSequenceGetAndAddBenchmark -sn IgniteNode -nn ${nodesNum} -ds ${ver}atomic-sequence-getAndAdd-benchmark-${bs}-${b}-backup, \
+-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -bs ${bs} -b ${b} -w ${w} -d ${d} -t ${t} -dn IgniteAtomicSequenceGetAndIncrementBenchmark -sn IgniteNode -nn ${nodesNum} -ds ${ver}atomic-sequence-getAndIncrement-benchmark-${bs}-${b}-backup, \
+-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -bs ${bs} -b ${b} -w ${w} -d ${d} -t ${t} -dn IgniteAtomicSequenceIncrementAndGetBenchmark -sn IgniteNode -nn ${nodesNum} -ds ${ver}atomic-sequence-incrementAndGet-benchmark-${bs}-${b}-backup, \
+"
diff --git a/modules/yardstick/config/mvcc/benchmark-mvcc-messages.sh b/modules/yardstick/config/mvcc/benchmark-mvcc-messages.sh
new file mode 100644
index 0000000..47f546d
--- /dev/null
+++ b/modules/yardstick/config/mvcc/benchmark-mvcc-messages.sh
@@ -0,0 +1,97 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Configuration to measure increased messages load with mvcc turned on.
+#
+
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms8g \
+-Xmx8g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=ThroughputLatencyProbe,PercentileProbe,DStatProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# The benchmark is designed to run with 1 client node (driver itself) and many (4 for instance) server nodes.
+SERVER_HOSTS=localhost,localhost,localhost,localhost
+DRIVER_HOSTS=localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Warmup.
+w=30
+
+# Duration.
+d=300
+
+# Threads count.
+t=1
+
+# Sync mode.
+sm=FULL_SYNC
+
+# Parameters that should be the same across all the benchmarks launches.
+commonParams="-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -nn ${nodesNum} -w ${w} -d ${d} \
+  -jdbc jdbc:ignite:thin://auto.find/ -t ${t} -sm ${sm} \
+  --clientNodesAfterId 100 \
+  -sn IgniteNode -cl --range 1000000"
+
+# Run configuration which contains all benchmarks.
+# Note that each benchmark is set to run for 300 seconds (5 min) with warm-up set to 60 seconds (1 minute).
+CONFIGS="\
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-off -b 0 --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-on -b 0 --sqlRange 1  --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-off -b 0 --sqlRange 1000, --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-on -b 0 --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-backup-2-mvcc-off -b 2 --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-backup-2-mvcc-on -b 2 --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-backup-2-mvcc-off -b 2 --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-backup-2-mvcc-on -b 2 --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+"
diff --git a/modules/yardstick/config/mvcc/benchmark-mvcc-processor.sh b/modules/yardstick/config/mvcc/benchmark-mvcc-processor.sh
new file mode 100644
index 0000000..25525a4
--- /dev/null
+++ b/modules/yardstick/config/mvcc/benchmark-mvcc-processor.sh
@@ -0,0 +1,94 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Configuration to compare mvcc on/off. This benchmark creates load on mvcc processor.
+#
+
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms8g \
+-Xmx8g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=ThroughputLatencyProbe,PercentileProbe,DStatProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# The benchmark is designed to run with 4 client nodes (drivers) and 1 server node.
+SERVER_HOSTS=localhost
+DRIVER_HOSTS=localhost,localhost,localhost,localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Warmup.
+w=30
+
+# Duration.
+d=300
+
+# Threads count.
+t=16
+
+# Sync mode.
+sm=FULL_SYNC
+
+# Parameters that should be the same across all the benchmarks launches.
+commonParams="-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -nn ${nodesNum} -w ${w} -d ${d} \
+  -jdbc jdbc:ignite:thin://auto.find/ -t ${t} -sm ${sm} \
+  --clientNodesAfterId 100 \
+  -sn IgniteNode -cl --range 1000000"
+
+# Run configuration which contains all benchmarks.
+# Note that each benchmark is set to run for 300 seconds (5 min) with warm-up set to 60 seconds (1 minute).
+CONFIGS="\
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-off -b 0 --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-on -b 0 --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-25-backup-0-mvcc-off -b 0 --sqlRange 25 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-25-backup-0-mvcc-on -b 0 --sqlRange 25 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-off -b 0 --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccProcessorBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-on -b 0 --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT \
+"
diff --git a/modules/yardstick/config/mvcc/benchmark-mvcc-updates-contention.sh b/modules/yardstick/config/mvcc/benchmark-mvcc-updates-contention.sh
new file mode 100644
index 0000000..39f7424
--- /dev/null
+++ b/modules/yardstick/config/mvcc/benchmark-mvcc-updates-contention.sh
@@ -0,0 +1,95 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Configuration to measure performance of concurrent sql updates with contention.
+# Update keys are shared among the threads/hosts.
+#
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms8g \
+-Xmx8g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=ThroughputLatencyProbe,PercentileProbe,DStatProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# The benchmark is designed to run with 4 client node (drivers) and several (2 for instance) server nodes
+SERVER_HOSTS=localhost,localhost
+DRIVER_HOSTS=localhost,localhost,localhost,localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Warmup.
+w=30
+
+# Duration.
+d=300
+
+# Threads count.
+t=16
+
+# Sync mode.
+sm=FULL_SYNC
+
+# Parameters that should be the same across all the benchmarks launches.
+commonParams="-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -nn ${nodesNum} -w ${w} -d ${d} \
+  -jdbc jdbc:ignite:thin://auto.find/ -t ${t} -sm ${sm} \
+  --clientNodesAfterId 100 \
+  -sn IgniteNode -cl \
+  --range 1000000 --mvcc-contention-range 10000"
+
+# Run configuration which contains all benchmarks.
+# Note that each benchmark is set to run for 300 seconds (5 min) with warm-up set to 60 seconds (1 minute).
+CONFIGS="\
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-off -b 0 --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-1-backup-0-mvcc-on -b 0 --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-25-backup-0-mvcc-off -b 0 --sqlRange 25 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-25-backup-0-mvcc-on -b 0 --sqlRange 25 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-off -b 0 --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn MvccUpdateContentionBenchmark -ds ${ver}sql-update-batch-1000-backup-0-mvcc-on -b 0 --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT \
+"
diff --git a/modules/yardstick/config/mvcc/benchmark-thin-native.properties b/modules/yardstick/config/mvcc/benchmark-thin-native.properties
new file mode 100644
index 0000000..7281f21
--- /dev/null
+++ b/modules/yardstick/config/mvcc/benchmark-thin-native.properties
@@ -0,0 +1,123 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Configuration to measure mvcc impact on jdbc operations.
+#
+
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms8g \
+-Xmx8g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=ThroughputLatencyProbe,PercentileProbe,DStatProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# The benchmark is applicable only for 2 servers (the second server is started in client mode) and 1 driver.
+SERVER_HOSTS=localhost,localhost
+DRIVER_HOSTS=localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Backups count.
+b=1
+
+# Warmup.
+w=30
+
+# Duration.
+d=300
+
+# Threads count.
+t=4
+
+# Sync mode.
+sm=FULL_SYNC
+
+# Parameters that should be the same across all the benchmarks launches.
+commonParams="-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -nn ${nodesNum} -b ${b} -w ${w} -d ${d} \
+  -jdbc jdbc:ignite:thin://auto.find/ -t ${t} -sm ${sm} \
+  --clientNodesAfterId 0 \
+  -sn IgniteNode -cl --range 1000000"
+
+# Run configuration which contains all benchmarks.
+# Note that each benchmark is set to run for 300 seconds (5 min) with warm-up set to 60 seconds (1 minute).
+CONFIGS="\
+${commonParams} -dn JdbcSqlInsertDeleteBenchmark -ds ${ver}sql-insert-delete-batch-1-jdbc-thin-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn JdbcSqlInsertDeleteBenchmark -ds ${ver}sql-insert-delete-batch-1-jdbc-thin-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn JdbcSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1-jdbc-thin-mvcc-off --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn JdbcSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1-jdbc-thin-mvcc-on --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn JdbcSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1000-jdbc-thin-mvcc-off --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn JdbcSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1000-jdbc-thin-mvcc-on --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn JdbcSqlUpdateBenchmark -ds ${ver}sql-update-batch-1-jdbc-thin-mvcc-off --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn JdbcSqlUpdateBenchmark -ds ${ver}sql-update-batch-1-jdbc-thin-mvcc-on --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn JdbcSqlUpdateBenchmark -ds ${ver}sql-update-batch-1000-jdbc-thin-mvcc-off --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn JdbcSqlUpdateBenchmark -ds ${ver}sql-update-batch-1000-jdbc-thin-mvcc-on --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+  \
+${commonParams} -dn NativeSqlInsertDeleteBenchmark -ds ${ver}sql-insert-delete-batch-1-native-sql-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlInsertDeleteBenchmark -ds ${ver}sql-insert-delete-batch-1-native-sql-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1-native-sql-mvcc-off --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1-native-sql-mvcc-on --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1000-native-sql-mvcc-off --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlQueryRangeBenchmark -ds ${ver}sql-select-batch-1000-native-sql-mvcc-on --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-native-sql-mvcc-off --sqlRange 1 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1-native-sql-mvcc-on --sqlRange 1 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-native-sql-mvcc-off --sqlRange 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeSqlUpdateRangeBenchmark -ds ${ver}sql-update-batch-1000-native-sql-mvcc-on --sqlRange 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+  \
+${commonParams} -dn NativeJavaApiPutRemoveBenchmark -ds ${ver}sql-update-batch-1-native-sql-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeJavaApiPutRemoveBenchmark -ds ${ver}sql-update-batch-1-native-sql-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT \
+"
diff --git a/modules/yardstick/config/upload/benchmark-jdbc-thin-inmemory-mvcc.properties b/modules/yardstick/config/upload/benchmark-jdbc-thin-inmemory-mvcc.properties
new file mode 100644
index 0000000..dad8ed7
--- /dev/null
+++ b/modules/yardstick/config/upload/benchmark-jdbc-thin-inmemory-mvcc.properties
@@ -0,0 +1,104 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# Benchmarks for data upload in inmemory mode (persistence disabled) with and without mvcc.
+#
+
+now0=`date +'%H%M%S'`
+
+# JVM options.
+JVM_OPTS=${JVM_OPTS}" -DIGNITE_QUIET=false"
+
+# Uncomment to enable concurrent garbage collection (GC) if you encounter long GC pauses.
+JVM_OPTS=${JVM_OPTS}" \
+-Xms8g \
+-Xmx8g \
+-Xloggc:./gc${now0}.log \
+-XX:+PrintGCDetails \
+-verbose:gc \
+-XX:+UseParNewGC \
+-XX:+UseConcMarkSweepGC \
+-XX:+PrintGCDateStamps \
+"
+
+#Ignite version
+ver="RELEASE-"
+
+# List of default probes.
+# Add DStatProbe or VmStatProbe if your OS supports it (e.g. if running on Linux).
+BENCHMARK_DEFAULT_PROBES=TotalTimeProbe
+
+# Packages where the specified benchmark is searched by reflection mechanism.
+BENCHMARK_PACKAGES=org.yardstickframework,org.apache.ignite.yardstick
+
+# Flag which indicates to restart the servers before every benchmark execution.
+RESTART_SERVERS=true
+
+# Probe point writer class name.
+# BENCHMARK_WRITER=
+
+# The benchmark is applicable only for 2 servers (the second server is started in client mode) and 1 driver.
+SERVER_HOSTS=localhost,localhost
+DRIVER_HOSTS=localhost
+
+# Remote username.
+# REMOTE_USER=
+
+# Number of nodes, used to wait for the specified number of nodes to start.
+nodesNum=$((`echo ${SERVER_HOSTS} | tr ',' '\n' | wc -l` + `echo ${DRIVER_HOSTS} | tr ',' '\n' | wc -l`))
+
+# Backups count.
+b=1
+
+# Warmup.
+w=0
+
+# Threads count.
+t=1
+
+# Sync mode.
+sm=FULL_SYNC
+
+# Parameters that should be the same across all the benchmarks launches.
+commonParams="\
+-cfg ${SCRIPT_DIR}/../config/ignite-localhost-config.xml -nn ${nodesNum} -b ${b} \
+  --warmup ${w} --operations 1 \
+  -jdbc jdbc:ignite:thin://auto.find/ \
+  --threads ${t} --syncMode ${sm} -sn IgniteNode \
+  --upload-rows 1000000 -cl \
+  --clientNodesAfterId 0 \
+"
+
+# Run configuration which contains all benchmarks.
+# Note that each benchmark is set to run only one time, warmup parameter is set to 0 due to custom warmup operation.
+CONFIGS="\
+${commonParams} -dn NativePutBenchmark -ds ${ver}upload-native-put-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativePutBenchmark -ds ${ver}upload-native-put-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn NativeStreamerBenchmark -ds ${ver}upload-native-streamer-mvcc-off --streamer-local-batch-size 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn NativeStreamerBenchmark -ds ${ver}upload-native-streamer-mvcc-on --streamer-local-batch-size 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn CopyBenchmark -ds ${ver}upload-copy-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn CopyBenchmark -ds ${ver}upload-copy-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn InsertBenchmark -ds ${ver}upload-insert-mvcc-off --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn InsertBenchmark -ds ${ver}upload-insert-mvcc-on --atomic-mode TRANSACTIONAL_SNAPSHOT, \
+  \
+${commonParams} -dn BatchedInsertBenchmark -ds ${ver}upload-batched-insert-mvcc-off --upload-jdbc-batch-size 1000 --atomic-mode TRANSACTIONAL, \
+${commonParams} -dn BatchedInsertBenchmark -ds ${ver}upload-batched-insert-mvcc-on --upload-jdbc-batch-size 1000 --atomic-mode TRANSACTIONAL_SNAPSHOT \
+"
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java
index 72409a0..3f4fddc 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteBenchmarkArguments.java
@@ -19,17 +19,16 @@
 
 import com.beust.jcommander.Parameter;
 import com.beust.jcommander.ParametersDelegate;
-import java.util.Collections;
+import java.util.ArrayList;
+import java.util.List;
 import org.apache.ignite.IgniteDataStreamer;
+import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.CacheWriteSynchronizationMode;
 import org.apache.ignite.configuration.DataStorageConfiguration;
 import org.apache.ignite.internal.util.tostring.GridToStringBuilder;
 import org.apache.ignite.internal.util.tostring.GridToStringInclude;
 import org.apache.ignite.transactions.TransactionConcurrency;
 import org.apache.ignite.transactions.TransactionIsolation;
-
-import java.util.ArrayList;
-import java.util.List;
 import org.apache.ignite.yardstick.cache.IgniteStreamerBenchmark;
 import org.apache.ignite.yardstick.upload.UploadBenchmarkArguments;
 import org.jetbrains.annotations.Nullable;
@@ -60,6 +59,10 @@
     private CacheWriteSynchronizationMode syncMode = CacheWriteSynchronizationMode.PRIMARY_SYNC;
 
     /** */
+    @Parameter(names = {"--atomic-mode", "--atomicMode"})
+    @Nullable private CacheAtomicityMode atomicMode = null;
+
+    /** */
     @Parameter(names = {"-cl", "--client"}, description = "Client flag")
     private boolean clientOnly = false;
 
@@ -277,6 +280,15 @@
     @GridToStringInclude
     public UploadBenchmarkArguments upload = new UploadBenchmarkArguments();
 
+    /** */
+    @Parameter(names = {"--mvcc-contention-range", "--mvccContentionRange"},
+        description = "Mvcc benchmark specific: " +
+            "Size of range of table keys that should be used in query. " +
+            "Should be less than 'range'. " +
+            "Useful together with 'sqlRange' to control, how often key contentions of sql operations occur.")
+    @GridToStringInclude
+    public long mvccContentionRange = 10_000;
+
     /**
      * @return {@code True} if need set {@link DataStorageConfiguration}.
      */
@@ -389,6 +401,11 @@
         return syncMode;
     }
 
+    /** With what cache atomicity mode to create tables. */
+    @Nullable public CacheAtomicityMode atomicMode(){
+        return atomicMode;
+    }
+
     /**
      * @return Backups.
      */
@@ -688,6 +705,13 @@
         return clientNodesAfterId;
     }
 
+    /**
+     * @return Mvcc contention range.
+     */
+    public long mvccContentionRange() {
+        return mvccContentionRange;
+    }
+
     /** {@inheritDoc} */
     @Override public String toString() {
         return GridToStringBuilder.toString(IgniteBenchmarkArguments.class, this);
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinAbstractBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinAbstractBenchmark.java
new file mode 100644
index 0000000..4ba595d
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinAbstractBenchmark.java
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.ConcurrentLinkedDeque;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.client.ClientCache;
+import org.apache.ignite.client.IgniteClient;
+import org.apache.ignite.yardstick.thin.cache.IgniteThinBenchmarkUtils;
+import org.yardstickframework.BenchmarkConfiguration;
+import org.yardstickframework.BenchmarkDriverAdapter;
+import org.yardstickframework.BenchmarkUtils;
+
+import static org.yardstickframework.BenchmarkUtils.jcommander;
+import static org.yardstickframework.BenchmarkUtils.println;
+
+/**
+ * Abstract class for Thin client benchmarks.
+ */
+public abstract class IgniteThinAbstractBenchmark extends BenchmarkDriverAdapter {
+    /** Arguments. */
+    protected final IgniteBenchmarkArguments args = new IgniteBenchmarkArguments();
+
+    /** Client. */
+    private ThreadLocal<IgniteClient> client;
+
+    /** Server host addresses queue. */
+    private ConcurrentLinkedDeque<String> servHosts;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        jcommander(cfg.commandLineArguments(), args, "<ignite-driver>");
+
+        String locIp = IgniteThinBenchmarkUtils.getLocalIp(cfg);
+
+        client = new ThreadLocal<IgniteClient>() {
+            @Override protected IgniteClient initialValue() {
+                synchronized (IgniteThinAbstractBenchmark.class) {
+                    try {
+                        if (servHosts == null || servHosts.isEmpty())
+                            setServHosts(cfg);
+
+                        return new IgniteThinClient().start(cfg, servHosts.poll());
+                    }
+                    catch (Exception e) {
+                        e.printStackTrace();
+                    }
+
+                    return null;
+                }
+            }
+        };
+
+        println("Custom properties:");
+
+        for (String prop : cfg.customProperties().keySet())
+            println(String.format("%s=%s", prop, cfg.customProperties().get(prop)));
+
+        // Create util cache for checking if all driver processes had been started.
+        ClientCache<String, String> utilCache = client().getOrCreateCache("start-util-cache");
+
+        // Put 'started' message in util cache.
+        utilCache.put(locIp, "started");
+
+        List<String> hostList = IgniteThinBenchmarkUtils.drvHostList(cfg);
+
+        int cnt = 0;
+
+        // Wait for all driver processes to start.
+        while(!checkIfAllClientsStarted(hostList) && cnt++ < 600)
+            Thread.sleep(500L);
+    }
+
+    /**
+     *
+     * @param cfg
+     */
+    private synchronized void setServHosts(BenchmarkConfiguration cfg){
+        BenchmarkUtils.println("Setting serv host queue");
+
+        String[] servHostArr = IgniteThinBenchmarkUtils.servHostArr(cfg);
+
+        servHosts = new ConcurrentLinkedDeque<>(Arrays.asList(servHostArr));
+    }
+
+    /**
+     * Check if all driver processes had been started.
+     *
+     * @param hostList List of driver host addresses.
+     * @return {@code true} if all driver processes had been started or {@code false} if not.
+     */
+    private boolean checkIfAllClientsStarted(List<String> hostList){
+        ClientCache<String, String> utilCache = client().getOrCreateCache("start-util-cache");
+
+        for(String host : hostList){
+            if(host.equals("localhost"))
+                host = "127.0.0.1";
+
+            String res = utilCache.get(host);
+
+            if (res == null || !res.equals("started"))
+                return false;
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void tearDown() throws Exception {
+        if (client.get() != null)
+            client.get().close();
+    }
+
+    /** {@inheritDoc} */
+    @Override public String description() {
+        String desc = BenchmarkUtils.description(cfg, this);
+
+        return desc.isEmpty() ?
+            getClass().getSimpleName() + args.description() + cfg.defaultDescription() : desc;
+    }
+
+    /**
+     * @return Client.
+     */
+    protected IgniteClient client() {
+        return client.get();
+    }
+
+    /**
+     * @param max Key range.
+     * @return Next key.
+     */
+    public static int nextRandom(int max) {
+        return ThreadLocalRandom.current().nextInt(max);
+    }
+
+    /**
+     * @param min Minimum key in range.
+     * @param max Maximum key in range.
+     * @return Next key.
+     */
+    protected int nextRandom(int min, int max) {
+        return ThreadLocalRandom.current().nextInt(max - min) + min;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinClient.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinClient.java
new file mode 100644
index 0000000..96904d5
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/IgniteThinClient.java
@@ -0,0 +1,146 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick;
+
+import java.net.MalformedURLException;
+import java.net.URL;
+import java.util.List;
+import java.util.Map;
+import org.apache.ignite.IgniteCheckedException;
+import org.apache.ignite.Ignition;
+import org.apache.ignite.client.IgniteClient;
+import org.apache.ignite.configuration.ClientConfiguration;
+import org.apache.ignite.configuration.IgniteConfiguration;
+import org.apache.ignite.internal.util.IgniteUtils;
+import org.apache.ignite.internal.util.typedef.internal.U;
+import org.apache.ignite.lang.IgniteBiTuple;
+import org.apache.ignite.yardstick.io.FileUtils;
+import org.springframework.beans.BeansException;
+import org.springframework.beans.factory.xml.XmlBeanDefinitionReader;
+import org.springframework.context.ApplicationContext;
+import org.springframework.context.support.GenericApplicationContext;
+import org.springframework.core.io.UrlResource;
+import org.yardstickframework.BenchmarkConfiguration;
+import org.yardstickframework.BenchmarkUtils;
+
+/**
+ * Thin client.
+ */
+public class IgniteThinClient {
+    /** Thin client. */
+    private IgniteClient client;
+
+    /** */
+    public IgniteThinClient() {
+        // No-op.
+    }
+
+    /**
+     * @param client Use exist ignite client.
+     */
+    public IgniteThinClient(IgniteClient client) {
+        this.client = client;
+    }
+
+    /** {@inheritDoc} */
+    public IgniteClient start(BenchmarkConfiguration cfg, String host) throws Exception {
+        IgniteBenchmarkArguments args = new IgniteBenchmarkArguments();
+
+        BenchmarkUtils.jcommander(cfg.commandLineArguments(), args, "<ignite-node>");
+
+        IgniteBiTuple<IgniteConfiguration, ? extends ApplicationContext> tup = loadConfiguration(args.configuration());
+
+        IgniteConfiguration c = tup.get1();
+
+        assert c != null;
+
+        if (args.cleanWorkDirectory())
+            FileUtils.cleanDirectory(U.workDirectory(c.getWorkDirectory(), c.getIgniteHome()));
+
+        ClientConfiguration clCfg = new ClientConfiguration();
+
+        String hostPort = host + ":10800";
+
+        BenchmarkUtils.println(String.format("Using for connection address: %s", hostPort));
+
+        clCfg.setAddresses(hostPort);
+
+        client = Ignition.startClient(clCfg);
+
+        return client;
+    }
+
+    /**
+     * @param springCfgPath Spring configuration file path.
+     * @return Tuple with grid configuration and Spring application context.
+     * @throws Exception If failed.
+     */
+    private static IgniteBiTuple<IgniteConfiguration, ? extends ApplicationContext> loadConfiguration(String springCfgPath)
+        throws Exception {
+        URL url;
+
+        try {
+            url = new URL(springCfgPath);
+        }
+        catch (MalformedURLException e) {
+            url = IgniteUtils.resolveIgniteUrl(springCfgPath);
+
+            if (url == null) {
+                throw new IgniteCheckedException("Spring XML configuration path is invalid: " + springCfgPath +
+                    ". Note that this path should be either absolute or a relative local file system path, " +
+                    "relative to META-INF in classpath or valid URL to IGNITE_HOME.", e);
+            }
+        }
+
+        GenericApplicationContext springCtx;
+
+        try {
+            springCtx = new GenericApplicationContext();
+
+            new XmlBeanDefinitionReader(springCtx).loadBeanDefinitions(new UrlResource(url));
+
+            springCtx.refresh();
+        }
+        catch (BeansException e) {
+            throw new Exception("Failed to instantiate Spring XML application context [springUrl=" +
+                url + ", err=" + e.getMessage() + ']', e);
+        }
+
+        Map<String, IgniteConfiguration> cfgMap;
+
+        try {
+            cfgMap = springCtx.getBeansOfType(IgniteConfiguration.class);
+        }
+        catch (BeansException e) {
+            throw new Exception("Failed to instantiate bean [type=" + IgniteConfiguration.class + ", err=" +
+                e.getMessage() + ']', e);
+        }
+
+        if (cfgMap == null || cfgMap.isEmpty())
+            throw new Exception("Failed to find ignite configuration in: " + url);
+
+        return new IgniteBiTuple<>(cfgMap.values().iterator().next(), springCtx);
+    }
+
+    /**
+     * @return Thin client.
+     */
+    public IgniteClient client() {
+        return client;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java
deleted file mode 100644
index e961439..0000000
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgniteAtomicSequenceBenchmark.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.ignite.yardstick.cache;
-
-import java.util.Map;
-import org.apache.ignite.IgniteAtomicSequence;
-import org.apache.ignite.yardstick.IgniteAbstractBenchmark;
-import org.yardstickframework.BenchmarkConfiguration;
-
-/**
- * Ignite atomic sequence benchmark.
- */
-public class IgniteAtomicSequenceBenchmark extends IgniteAbstractBenchmark {
-    /** Cache. */
-    private IgniteAtomicSequence seq;
-
-    /** {@inheritDoc} */
-    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
-        super.setUp(cfg);
-
-        seq = ignite().atomicSequence("benchSequence", 0, true);
-
-        seq.batchSize(args.batch());
-    }
-
-    /** {@inheritDoc} */
-    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
-        seq.incrementAndGet();
-
-        return true;
-    }
-}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgnitePutAllSimpleBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgnitePutAllSimpleBenchmark.java
new file mode 100644
index 0000000..6051fc8
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/cache/IgnitePutAllSimpleBenchmark.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.cache;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.IgniteCache;
+
+/**
+ * Ignite benchmark that performs put operations.
+ */
+public class IgnitePutAllSimpleBenchmark extends IgniteCacheAbstractBenchmark<Integer, Object> {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        Map<Integer, Integer> vals = new HashMap<>();
+
+        for (int i = 0; i < 500; i++ )
+            vals.put(i, nextRandom(1000));
+
+        cache().putAll(vals);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected IgniteCache<Integer, Object> cache() {
+        return ignite().cache("atomic");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractJdbcBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractJdbcBenchmark.java
index 6ab024f..7d5b940 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractJdbcBenchmark.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractJdbcBenchmark.java
@@ -92,7 +92,7 @@
      * @throws Exception On error.
      */
     protected void setupData() throws Exception {
-        fillData(cfg, (IgniteEx)ignite(), args.range());
+        fillData(cfg, (IgniteEx)ignite(), args.range(), args.atomicMode());
     }
 
     /**
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractNativeBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractNativeBenchmark.java
index 5e92514..129e6a4 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractNativeBenchmark.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/AbstractNativeBenchmark.java
@@ -32,6 +32,6 @@
     @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
         super.setUp(cfg);
 
-        fillData(cfg, (IgniteEx)ignite(), args.range());
+        fillData(cfg, (IgniteEx)ignite(), args.range(), args.atomicMode());
     }
 }
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/JdbcUtils.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/JdbcUtils.java
index 74b5da5..81d6c17 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/JdbcUtils.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/JdbcUtils.java
@@ -17,6 +17,7 @@
 
 package org.apache.ignite.yardstick.jdbc;
 
+import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.cache.query.SqlFieldsQuery;
 import org.apache.ignite.internal.IgniteEx;
 import org.yardstickframework.BenchmarkConfiguration;
@@ -33,11 +34,17 @@
      * @param ignite Ignite node.
      * @param range Data key range.
      */
-    static void fillData(BenchmarkConfiguration cfg,  IgniteEx ignite, long range) {
+    public static void fillData(BenchmarkConfiguration cfg,  IgniteEx ignite, long range, CacheAtomicityMode atomicMode) {
         println(cfg, "Create table...");
 
+        String withExpr = atomicMode != null ? " WITH \"atomicity=" + atomicMode.name() + "\";" : ";";
+
+        String qry = "CREATE TABLE test_long (id long primary key, val long)" + withExpr;
+
+        println(cfg, "Creating table with schema: " + qry);
+
         ignite.context().query().querySqlFields(
-            new SqlFieldsQuery("CREATE TABLE test_long (id long primary key, val long)"), true);
+            new SqlFieldsQuery(qry), true);
 
         println(cfg, "Populate data...");
 
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/NativeJavaApiPutRemoveBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/NativeJavaApiPutRemoveBenchmark.java
new file mode 100644
index 0000000..c1106d7
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/NativeJavaApiPutRemoveBenchmark.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.jdbc;
+
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.IgniteCache;
+import org.apache.ignite.IgniteException;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ * Benchmark that fetches data from cache to compare with SQL SELECT operation.
+ */
+public class NativeJavaApiPutRemoveBenchmark extends AbstractNativeBenchmark {
+    /** Cache for created table. */
+    private IgniteCache<Object, Object> tabCache;
+
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        tabCache = ignite().cache("SQL_PUBLIC_TEST_LONG");
+    }
+
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        long insertKey = ThreadLocalRandom.current().nextLong(args.range()) + 1 + args.range();
+        long insertVal = insertKey + 1;
+
+        try {
+            tabCache.put(insertKey, insertVal);
+            tabCache.remove(insertKey);
+        } catch (IgniteException ign){
+            // Collision occurred, ignoring.
+        }
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/AbstractDistributedMvccBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/AbstractDistributedMvccBenchmark.java
new file mode 100644
index 0000000..daf50a1
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/AbstractDistributedMvccBenchmark.java
@@ -0,0 +1,101 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.jdbc.mvcc;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import org.apache.ignite.IgniteCountDownLatch;
+import org.apache.ignite.cache.query.QueryCursor;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.IgniteEx;
+import org.apache.ignite.yardstick.IgniteAbstractBenchmark;
+import org.yardstickframework.BenchmarkConfiguration;
+
+import static org.apache.ignite.yardstick.jdbc.JdbcUtils.fillData;
+import static org.yardstickframework.BenchmarkUtils.println;
+
+/**
+ * Base for mvcc benchmarks that are running on multiply hosts.
+ */
+public abstract class AbstractDistributedMvccBenchmark extends IgniteAbstractBenchmark {
+    /** Sql query to create load. */
+    public static final String UPDATE_QRY = "UPDATE test_long SET val = (val + 1) WHERE id BETWEEN ? AND ?";
+
+    /** Timeout in minutest for test data to be loaded. */
+    public static final long DATA_WAIT_TIMEOUT_MIN = 20;
+
+    /** Member id of the host driver is running */
+    protected int memberId;
+    /**
+     * Number of nodes handled by driver.
+     */
+    protected int driversNodesCnt;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        memberId = cfg.memberId();
+
+        if (memberId < 0)
+            throw new IllegalStateException("Member id should be initialized with non-negative value");
+
+        // We assume there is no client nodes in the cluster except clients that are yardstick drivers.
+        driversNodesCnt = ignite().cluster().forClients().nodes().size();
+
+        IgniteCountDownLatch dataIsReady = ignite().countDownLatch("fillDataLatch", 1, true, true);
+
+        try {
+            if (memberId == 0) {
+                fillData(cfg, (IgniteEx)ignite(), args.range(), args.atomicMode());
+
+                dataIsReady.countDown();
+            }
+            else {
+                println(cfg, "No need to upload data for memberId=" + memberId + ". Just waiting");
+
+                dataIsReady.await(DATA_WAIT_TIMEOUT_MIN, TimeUnit.MINUTES);
+
+                println(cfg, "Data is ready.");
+            }
+
+        }
+        catch (Throwable th) {
+            dataIsReady.countDownAll();
+
+            throw new RuntimeException("Fill Data failed.", th);
+        }
+
+        // Workaround for "Table TEST_LONG not found" on sql update.
+        execute(new SqlFieldsQuery("SELECT COUNT(*) FROM test_long"));
+    }
+
+    /**
+     * Execute specified query using started driver node.
+     * Returns result using {@link QueryCursor#getAll()}.
+     *
+     * @param qry sql query to execute.
+     */
+    protected List<List<?>> execute(SqlFieldsQuery qry) {
+        return ((IgniteEx)ignite())
+            .context()
+            .query()
+            .querySqlFields(qry, false)
+            .getAll();
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccProcessorBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccProcessorBenchmark.java
new file mode 100644
index 0000000..543e754
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccProcessorBenchmark.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.jdbc.mvcc;
+
+import java.util.Map;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.yardstick.jdbc.DisjointRangeGenerator;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ * Benchmark app that creates load on Mvcc Processor.
+ * Should be run in many threads on many hosts against one single server node.
+ */
+public class MvccProcessorBenchmark extends AbstractDistributedMvccBenchmark {
+    /** Generates id, that are disjoint only among threads running current host. */
+    private DisjointRangeGenerator locIdGen;
+
+    /** Offset for current host ids range, to make it disjoint among all the other host id ranges. */
+    private int idOffset;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        int locIdRangeWidth = args.range() / driversNodesCnt;
+
+        locIdGen = new DisjointRangeGenerator(cfg.threads(), locIdRangeWidth, args.sqlRange());
+
+        idOffset = locIdRangeWidth * memberId;
+    }
+
+    /**
+     *  Performs sql updates on the key sets that are disjoint among all the threads on all the hosts.
+     */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        long locStart = locIdGen.nextRangeStartId();
+
+        long start = idOffset + locStart;
+
+        long end = idOffset + locIdGen.endRangeId(locStart);
+
+        execute(new SqlFieldsQuery(UPDATE_QRY).setArgs(start, end));
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccUpdateContentionBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccUpdateContentionBenchmark.java
new file mode 100644
index 0000000..77c8c50
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/jdbc/mvcc/MvccUpdateContentionBenchmark.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.jdbc.mvcc;
+
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.ignite.cache.CacheAtomicityMode;
+import org.apache.ignite.cache.query.SqlFieldsQuery;
+import org.apache.ignite.internal.processors.query.IgniteSQLException;
+
+import static org.yardstickframework.BenchmarkUtils.println;
+
+/**
+ * Benchmark app that eliminates update contention in mvcc mode.
+ * Designed to be ran in many threads on many hosts.
+ */
+public class MvccUpdateContentionBenchmark extends AbstractDistributedMvccBenchmark {
+    /** Expected expception message in mvcc on mode on update fail. */
+    private static final String MVCC_EXC_MSG = "Mvcc version mismatch.";
+
+    /** Expected exception message in mvcc off mode on update fail. */
+    private static final String NO_MVCC_EXC_MSG_PREFIX =
+        "Failed to UPDATE some keys because they had been modified concurrently";
+
+    /** Counter of failed updates. */
+    private final AtomicLong failsCnt = new AtomicLong();
+
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        ThreadLocalRandom rnd = ThreadLocalRandom.current();
+
+        long start = rnd.nextLong(args.mvccContentionRange() - (args.sqlRange() - 1)) + 1;
+
+        long end = start + (args.sqlRange() - 1);
+
+        try {
+            execute(new SqlFieldsQuery(UPDATE_QRY).setArgs(start, end));
+        }
+        catch (IgniteSQLException exc) {
+            if ((args.atomicMode() == CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT && !exc.getMessage().equals(MVCC_EXC_MSG)) ||
+                (args.atomicMode() != CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT && !exc.getMessage().startsWith(NO_MVCC_EXC_MSG_PREFIX)))
+                throw new RuntimeException("Exception with unexpected message is thrown.", exc);
+
+            failsCnt.incrementAndGet();
+        }
+        catch (Exception e) {
+            throw new RuntimeException("Could not perform update.", e);
+        }
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void tearDown() throws Exception {
+        try {
+            super.tearDown();
+        }
+        finally {
+            println("Update contention count : " + failsCnt.get());
+        }
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAbstractBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAbstractBenchmark.java
new file mode 100644
index 0000000..5f219c6
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAbstractBenchmark.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.sequence;
+
+import org.apache.ignite.IgniteAtomicSequence;
+import org.apache.ignite.configuration.AtomicConfiguration;
+import org.apache.ignite.yardstick.IgniteAbstractBenchmark;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ * Abstract class for {@link IgniteAtomicSequence} benchmarks.
+ */
+public abstract class IgniteAtomicSequenceAbstractBenchmark extends IgniteAbstractBenchmark {
+    /** Bound for random operation, by default 1/10 of batchSize. */
+    protected int randomBound;
+
+    /** Sequence. */
+    protected IgniteAtomicSequence seq;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        int batchSize = args.batch();
+        int backups = args.backups();
+
+        AtomicConfiguration acfg = new AtomicConfiguration();
+
+        acfg.setAtomicSequenceReserveSize(batchSize);
+        acfg.setBackups(backups);
+
+        seq = ignite().atomicSequence("benchSequence", acfg, 0, true);
+
+        randomBound = batchSize < 10 ? 1 : batchSize / 10;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAddAndGetBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAddAndGetBenchmark.java
new file mode 100644
index 0000000..a2ee428
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceAddAndGetBenchmark.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.sequence;
+
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.IgniteAtomicSequence;
+
+/**
+ * {@link IgniteAtomicSequence#addAndGet(long)} benchmark.
+ */
+public class IgniteAtomicSequenceAddAndGetBenchmark extends IgniteAtomicSequenceAbstractBenchmark {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> map) throws Exception {
+        int delta = ThreadLocalRandom.current().nextInt(randomBound) + 1;
+
+        seq.addAndGet(delta);
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndAddBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndAddBenchmark.java
new file mode 100644
index 0000000..798ba25
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndAddBenchmark.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.sequence;
+
+import java.util.Map;
+import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.IgniteAtomicSequence;
+
+/**
+ * {@link IgniteAtomicSequence#getAndAdd(long)} benchmark.
+ */
+public class IgniteAtomicSequenceGetAndAddBenchmark extends IgniteAtomicSequenceAbstractBenchmark {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> map) throws Exception {
+        int delta = ThreadLocalRandom.current().nextInt(randomBound) + 1;
+
+        seq.getAndAdd(delta);
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndIncrementBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndIncrementBenchmark.java
new file mode 100644
index 0000000..604c1d7
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceGetAndIncrementBenchmark.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.sequence;
+
+import java.util.Map;
+import org.apache.ignite.IgniteAtomicSequence;
+
+/**
+ * {@link IgniteAtomicSequence#getAndIncrement()} benchmark.
+ */
+public class IgniteAtomicSequenceGetAndIncrementBenchmark extends IgniteAtomicSequenceAbstractBenchmark {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        seq.getAndIncrement();
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceIncrementAndGetBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceIncrementAndGetBenchmark.java
new file mode 100644
index 0000000..1390568
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/sequence/IgniteAtomicSequenceIncrementAndGetBenchmark.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.sequence;
+
+import java.util.Map;
+import org.apache.ignite.IgniteAtomicSequence;
+
+/**
+ * {@link IgniteAtomicSequence#incrementAndGet()} benchmark.
+ */
+public class IgniteAtomicSequenceIncrementAndGetBenchmark extends IgniteAtomicSequenceAbstractBenchmark {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        seq.incrementAndGet();
+
+        return true;
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinBenchmarkUtils.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinBenchmarkUtils.java
new file mode 100644
index 0000000..159887a
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinBenchmarkUtils.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.ArrayList;
+import java.util.Enumeration;
+import java.util.List;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ * Thin client benchmark utils.
+ */
+public class IgniteThinBenchmarkUtils {
+    /**
+     * Compute local IP address.
+     *
+     * @param cfg Configuration.
+     * @return local IP address.
+     * @throws SocketException
+     */
+    public static String getLocalIp(BenchmarkConfiguration cfg) throws SocketException {
+        List<String> hostList = drvHostList(cfg);
+
+        Enumeration e = NetworkInterface.getNetworkInterfaces();
+
+        while(e.hasMoreElements()) {
+            NetworkInterface n = (NetworkInterface) e.nextElement();
+
+            Enumeration ee = n.getInetAddresses();
+
+            while (ee.hasMoreElements()) {
+                InetAddress i = (InetAddress) ee.nextElement();
+
+                if(hostList.contains(i.getHostAddress()))
+                    return i.getHostAddress();
+            }
+        }
+
+        return null;
+    }
+
+    /**
+     * Creates list of driver host addresses.
+     *
+     * @param cfg Configuration.
+     * @return List of driver host addresses.
+     */
+    public static List<String> drvHostList(BenchmarkConfiguration cfg){
+        String driverHosts = cfg.customProperties().get("DRIVER_HOSTS");
+
+        String[] hostArr = driverHosts.split(",");
+
+        List<String> res = new ArrayList<>(hostArr.length);
+
+        for(String host : hostArr){
+            if(host.equals("localhost"))
+                res.add("127.0.0.1");
+            else
+                res.add(host);
+        }
+
+        return res;
+    }
+
+    /**
+     * Creates array of server host addresses.
+     *
+     * @param cfg Configuration.
+     * @return {@code Array} of server host addresses.
+     */
+    public static String[] servHostArr(BenchmarkConfiguration cfg){
+        String servHosts = cfg.customProperties().get("SERVER_HOSTS");
+
+        return servHosts.split(",");
+
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinCacheAbstractBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinCacheAbstractBenchmark.java
new file mode 100644
index 0000000..301c29e
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinCacheAbstractBenchmark.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import org.apache.ignite.client.ClientCache;
+import org.apache.ignite.yardstick.IgniteThinAbstractBenchmark;
+import org.yardstickframework.BenchmarkConfiguration;
+
+/**
+ *
+ * Abstract class for thin client benchmarks which use cache.
+ */
+public abstract class IgniteThinCacheAbstractBenchmark<K, V> extends IgniteThinAbstractBenchmark {
+    /** Cache. */
+    protected ClientCache<K, V> cache;
+
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        cache = cache();
+    }
+
+    /**
+     * Each benchmark must determine which cache will be used.
+     *
+     * @return ClientCache Cache to use.
+     */
+    protected abstract ClientCache<K, V> cache();
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetBenchmark.java
new file mode 100644
index 0000000..ad7937b
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetBenchmark.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import java.util.Map;
+import org.apache.ignite.client.ClientCache;
+import org.apache.ignite.yardstick.cache.model.SampleValue;
+import org.yardstickframework.BenchmarkConfiguration;
+
+import static org.yardstickframework.BenchmarkUtils.println;
+
+/**
+ * Thin client benchmark that performs get operations.
+ */
+public class IgniteThinGetBenchmark extends IgniteThinCacheAbstractBenchmark<Integer, Object> {
+    /** {@inheritDoc} */
+    @Override public void setUp(BenchmarkConfiguration cfg) throws Exception {
+        super.setUp(cfg);
+
+        if (args.preloadAmount() > args.range())
+            throw new IllegalArgumentException("Preloading amount (\"-pa\", \"--preloadAmount\") " +
+                "must by less then the range (\"-r\", \"--range\").");
+
+        loadSampleValues(cache().getName(), args.preloadAmount());
+    }
+
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        int key = nextRandom(args.range());
+
+        cache().get(key);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("atomic");
+    }
+
+    /**
+     * @param cacheName Cache name.
+     * @param cnt Number of entries to load.
+     */
+    private void loadSampleValues(String cacheName, int cnt) {
+        for (int i = 0; i < cnt; i++) {
+            cache.put(i, new SampleValue(i));
+
+            if (i % 100000 == 0) {
+                if (Thread.currentThread().isInterrupted())
+                    break;
+
+                println("Loaded entries [cache=" + cacheName + ", cnt=" + i + ']');
+            }
+        }
+
+        println("Load entries done [cache=" + cacheName + ", cnt=" + cnt + ']');
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetTxBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetTxBenchmark.java
new file mode 100644
index 0000000..601ce63
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinGetTxBenchmark.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import org.apache.ignite.client.ClientCache;
+
+/**
+ * Thin client benchmark that performs get operations.
+ */
+public class IgniteThinGetTxBenchmark extends IgniteThinGetBenchmark {
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("tx");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllBenchmark.java
new file mode 100644
index 0000000..3e17007
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllBenchmark.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.ignite.client.ClientCache;
+
+/**
+ * Thin client benchmark that performs putAll operations.
+ */
+public class IgniteThinPutAllBenchmark extends IgniteThinCacheAbstractBenchmark<Integer, Object> {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        Map<Integer, Integer> vals = new HashMap<>();
+
+        for (int i = 0; i < 500; i++ )
+            vals.put(i, nextRandom(1000));
+
+        cache().putAll(vals);
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("atomic");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllTxBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllTxBenchmark.java
new file mode 100644
index 0000000..fba3a96
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutAllTxBenchmark.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import org.apache.ignite.client.ClientCache;
+
+/**
+ * Thin client benchmark that performs putAll operations.
+ */
+public class IgniteThinPutAllTxBenchmark extends IgniteThinPutAllBenchmark {
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("tx");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutBenchmark.java
new file mode 100644
index 0000000..e8f675b
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutBenchmark.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import java.util.Map;
+import org.apache.ignite.client.ClientCache;
+import org.apache.ignite.yardstick.cache.model.SampleValue;
+
+/**
+ * Thin client benchmark that performs put operations.
+ */
+public class IgniteThinPutBenchmark extends IgniteThinCacheAbstractBenchmark<Integer, Object> {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        int key = nextRandom(args.range());
+
+        cache().put(key, new SampleValue(key));
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("atomic");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutGetBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutGetBenchmark.java
new file mode 100644
index 0000000..c40a10c
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutGetBenchmark.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import java.util.Map;
+import org.apache.ignite.client.ClientCache;
+import org.apache.ignite.yardstick.cache.model.SampleValue;
+
+/**
+ * Thin client benchmark that performs put and get operations.
+ */
+public class IgniteThinPutGetBenchmark extends IgniteThinCacheAbstractBenchmark<Integer, Object> {
+    /** {@inheritDoc} */
+    @Override public boolean test(Map<Object, Object> ctx) throws Exception {
+        int key = nextRandom(args.range());
+
+        Object val = cache().get(key);
+
+        if (val != null)
+            key = nextRandom(args.range());
+
+        cache().put(key, new SampleValue(key));
+
+        return true;
+    }
+
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("atomic");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutTxBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutTxBenchmark.java
new file mode 100644
index 0000000..bf05632
--- /dev/null
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/thin/cache/IgniteThinPutTxBenchmark.java
@@ -0,0 +1,30 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.yardstick.thin.cache;
+
+import org.apache.ignite.client.ClientCache;
+
+/**
+ * Thin client benchmark that performs put operations.
+ */
+public class IgniteThinPutTxBenchmark extends IgniteThinPutBenchmark {
+    /** {@inheritDoc} */
+    @Override protected ClientCache<Integer, Object> cache() {
+        return client().cache("tx");
+    }
+}
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractNativeBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractNativeBenchmark.java
index de1ad28..01eb6d4 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractNativeBenchmark.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractNativeBenchmark.java
@@ -20,6 +20,7 @@
 import java.util.Map;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteException;
+import org.apache.ignite.configuration.CacheConfiguration;
 import org.apache.ignite.yardstick.IgniteAbstractBenchmark;
 import org.apache.ignite.yardstick.upload.model.Values10;
 import org.yardstickframework.BenchmarkConfiguration;
@@ -58,7 +59,7 @@
         BenchmarkUtils.println(cfg, "Starting custom warmup.");
         String warmupCacheName = cacheName + "Warmup";
 
-        try (IgniteCache<Long, Values10> warmupCache = ignite().createCache(warmupCacheName)) {
+        try (IgniteCache<Long, Values10> warmupCache = createCache(warmupCacheName)) {
             upload(warmupCacheName, warmupRowsCnt);
         }
         finally {
@@ -68,7 +69,16 @@
         BenchmarkUtils.println(cfg, "Custom warmup finished.");
 
         // cache for benchmarked action
-        cache = ignite().createCache(cacheName);
+        cache = createCache(cacheName);
+    }
+
+    private IgniteCache<Long, Values10> createCache(String name) {
+        CacheConfiguration<Long, Values10> cfg = new CacheConfiguration<>(name);
+
+        if (args.atomicMode() != null)
+            cfg.setAtomicityMode(args.atomicMode());
+
+        return ignite().createCache(cfg);
     }
 
     /** {@inheritDoc} */
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractUploadBenchmark.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractUploadBenchmark.java
index 20dfdd5..6d9c84d 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractUploadBenchmark.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/AbstractUploadBenchmark.java
@@ -41,7 +41,7 @@
     long warmupRowsCnt;
 
     /** Factory that hides all the test data details. */
-    protected QueryFactory queries = new QueryFactory();
+    protected QueryFactory queries;
 
     /** {@inheritDoc} */
     @Override public final void setUp(BenchmarkConfiguration cfg) throws Exception {
@@ -93,7 +93,9 @@
     /**
      * Creates empty table.
      */
-    @Override protected void setupData() throws Exception{
+    @Override protected void setupData() throws Exception {
+        queries = new QueryFactory(args.atomicMode());
+
         dropAndCreate();
     }
 
@@ -131,6 +133,9 @@
      */
     private void dropAndCreate() throws SQLException {
         executeUpdate(QueryFactory.DROP_TABLE_IF_EXISTS);
+
+        BenchmarkUtils.println(cfg, "Creating table with schema: " + queries.createTable());
+
         executeUpdate(queries.createTable());
     }
 
diff --git a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/model/QueryFactory.java b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/model/QueryFactory.java
index 3ff4cb4..6c98d1a 100644
--- a/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/model/QueryFactory.java
+++ b/modules/yardstick/src/main/java/org/apache/ignite/yardstick/upload/model/QueryFactory.java
@@ -20,6 +20,7 @@
 import java.sql.PreparedStatement;
 import java.sql.SQLException;
 import java.util.concurrent.ThreadLocalRandom;
+import org.apache.ignite.cache.CacheAtomicityMode;
 import org.apache.ignite.yardstick.upload.StreamerParams;
 import org.jetbrains.annotations.Nullable;
 
@@ -47,16 +48,21 @@
     /** Number of "values" fields in the test table (any field except primary key). */
     private int valFieldsCnt = 10;
 
-    /** Create table with long primary key and number of long and varchar fields */
-    private String createTable = newCreateTableQuery();
-
     /** Parametrised query to insert new row. */
     private String insert = newInsertQuery();
 
+    /** Atomicity mode of test table's cache. */
+    private CacheAtomicityMode tabAtomicMode;
+
+    /** */
+    public QueryFactory(CacheAtomicityMode tabAtomicMode) {
+        this.tabAtomicMode = tabAtomicMode;
+    }
+
     /**
-     * See {@link #createTable}.
+     * Create table with long primary key and number of long and varchar fields
      */
-    private String newCreateTableQuery() {
+    public String createTable() {
         StringBuilder create = new StringBuilder("CREATE TABLE test_upload (id LONG PRIMARY KEY");
 
         for (int vi = 1; vi <= valFieldsCnt; vi++) {
@@ -69,7 +75,12 @@
 
         }
 
-        create.append(");");
+        create.append(')');
+
+        if (tabAtomicMode != null)
+            create.append(" WITH \"ATOMICITY=").append(tabAtomicMode.name()).append('\"');
+
+        create.append(';');
 
         return create.toString();
     }
@@ -87,13 +98,6 @@
     }
 
     /**
-     * See {@link #createTable}.
-     */
-    public String createTable() {
-        return createTable;
-    }
-
-    /**
      * See {@link #insert}.
      */
     public String insert() {
diff --git a/modules/yarn/pom.xml b/modules/yarn/pom.xml
index a41bf01..3dbfa7c 100644
--- a/modules/yarn/pom.xml
+++ b/modules/yarn/pom.xml
@@ -35,7 +35,7 @@
     <url>http://ignite.apache.org</url>
 
     <properties>
-        <hadoop-yarn.version>2.7.0</hadoop-yarn.version>
+        <hadoop-yarn.version>2.7.7</hadoop-yarn.version>
     </properties>
 
     <dependencies>
@@ -72,6 +72,24 @@
         </dependency>
 
         <dependency>
+            <groupId>commons-collections</groupId>
+            <artifactId>commons-collections</artifactId>
+            <version>${commons.collections.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-codec</groupId>
+            <artifactId>commons-codec</artifactId>
+            <version>${commons.codec.version}</version>
+        </dependency>
+
+        <dependency>
+            <groupId>commons-beanutils</groupId>
+            <artifactId>commons-beanutils</artifactId>
+            <version>${commons.beanutils.version}</version>
+        </dependency>
+
+        <dependency>
             <groupId>junit</groupId>
             <artifactId>junit</artifactId>
             <version>4.11</version>
diff --git a/modules/zookeeper/pom.xml b/modules/zookeeper/pom.xml
index 9c42e47..8b338b2 100644
--- a/modules/zookeeper/pom.xml
+++ b/modules/zookeeper/pom.xml
@@ -71,10 +71,18 @@
             <version>${guava16.version}</version>
         </dependency>
 
+        <!-- Do not remove org.codehaus.jackson:jackson-core-asl it is required by Apache Curator at runtime -->
         <dependency>
-            <groupId>com.fasterxml.jackson.core</groupId>
-            <artifactId>jackson-annotations</artifactId>
-            <version>${jackson.version}</version>
+            <groupId>org.codehaus.jackson</groupId>
+            <artifactId>jackson-core-asl</artifactId>
+            <version>${jackson1.version}</version>
+        </dependency>
+
+        <!-- Do not remove org.codehaus.jackson:jackson-mapper-asl it is required by Apache Curator at runtime -->
+        <dependency>
+            <groupId>org.codehaus.jackson</groupId>
+            <artifactId>jackson-mapper-asl</artifactId>
+            <version>${jackson1.version}</version>
         </dependency>
 
         <dependency>
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/TcpDiscoveryZookeeperIpFinder.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/TcpDiscoveryZookeeperIpFinder.java
index 31d118b..93837a8 100644
--- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/TcpDiscoveryZookeeperIpFinder.java
+++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/TcpDiscoveryZookeeperIpFinder.java
@@ -25,7 +25,6 @@
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicBoolean;
-import com.fasterxml.jackson.annotation.JsonRootName;
 import com.google.common.collect.Sets;
 import org.apache.curator.RetryPolicy;
 import org.apache.curator.framework.CuratorFramework;
@@ -44,6 +43,7 @@
 import org.apache.ignite.resources.LoggerResource;
 import org.apache.ignite.spi.IgniteSpiException;
 import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinderAdapter;
+import org.codehaus.jackson.map.annotate.JsonRootName;
 
 /**
  * This TCP Discovery IP Finder uses Apache ZooKeeper (ZK) to locate peer nodes when bootstrapping in order to join
@@ -111,7 +111,7 @@
     private String serviceName = SERVICE_NAME;
 
     /** Whether to allow or not duplicate registrations. See setter doc. */
-    private boolean allowDuplicateRegistrations = false;
+    private boolean allowDuplicateRegistrations;
 
     /** The Service Discovery recipe. */
     private ServiceDiscovery<IgniteInstanceDetails> discovery;
@@ -131,7 +131,7 @@
 
         String sysPropZkConnString = System.getProperty(PROP_ZK_CONNECTION_STRING);
 
-        if (sysPropZkConnString != null && sysPropZkConnString.trim().length() > 0)
+        if (sysPropZkConnString != null && !sysPropZkConnString.trim().isEmpty())
             zkConnectionString = sysPropZkConnString;
 
         if (log.isInfoEnabled())
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDistributedCollectDataFuture.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDistributedCollectDataFuture.java
index e9b28e1..e710055 100644
--- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDistributedCollectDataFuture.java
+++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZkDistributedCollectDataFuture.java
@@ -18,6 +18,7 @@
 package org.apache.ignite.spi.discovery.zk.internal;
 
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 import java.util.UUID;
@@ -145,23 +146,19 @@
         UUID futId,
         IgniteLogger log
     ) throws Exception {
-        // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8189
+        List<String> batch = new LinkedList<>();
+
         String evtDir = paths.distributedFutureBasePath(futId);
 
-        try {
-            client.deleteAll(evtDir,
-                client.getChildrenIfPathExists(evtDir),
-                -1);
-        }
-        catch (KeeperException.NoNodeException e) {
-            U.log(log, "Node for deletion was not found: " + e.getPath());
+        if (client.exists(evtDir)) {
+            batch.addAll(client.getChildrenPaths(evtDir));
 
-            // TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8189
+            batch.add(evtDir);
         }
 
-        client.deleteIfExists(evtDir, -1);
+        batch.add(paths.distributedFutureResultPath(futId));
 
-        client.deleteIfExists(paths.distributedFutureResultPath(futId), -1);
+        client.deleteAll(batch, -1);
     }
 
     /**
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java
index b58f0ce..39417c2 100644
--- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java
+++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClient.java
@@ -504,9 +504,7 @@
      * @throws ZookeeperClientFailedException If connection to zk was lost.
      * @throws InterruptedException If interrupted.
      */
-    List<String> getChildren(String path)
-        throws ZookeeperClientFailedException, InterruptedException
-    {
+    List<String> getChildren(String path) throws ZookeeperClientFailedException, InterruptedException {
         for (;;) {
             long connStartTime = this.connStartTime;
 
@@ -520,30 +518,24 @@
     }
 
     /**
+     * Get children paths.
+     *
      * @param path Path.
-     * @return Children nodes.
-     * @throws KeeperException.NoNodeException If provided path does not exist.
+     * @return Children paths.
      * @throws ZookeeperClientFailedException If connection to zk was lost.
      * @throws InterruptedException If interrupted.
      */
-    List<String> getChildrenIfPathExists(String path) throws
-        KeeperException.NoNodeException, InterruptedException, ZookeeperClientFailedException {
-        for (;;) {
-            long connStartTime = this.connStartTime;
+    List<String> getChildrenPaths(String path) throws ZookeeperClientFailedException, InterruptedException {
+        List<String> children = getChildren(path);
 
-            try {
-                return zk.getChildren(path, false);
-            }
-            catch (KeeperException.NoNodeException e) {
-                throw e;
-            }
-            catch (Exception e) {
-                onZookeeperError(connStartTime, e);
-            }
-        }
+        ArrayList<String> paths = new ArrayList(children.size());
+
+        for (String child : children)
+            paths.add(path + "/" + child);
+
+        return paths;
     }
 
-
     /**
      * @param path Path.
      * @throws InterruptedException If interrupted.
@@ -593,7 +585,7 @@
      * @throws ZookeeperClientFailedException If connection to zk was lost.
      * @throws InterruptedException If interrupted.
      */
-    void deleteAll(@Nullable String parent, List<String> paths, int ver)
+    void deleteAll(List<String> paths, int ver)
         throws ZookeeperClientFailedException, InterruptedException {
         if (paths.isEmpty())
             return;
@@ -605,10 +597,8 @@
         List<Op> batch = new LinkedList<>();
 
         for (String path : paths) {
-            String path0 = parent != null ? parent + "/" + path : path;
-
             //TODO ZK: https://issues.apache.org/jira/browse/IGNITE-8187
-            int size = requestOverhead(path0) + 17 /* overhead */;
+            int size = requestOverhead(path) + 17 /* overhead */;
 
             assert size <= MAX_REQ_SIZE;
 
@@ -620,7 +610,7 @@
                 batchSize = 0;
             }
 
-            batch.add(Op.delete(path0, ver));
+            batch.add(Op.delete(path, ver));
 
             batchSize += size;
         }
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClusterNode.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClusterNode.java
index 1c2a589d..f90bc01 100644
--- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClusterNode.java
+++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClusterNode.java
@@ -169,11 +169,6 @@
     }
 
     /** {@inheritDoc} */
-    @Override public boolean isCacheClient() {
-        return isClient();
-    }
-
-    /** {@inheritDoc} */
     @Nullable @Override public <T> T attribute(String name) {
         // Even though discovery SPI removes this attribute after authentication, keep this check for safety.
         if (IgniteNodeAttributes.ATTR_SECURITY_CREDENTIALS.equals(name))
diff --git a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java
index dc687b8..b89fbe4 100644
--- a/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java
+++ b/modules/zookeeper/src/main/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoveryImpl.java
@@ -27,6 +27,7 @@
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -466,12 +467,17 @@
         if (rtState.joined) {
             assert rtState.evtsData != null;
 
-            lsnr.onDiscovery(EVT_CLIENT_NODE_DISCONNECTED,
-                rtState.evtsData.topVer,
-                locNode,
-                rtState.top.topologySnapshot(),
-                Collections.<Long, Collection<ClusterNode>>emptyMap(),
-                null);
+            try {
+                lsnr.onDiscovery(EVT_CLIENT_NODE_DISCONNECTED,
+                    rtState.evtsData.topVer,
+                    locNode,
+                    rtState.top.topologySnapshot(),
+                    Collections.emptyMap(),
+                    null).get();
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException("Failed to wait for discovery listener notification", e);
+            }
         }
 
         try {
@@ -532,14 +538,19 @@
         List<ClusterNode> nodes = rtState.top.topologySnapshot();
 
         if (nodes.isEmpty())
-            nodes = Collections.singletonList((ClusterNode)locNode);
+            nodes = Collections.singletonList(locNode);
 
-        lsnr.onDiscovery(EVT_NODE_SEGMENTED,
-            rtState.evtsData != null ? rtState.evtsData.topVer : 1L,
-            locNode,
-            nodes,
-            Collections.<Long, Collection<ClusterNode>>emptyMap(),
-            null);
+        try {
+            lsnr.onDiscovery(EVT_NODE_SEGMENTED,
+                rtState.evtsData != null ? rtState.evtsData.topVer : 1L,
+                locNode,
+                nodes,
+                Collections.emptyMap(),
+                null).get();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException("Failed to wait for discovery listener notification", e);
+        }
     }
 
     /**
@@ -1333,7 +1344,9 @@
             }
         }
 
-        assert !aliveClients.isEmpty();
+        // This situation may appear while reconnection and this callback can be skipped.
+        if(!aliveClients.containsKey(locInternalOrder))
+            return;
 
         Map.Entry<Long, String> oldest = aliveClients.firstEntry();
 
@@ -2015,9 +2028,7 @@
 
         if (subj == null) {
             U.warn(log, "Authentication failed [nodeId=" + node.id() +
-                    ", addrs=" + U.addressesAsString(node) + ']',
-                "Authentication failed [nodeId=" + U.id8(node.id()) + ", addrs=" +
-                    U.addressesAsString(node) + ']');
+                ", addrs=" + U.addressesAsString(node) + ']');
 
             // Note: exception message test is checked in tests.
             return new ZkNodeValidateResult("Authentication failed");
@@ -2025,10 +2036,7 @@
 
         if (!(subj instanceof Serializable)) {
             U.warn(log, "Authentication subject is not Serializable [nodeId=" + node.id() +
-                    ", addrs=" + U.addressesAsString(node) + ']',
-                "Authentication subject is not Serializable [nodeId=" + U.id8(node.id()) +
-                    ", addrs=" +
-                    U.addressesAsString(node) + ']');
+                ", addrs=" + U.addressesAsString(node) + ']');
 
             return new ZkNodeValidateResult("Authentication subject is not serializable");
         }
@@ -2244,12 +2252,19 @@
 
         final List<ClusterNode> topSnapshot = Collections.singletonList((ClusterNode)locNode);
 
-        lsnr.onDiscovery(EVT_NODE_JOINED,
-            1L,
-            locNode,
-            topSnapshot,
-            Collections.<Long, Collection<ClusterNode>>emptyMap(),
-            null);
+        try {
+            lsnr.onDiscovery(EVT_NODE_JOINED,
+                1L,
+                locNode,
+                topSnapshot,
+                Collections.emptyMap(),
+                null).get();
+        }
+        catch (IgniteCheckedException e) {
+            joinFut.onDone(e);
+
+            throw new IgniteException("Failed to wait for discovery listener notification", e);
+        }
 
         // Reset events (this is also notification for clients left from previous cluster).
         rtState.zkClient.setData(zkPaths.evtsPath, marshalZip(rtState.evtsData), -1);
@@ -2268,33 +2283,27 @@
 
         ZookeeperClient client = rtState.zkClient;
 
-        // TODO ZK: use multi, better batching + max-size safe + NoNodeException safe.
-        List<String> evtChildren = rtState.zkClient.getChildren(zkPaths.evtsPath);
+        List<String> batch = new LinkedList<>();
 
-        for (String evtPath : evtChildren) {
-            String evtDir = zkPaths.evtsPath + "/" + evtPath;
+        List<String> evtChildren = client.getChildrenPaths(zkPaths.evtsPath);
 
-            removeChildren(evtDir);
-        }
+        for (String evtPath : evtChildren)
+            batch.addAll(client.getChildrenPaths(evtPath));
 
-        client.deleteAll(zkPaths.evtsPath, evtChildren, -1);
+        batch.addAll(evtChildren);
 
-        client.deleteAll(zkPaths.customEvtsDir,
-            client.getChildren(zkPaths.customEvtsDir),
-            -1);
+        batch.addAll(client.getChildrenPaths(zkPaths.customEvtsDir));
 
-        rtState.zkClient.deleteAll(zkPaths.customEvtsPartsDir,
-            rtState.zkClient.getChildren(zkPaths.customEvtsPartsDir),
-            -1);
+        batch.addAll(client.getChildrenPaths(zkPaths.customEvtsPartsDir));
 
-        rtState.zkClient.deleteAll(zkPaths.customEvtsAcksDir,
-            rtState.zkClient.getChildren(zkPaths.customEvtsAcksDir),
-            -1);
+        batch.addAll(client.getChildrenPaths(zkPaths.customEvtsAcksDir));
+
+        client.deleteAll(batch, -1);
 
         if (startInternalOrder > 0) {
-            for (String alive : rtState.zkClient.getChildren(zkPaths.aliveNodesDir)) {
+            for (String alive : client.getChildren(zkPaths.aliveNodesDir)) {
                 if (ZkIgnitePaths.aliveInternalId(alive) < startInternalOrder)
-                    rtState.zkClient.deleteIfExists(zkPaths.aliveNodesDir + "/" + alive, -1);
+                    client.deleteIfExists(zkPaths.aliveNodesDir + "/" + alive, -1);
             }
         }
 
@@ -2307,14 +2316,6 @@
     }
 
     /**
-     * @param path Path.
-     * @throws Exception If failed.
-     */
-    private void removeChildren(String path) throws Exception {
-        rtState.zkClient.deleteAll(path, rtState.zkClient.getChildren(path), -1);
-    }
-
-    /**
      * @param zkClient Client.
      * @param evtPath Event path.
      * @param sndNodeId Sender node ID.
@@ -2958,16 +2959,16 @@
                 joinedEvtData.topVer,
                 locNode,
                 topSnapshot,
-                Collections.<Long, Collection<ClusterNode>>emptyMap(),
-                null);
+                Collections.emptyMap(),
+                null).get();
 
             if (rtState.prevJoined) {
                 lsnr.onDiscovery(EVT_CLIENT_NODE_RECONNECTED,
                     joinedEvtData.topVer,
                     locNode,
                     topSnapshot,
-                    Collections.<Long, Collection<ClusterNode>>emptyMap(),
-                    null);
+                    Collections.emptyMap(),
+                    null).get();
 
                 U.quietAndWarn(log, "Client node was reconnected after it was already considered failed [locId=" + locNode.id() + ']');
             }
@@ -3418,13 +3419,23 @@
 
         final List<ClusterNode> topSnapshot = rtState.top.topologySnapshot();
 
-        lsnr.onDiscovery(
+        IgniteInternalFuture fut = lsnr.onDiscovery(
             DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT,
             evtData.topologyVersion(),
             sndNode,
             topSnapshot,
-            Collections.<Long, Collection<ClusterNode>>emptyMap(),
-            msg);
+            Collections.emptyMap(),
+            msg
+        );
+
+        if (msg != null && msg.isMutable()) {
+            try {
+                fut.get();
+            }
+            catch (IgniteCheckedException e) {
+                throw new IgniteException("Failed to wait for discovery listener notification", e);
+            }
+        }
     }
 
     /**
@@ -3442,12 +3453,17 @@
 
         final List<ClusterNode> topSnapshot = rtState.top.topologySnapshot();
 
-        lsnr.onDiscovery(EVT_NODE_JOINED,
-            joinedEvtData.topVer,
-            joinedNode,
-            topSnapshot,
-            Collections.<Long, Collection<ClusterNode>>emptyMap(),
-            null);
+        try {
+            lsnr.onDiscovery(EVT_NODE_JOINED,
+                joinedEvtData.topVer,
+                joinedNode,
+                topSnapshot,
+                Collections.emptyMap(),
+                null).get();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException("Failed to wait for discovery listener notification", e);
+        }
     }
 
     /**
@@ -3473,12 +3489,17 @@
 
         final List<ClusterNode> topSnapshot = rtState.top.topologySnapshot();
 
-        lsnr.onDiscovery(EVT_NODE_FAILED,
-            topVer,
-            failedNode,
-            topSnapshot,
-            Collections.<Long, Collection<ClusterNode>>emptyMap(),
-            null);
+        try {
+            lsnr.onDiscovery(EVT_NODE_FAILED,
+                topVer,
+                failedNode,
+                topSnapshot,
+                Collections.emptyMap(),
+                null).get();
+        }
+        catch (IgniteCheckedException e) {
+            throw new IgniteException("Failed to wait for discovery listener notification", e);
+        }
 
         stats.onNodeFailed();
     }
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/ZookeeperIpFinderTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/ZookeeperIpFinderTest.java
index 25598fe..ec5630f 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/ZookeeperIpFinderTest.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/ZookeeperIpFinderTest.java
@@ -26,7 +26,6 @@
 import org.apache.curator.retry.ExponentialBackoffRetry;
 import org.apache.curator.retry.RetryNTimes;
 import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingCluster;
 import org.apache.curator.utils.CloseableUtils;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.configuration.IgniteConfiguration;
@@ -34,8 +33,11 @@
 import org.apache.ignite.events.EventType;
 import org.apache.ignite.lang.IgniteBiPredicate;
 import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingCluster;
 import org.apache.ignite.testframework.GridTestUtils;
 import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest;
+import org.junit.Rule;
+import org.junit.rules.Timeout;
 
 /**
  * Test for {@link TcpDiscoveryZookeeperIpFinder}.
@@ -43,6 +45,10 @@
  * @author Raul Kripalani
  */
 public class ZookeeperIpFinderTest extends GridCommonAbstractTest {
+    /** Per test timeout */
+    @Rule
+    public Timeout globalTimeout = new Timeout((int) GridTestUtils.DFLT_TEST_TIMEOUT);
+
     /** ZK Cluster size. */
     private static final int ZK_CLUSTER_SIZE = 3;
 
@@ -79,6 +85,7 @@
 
         // start the ZK cluster
         zkCluster = new TestingCluster(ZK_CLUSTER_SIZE);
+
         zkCluster.start();
 
         // start the Curator client so we can perform assertions on the ZK state later
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/FixedTestingQuorumPeerMain.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/FixedTestingQuorumPeerMain.java
new file mode 100644
index 0000000..e0a416e
--- /dev/null
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/FixedTestingQuorumPeerMain.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator;
+
+import org.apache.curator.test.ZooKeeperMainFace;
+import org.apache.zookeeper.server.ServerCnxnFactory;
+import org.apache.zookeeper.server.quorum.QuorumPeer;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+import org.apache.zookeeper.server.quorum.QuorumPeerMain;
+import java.io.IOException;
+import java.lang.reflect.Field;
+import java.nio.channels.ServerSocketChannel;
+
+/**
+ */
+public class FixedTestingQuorumPeerMain extends QuorumPeerMain implements ZooKeeperMainFace {
+    @Override public void runFromConfig(QuorumPeerConfig config) throws IOException {
+        quorumPeer = QuorumPeer.testingQuorumPeer();
+        super.runFromConfig(config);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void kill() {
+        try {
+            if (quorumPeer != null) {
+                Field cnxnFactoryField = QuorumPeer.class.getDeclaredField("cnxnFactory");
+
+                cnxnFactoryField.setAccessible(true);
+
+                ServerCnxnFactory cnxnFactory = (ServerCnxnFactory)cnxnFactoryField.get(quorumPeer);
+
+                cnxnFactory.closeAll();
+
+                Field ssField = cnxnFactory.getClass().getDeclaredField("ss");
+
+                ssField.setAccessible(true);
+
+                ServerSocketChannel ss = (ServerSocketChannel)ssField.get(cnxnFactory);
+
+                ss.close();
+            }
+            close();
+        }
+        catch (Exception e) {
+            e.printStackTrace();
+        }
+    }
+
+    /** {@inheritDoc} */
+    @Override public QuorumPeer getQuorumPeer() {
+        return quorumPeer;
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        if (quorumPeer != null)
+            quorumPeer.shutdown();
+    }
+
+    /** {@inheritDoc} */
+    @Override public void blockUntilStarted() throws Exception {
+        while (quorumPeer == null) {
+            try {
+                Thread.sleep(100);
+            }
+            catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                break;
+            }
+        }
+    }
+}
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingCluster.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingCluster.java
new file mode 100644
index 0000000..c6e8d72
--- /dev/null
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingCluster.java
@@ -0,0 +1,237 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator;
+
+import com.google.common.base.Function;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Iterables;
+import com.google.common.collect.Lists;
+import org.apache.curator.test.ByteCodeRewrite;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.QuorumConfigBuilder;
+import org.apache.zookeeper.ZooKeeper;
+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Manages an internally running ensemble of ZooKeeper servers. FOR TESTING PURPOSES ONLY.
+ * This class is a copy of {{org.apache.curator.test.TestingCluster}},
+ * but have very small change, that allow to run testing cluster with ZooKeeper 2.4.13 ver.
+ */
+public class TestingCluster implements Closeable {
+    static {
+        ByteCodeRewrite.apply();
+    }
+
+    /** Servers. */
+    private final List<TestingZooKeeperServer> servers;
+
+    /**
+     * Creates an ensemble comprised of <code>n</code> servers. Each server will use
+     * a temp directory and random ports
+     *
+     * @param instanceQty number of servers to create in the ensemble
+     */
+    public TestingCluster(int instanceQty) {
+        this(makeSpecs(instanceQty));
+    }
+
+    /**
+     * Creates an ensemble using the given server specs
+     *
+     * @param specs the server specs
+     */
+    public TestingCluster(InstanceSpec... specs) {
+        this(listToMap(ImmutableList.copyOf(specs)));
+    }
+
+    /**
+     * Creates an ensemble using the given server specs
+     *
+     * @param specs the server specs
+     */
+    public TestingCluster(Collection<InstanceSpec> specs) {
+        this(listToMap(specs));
+    }
+
+    /**
+     * Creates an ensemble using the given server specs
+     *
+     * @param specs map of an instance spec to its set of quorum instances. Allows simulation of an ensemble with
+     * instances having different config peers
+     */
+    public TestingCluster(Map<InstanceSpec, Collection<InstanceSpec>> specs) {
+        ImmutableList.Builder<TestingZooKeeperServer> serverBuilder = ImmutableList.builder();
+        for (Map.Entry<InstanceSpec, Collection<InstanceSpec>> entry : specs.entrySet()) {
+            List<InstanceSpec> instanceSpecs = Lists.newArrayList(entry.getValue());
+            int index = instanceSpecs.indexOf(entry.getKey());
+            Preconditions.checkState(index >= 0, entry.getKey() + " not found in specs");
+            QuorumConfigBuilder builder = new QuorumConfigBuilder(instanceSpecs);
+            serverBuilder.add(new TestingZooKeeperServer(builder, index));
+        }
+        servers = serverBuilder.build();
+    }
+
+    /**
+     * Returns the set of servers in the ensemble
+     *
+     * @return set of servers
+     */
+    public Collection<InstanceSpec> getInstances() {
+        Iterable<InstanceSpec> transformed = Iterables.transform
+            (
+                servers,
+                new Function<TestingZooKeeperServer, InstanceSpec>() {
+                    @Override
+                    public InstanceSpec apply(TestingZooKeeperServer server) {
+                        return server.getInstanceSpec();
+                    }
+                }
+            );
+        return Lists.newArrayList(transformed);
+    }
+
+    public List<TestingZooKeeperServer> getServers() {
+        return Lists.newArrayList(servers);
+    }
+
+    /**
+     * Returns the connection string to pass to the ZooKeeper constructor
+     *
+     * @return connection string
+     */
+    public String getConnectString() {
+        StringBuilder str = new StringBuilder();
+        for (InstanceSpec spec : getInstances()) {
+            if (str.length() > 0)
+                str.append(",");
+
+            str.append(spec.getConnectString());
+        }
+        return str.toString();
+    }
+
+    /**
+     * Start the ensemble. The cluster must be started before use.
+     *
+     * @throws Exception errors
+     */
+    public void start() throws Exception {
+        for (TestingZooKeeperServer server : servers)
+            server.start();
+
+    }
+
+    /**
+     * Shutdown the ensemble WITHOUT freeing resources, etc.
+     */
+    public void stop() throws IOException {
+        for (TestingZooKeeperServer server : servers)
+            server.stop();
+
+    }
+
+    /**
+     * Shutdown the ensemble, free resources, etc. If temp directories were used, they
+     * are deleted. You should call this in a <code>finally</code> block.
+     *
+     * @throws IOException errors
+     */
+    @Override public void close() throws IOException {
+        for (TestingZooKeeperServer server : servers)
+            server.close();
+    }
+
+    /**
+     * Kills the given server. This simulates the server unexpectedly crashing
+     *
+     * @param instance server to kill
+     * @return true if the instance was found
+     * @throws Exception errors
+     */
+    public boolean killServer(InstanceSpec instance) throws Exception {
+        for (TestingZooKeeperServer server : servers) {
+            if (server.getInstanceSpec().equals(instance)) {
+                server.kill();
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Restart the given server of the cluster
+     *
+     * @param instance server instance
+     * @return true of the server was found
+     * @throws Exception errors
+     */
+    public boolean restartServer(InstanceSpec instance) throws Exception {
+        for (TestingZooKeeperServer server : servers) {
+            if (server.getInstanceSpec().equals(instance)) {
+                server.restart();
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Given a ZooKeeper instance, returns which server it is connected to
+     *
+     * @param client ZK instance
+     * @return the server
+     * @throws Exception errors
+     */
+    public InstanceSpec findConnectionInstance(ZooKeeper client) throws Exception {
+        Method m = client.getClass().getDeclaredMethod("testableRemoteSocketAddress");
+        m.setAccessible(true);
+        InetSocketAddress address = (InetSocketAddress)m.invoke(client);
+        if (address != null) {
+            for (TestingZooKeeperServer server : servers) {
+                if (server.getInstanceSpec().getPort() == address.getPort())
+                    return server.getInstanceSpec();
+            }
+        }
+
+        return null;
+    }
+
+    private static Map<InstanceSpec, Collection<InstanceSpec>> makeSpecs(int instanceQty) {
+        ImmutableList.Builder<InstanceSpec> builder = ImmutableList.builder();
+        for (int i = 0; i < instanceQty; ++i)
+            builder.add(InstanceSpec.newInstanceSpec());
+
+        return listToMap(builder.build());
+    }
+
+    private static Map<InstanceSpec, Collection<InstanceSpec>> listToMap(Collection<InstanceSpec> list) {
+        ImmutableMap.Builder<InstanceSpec, Collection<InstanceSpec>> mapBuilder = ImmutableMap.builder();
+        for (InstanceSpec spec : list)
+            mapBuilder.put(spec, list);
+
+        return mapBuilder.build();
+    }
+}
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingZooKeeperServer.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingZooKeeperServer.java
new file mode 100644
index 0000000..808e617
--- /dev/null
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/tcp/ipfinder/zk/curator/TestingZooKeeperServer.java
@@ -0,0 +1,169 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator;
+
+import org.apache.curator.test.DirectoryUtils;
+import org.apache.curator.test.InstanceSpec;
+import org.apache.curator.test.QuorumConfigBuilder;
+import org.apache.curator.test.TestingZooKeeperMain;
+import org.apache.curator.test.ZooKeeperMainFace;
+import org.apache.zookeeper.server.quorum.QuorumPeer;
+import org.apache.zookeeper.server.quorum.QuorumPeerConfig;
+import org.apache.zookeeper.server.quorum.QuorumPeerMain;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicReference;
+
+/**
+ */
+public class TestingZooKeeperServer extends QuorumPeerMain implements Closeable {
+    /** Logger. */
+    private static final Logger logger = LoggerFactory.getLogger(org.apache.curator.test.TestingZooKeeperServer.class);
+
+    /** Config builder. */
+    private final QuorumConfigBuilder configBuilder;
+    /** This instance index. */
+    private final int thisInstanceIndex;
+    /** Main. */
+    private volatile ZooKeeperMainFace main;
+    /** State. */
+    private final AtomicReference<State> state = new AtomicReference<>(State.LATENT);
+
+    /**
+     * Server state.
+     */
+    private enum State {
+        /** Latent. */
+        LATENT,
+        /** Started. */
+        STARTED,
+        /** Stopped. */
+        STOPPED,
+        /** Closed. */
+        CLOSED
+    }
+
+    /**
+     * @param configBuilder Config builder.
+     */
+    public TestingZooKeeperServer(QuorumConfigBuilder configBuilder) {
+        this(configBuilder, 0);
+    }
+
+    /**
+     * @param configBuilder Config builder.
+     * @param thisInstanceIndex This instance index.
+     */
+    public TestingZooKeeperServer(QuorumConfigBuilder configBuilder, int thisInstanceIndex) {
+        this.configBuilder = configBuilder;
+        this.thisInstanceIndex = thisInstanceIndex;
+        main = (configBuilder.size() > 1) ? new FixedTestingQuorumPeerMain() : new TestingZooKeeperMain();
+    }
+
+    /** {@inheritDoc} */
+    public QuorumPeer getQuorumPeer() {
+        return main.getQuorumPeer();
+    }
+
+    /**
+     *
+     */
+    public Collection<InstanceSpec> getInstanceSpecs() {
+        return configBuilder.getInstanceSpecs();
+    }
+
+    public void kill() {
+        main.kill();
+        state.set(State.STOPPED);
+    }
+
+    /**
+     * Restart the server. If the server is running it will be stopped and then
+     * started again. If it is not running (in a LATENT or STOPPED state) then
+     * it will be restarted. If it is in a CLOSED state then an exception will
+     * be thrown.
+     *
+     * @throws Exception
+     */
+    public void restart() throws Exception {
+        // Can't restart from a closed state as all the temporary data is gone
+        if (state.get() == State.CLOSED)
+            throw new IllegalStateException("Cannot restart a closed instance");
+
+        // If the server's currently running then stop it.
+        if (state.get() == State.STARTED)
+            stop();
+
+        // Set to a LATENT state so we can restart
+        state.set(State.LATENT);
+
+        main = (configBuilder.size() > 1) ? new FixedTestingQuorumPeerMain() : new TestingZooKeeperMain();
+        start();
+    }
+
+    /**
+     *
+     */
+    public void stop() throws IOException {
+        if (state.compareAndSet(State.STARTED, State.STOPPED))
+            main.close();
+    }
+
+    /**
+     *
+     */
+    public InstanceSpec getInstanceSpec() {
+        return configBuilder.getInstanceSpec(thisInstanceIndex);
+    }
+
+    /** {@inheritDoc} */
+    @Override public void close() throws IOException {
+        stop();
+
+        if (state.compareAndSet(State.STOPPED, State.CLOSED)) {
+            InstanceSpec spec = getInstanceSpec();
+            if (spec.deleteDataDirectoryOnClose())
+                DirectoryUtils.deleteRecursively(spec.getDataDirectory());
+        }
+    }
+
+    /**
+     *
+     */
+    public void start() throws Exception {
+        if (!state.compareAndSet(State.LATENT, State.STARTED))
+            return;
+
+        new Thread(new Runnable() {
+            public void run() {
+                try {
+                    QuorumPeerConfig config = configBuilder.buildConfig(thisInstanceIndex);
+                    main.runFromConfig(config);
+                }
+                catch (Exception e) {
+                    logger.error(String.format("From testing server (random state: %s) for instance: %s", String.valueOf(configBuilder.isFromRandom()), getInstanceSpec()), e);
+                }
+            }
+        }).start();
+
+        main.blockUntilStarted();
+    }
+}
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiAbstractTestSuite.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiAbstractTestSuite.java
index c5d3488..9f0cc6d 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiAbstractTestSuite.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiAbstractTestSuite.java
@@ -22,7 +22,7 @@
 import java.util.List;
 import junit.framework.TestSuite;
 import org.apache.curator.test.InstanceSpec;
-import org.apache.curator.test.TestingCluster;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingCluster;
 import org.apache.ignite.IgniteException;
 import org.apache.ignite.configuration.IgniteConfiguration;
 import org.apache.ignite.spi.discovery.DiscoverySpi;
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java
index cb8f8fc..1a9e351 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite1.java
@@ -61,7 +61,7 @@
 
         suite.addTestSuite(ZookeeperClientTest.class);
         suite.addTestSuite(ZookeeperDiscoverySpiTest.class);
-        //suite.addTestSuite(ZookeeperDiscoverySpiSaslFailedAuthTest.class);
+        suite.addTestSuite(ZookeeperDiscoverySpiSaslFailedAuthTest.class);
         suite.addTestSuite(ZookeeperDiscoverySpiSaslSuccessfulAuthTest.class);
 
         return suite;
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite2.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite2.java
index 012366f..3b8ddee 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite2.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/ZookeeperDiscoverySpiTestSuite2.java
@@ -18,7 +18,7 @@
 package org.apache.ignite.spi.discovery.zk;
 
 import junit.framework.TestSuite;
-import org.apache.curator.test.TestingCluster;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingCluster;
 import org.apache.ignite.internal.ClusterNodeMetricsUpdateTest;
 import org.apache.ignite.internal.IgniteClientReconnectCacheTest;
 import org.apache.ignite.internal.processors.cache.datastructures.IgniteClientDataStructuresTest;
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClientTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClientTest.java
index 0d64980..e3b91c7 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClientTest.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperClientTest.java
@@ -26,7 +26,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.curator.test.TestingCluster;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingCluster;
 import org.apache.ignite.internal.IgniteInternalFuture;
 import org.apache.ignite.internal.util.future.GridFutureAdapter;
 import org.apache.ignite.internal.util.typedef.internal.U;
@@ -196,12 +196,12 @@
         client.createIfNeeded("/apacheIgnite/1", null, CreateMode.PERSISTENT);
         client.createIfNeeded("/apacheIgnite/2", null, CreateMode.PERSISTENT);
 
-        client.deleteAll("/apacheIgnite", Arrays.asList("1", "2"), -1);
+        client.deleteAll(Arrays.asList("/apacheIgnite/1", "/apacheIgnite/2"), -1);
 
         assertTrue(client.getChildren("/apacheIgnite").isEmpty());
 
         client.createIfNeeded("/apacheIgnite/1", null, CreateMode.PERSISTENT);
-        client.deleteAll("/apacheIgnite", Collections.singletonList("1"), -1);
+        client.deleteAll(Collections.singletonList("/apacheIgnite/1"), -1);
 
         assertTrue(client.getChildren("/apacheIgnite").isEmpty());
     }
@@ -227,12 +227,7 @@
 
         assertEquals(cnt, client.getChildren("/apacheIgnite").size());
 
-        List<String> subPaths = new ArrayList<>(cnt);
-
-        for (int i = 0; i < cnt; i++)
-            subPaths.add(String.valueOf(i));
-
-        client.deleteAll("/apacheIgnite", subPaths, -1);
+        client.deleteAll(paths, -1);
 
         assertTrue(client.getChildren("/apacheIgnite").isEmpty());
     }
@@ -249,7 +244,7 @@
         client.createIfNeeded("/apacheIgnite/1", null, CreateMode.PERSISTENT);
         client.createIfNeeded("/apacheIgnite/2", null, CreateMode.PERSISTENT);
 
-        client.deleteAll("/apacheIgnite", Arrays.asList("1", "2", "3"), -1);
+        client.deleteAll(Arrays.asList("/apacheIgnite/1", "/apacheIgnite/2", "/apacheIgnite/3"), -1);
 
         assertTrue(client.getChildren("/apacheIgnite").isEmpty());
     }
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiSaslFailedAuthTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiSaslFailedAuthTest.java
index 864ac96..097b7c5 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiSaslFailedAuthTest.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiSaslFailedAuthTest.java
@@ -27,6 +27,8 @@
      * @throws Exception If failed.
      */
     public void testIgniteNodeWithInvalidPasswordFailsToJoin() throws Exception {
+        fail("https://issues.apache.org/jira/browse/IGNITE-9573");
+
         System.setProperty(ZooKeeperSaslClient.LOGIN_CONTEXT_NAME_KEY,
             "InvalidZookeeperClient");
 
diff --git a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTest.java b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTest.java
index 19ca5f2..f249b59 100644
--- a/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTest.java
+++ b/modules/zookeeper/src/test/java/org/apache/ignite/spi/discovery/zk/internal/ZookeeperDiscoverySpiTest.java
@@ -52,8 +52,8 @@
 import org.apache.curator.framework.CuratorFramework;
 import org.apache.curator.framework.CuratorFrameworkFactory;
 import org.apache.curator.retry.RetryNTimes;
-import org.apache.curator.test.TestingCluster;
-import org.apache.curator.test.TestingZooKeeperServer;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingCluster;
+import org.apache.ignite.spi.discovery.tcp.ipfinder.zk.curator.TestingZooKeeperServer;
 import org.apache.ignite.Ignite;
 import org.apache.ignite.IgniteCache;
 import org.apache.ignite.IgniteCheckedException;
diff --git a/parent/pom.xml b/parent/pom.xml
index 7f95406..5f72dfb 100644
--- a/parent/pom.xml
+++ b/parent/pom.xml
@@ -53,7 +53,7 @@
         <aspectj.version>1.8.13</aspectj.version>
         <aws.sdk.bundle.version>1.10.12_1</aws.sdk.bundle.version>
         <aws.sdk.version>1.11.75</aws.sdk.version>
-        <camel.version>2.16.0</camel.version>
+        <camel.version>2.22.0</camel.version>
         <commons.beanutils.bundle.version>1.9.2_1</commons.beanutils.bundle.version>
         <commons.beanutils.version>1.9.3</commons.beanutils.version>
         <commons.codec.version>1.11</commons.codec.version>
@@ -66,7 +66,7 @@
         <ezmorph.version>1.0.6</ezmorph.version>
         <flume.ng.version>1.7.0</flume.ng.version>
         <guava.retrying.version>2.0.0</guava.retrying.version>
-        <guava.version>18.0</guava.version>
+        <guava.version>25.1-jre</guava.version>
         <guava14.version>14.0.1</guava14.version>
         <guava16.version>16.0.1</guava16.version>
         <h2.version>1.4.195</h2.version>
@@ -75,12 +75,13 @@
         <httpclient.version>4.5.1</httpclient.version>
         <httpcore.version>4.4.3</httpcore.version>
         <jackson.version>2.9.6</jackson.version>
+        <jackson1.version>1.9.13</jackson1.version>
         <javassist.version>3.20.0-GA</javassist.version>
         <javax.cache.bundle.version>1.0.0_1</javax.cache.bundle.version>
         <javax.cache.tck.version>1.0.1</javax.cache.tck.version>
         <javax.cache.version>1.0.0</javax.cache.version>
         <jetbrains.annotations.version>13.0</jetbrains.annotations.version>
-        <jetty.version>9.2.11.v20150529</jetty.version>
+        <jetty.version>9.4.11.v20180605</jetty.version>
         <jmh.version>1.13</jmh.version>
         <jms.spec.version>1.1.1</jms.spec.version>
         <jsch.bundle.version>0.1.54_1</jsch.bundle.version>
@@ -91,8 +92,8 @@
         <kafka.version>0.10.0.1</kafka.version>
         <karaf.version>4.0.2</karaf.version>
         <log4j.version>2.11.0</log4j.version>
-        <lucene.bundle.version>5.5.2_1</lucene.bundle.version>
-        <lucene.version>5.5.2</lucene.version>
+        <lucene.bundle.version>7.4.0_1</lucene.bundle.version>
+        <lucene.version>7.4.0</lucene.version>
         <maven.bundle.plugin.version>3.5.0</maven.bundle.plugin.version>
         <mockito.version>1.10.19</mockito.version>
         <mysql.connector.version>5.1.39</mysql.connector.version>
@@ -102,19 +103,18 @@
         <osgi.enterprise.version>5.0.0</osgi.enterprise.version>
         <paho.version>1.0.2</paho.version>
         <postgres.connector.version>9.4.1208.jre7</postgres.connector.version>
-        <rocketmq.version>4.2.0</rocketmq.version>
-        <scala210.jline.version>2.10.4</scala210.jline.version>
-        <scala210.library.version>2.10.6</scala210.library.version>
-        <scala211.library.version>2.11.8</scala211.library.version>
-        <scala.library.version>2.11.8</scala.library.version>
+        <rocketmq.version>4.3.0</rocketmq.version>
+        <scala210.jline.version>2.10.7</scala210.jline.version>
+        <scala210.library.version>2.10.7</scala210.library.version>
+        <scala.library.version>2.11.12</scala.library.version>
         <slf4j.version>1.7.7</slf4j.version>
         <slf4j16.version>1.6.4</slf4j16.version>
         <spark.hadoop.version>2.6.5</spark.hadoop.version>
         <spark.version>2.3.0</spark.version>
-        <spring.data.version>1.13.11.RELEASE</spring.data.version> <!-- don't forget to update spring version -->
-        <spring.version>4.3.16.RELEASE</spring.version><!-- don't forget to update spring-data version -->
-        <spring.data-2.0.version>2.0.6.RELEASE</spring.data-2.0.version> <!-- don't forget to update spring-5.0 version -->
-        <spring-5.0.version>5.0.5.RELEASE</spring-5.0.version><!-- don't forget to update spring-data-2.0 version -->
+        <spring.data.version>1.13.14.RELEASE</spring.data.version> <!-- don't forget to update spring version -->
+        <spring.version>4.3.18.RELEASE</spring.version><!-- don't forget to update spring-data version -->
+        <spring.data-2.0.version>2.0.9.RELEASE</spring.data-2.0.version> <!-- don't forget to update spring-5.0 version -->
+        <spring-5.0.version>5.0.8.RELEASE</spring-5.0.version><!-- don't forget to update spring-data-2.0 version -->
         <spring41.osgi.feature.version>4.1.7.RELEASE_1</spring41.osgi.feature.version>
         <storm.version>1.1.1</storm.version>
         <tomcat.version>9.0.10</tomcat.version>
@@ -123,7 +123,7 @@
         <yammer.metrics.core.version>2.2.0</yammer.metrics.core.version>
         <yardstick.version>0.8.3</yardstick.version>
         <zkclient.version>0.5</zkclient.version>
-        <zookeeper.version>3.4.6</zookeeper.version>
+        <zookeeper.version>3.4.13</zookeeper.version>
 
         <!-- OSGI Manifest generation default property values -->
         <osgi.import.package>*</osgi.import.package>
@@ -469,7 +469,11 @@
                                 <packages>org.apache.ignite.springdata.repository*</packages>
                             </group>
                             <group>
-                                <title>RocketMQ integration</title>
+                                <title>SpringData 2.0 integration</title>
+                                <packages>org.apache.ignite.springdata20.repository*</packages>
+                            </group>
+                            <group>
+                            <title>RocketMQ integration</title>
                                 <packages>org.apache.ignite.stream.rocketmq*</packages>
                             </group>
                             <group>
@@ -824,6 +828,7 @@
                                         <exclude>**/*.csv</exclude><!--CSV files-->
                                         <exclude>**/*.jks</exclude><!--bin-files-->
                                         <exclude>**/pom-installed.xml</exclude><!--tmp-files-->
+                                        <exclude>**/keystore</exclude><!--bin-files-->
                                         <exclude>**/keystore/*.jks</exclude><!--bin-files-->
                                         <exclude>**/keystore/*.pem</exclude><!--auto generated files-->
                                         <exclude>**/keystore/*.pfx</exclude><!--bin-files-->
@@ -835,6 +840,7 @@
                                         <exclude>**/META-INF/services/**</exclude> <!-- Interface mappings: cannot be changed -->
                                         <!--special excludes-->
                                         <exclude>idea/ignite_codeStyle.xml</exclude>
+                                        <exclude>idea/ignite_inspections.xml</exclude>
                                         <exclude>**/DEVNOTES*.txt</exclude>
                                         <exclude>**/NOTICE*</exclude>
                                         <exclude>**/LICENSE*</exclude>
@@ -982,7 +988,7 @@
             <properties>
                 <maven.compiler.source>9</maven.compiler.source>
                 <maven.compiler.target>9</maven.compiler.target>
-                <scala.library.version>2.12.4</scala.library.version>
+                <scala.library.version>2.12.6</scala.library.version>
             </properties>
             <dependencies/>
             <build>